OmniSciDB  a5dc49c757
 All Classes Namespaces Files Functions Variables Typedefs Enumerations Enumerator Friends Macros Groups Pages
ResultSet Class Reference

#include <ResultSet.h>

+ Collaboration diagram for ResultSet:

Classes

struct  ColumnWiseTargetAccessor
 
struct  QueryExecutionTimings
 
struct  ResultSetComparator
 
struct  RowIterationState
 
struct  RowWiseTargetAccessor
 
struct  StorageLookupResult
 
struct  TargetOffsets
 
struct  VarlenTargetPtrPair
 

Public Types

enum  GeoReturnType { GeoReturnType::GeoTargetValue, GeoReturnType::WktString, GeoReturnType::GeoTargetValuePtr, GeoReturnType::GeoTargetValueGpuPtr }
 

Public Member Functions

 ResultSet (const std::vector< TargetInfo > &targets, const ExecutorDeviceType device_type, const QueryMemoryDescriptor &query_mem_desc, const std::shared_ptr< RowSetMemoryOwner > row_set_mem_owner, const unsigned block_size, const unsigned grid_size)
 
 ResultSet (const std::vector< TargetInfo > &targets, const std::vector< ColumnLazyFetchInfo > &lazy_fetch_info, const std::vector< std::vector< const int8_t * >> &col_buffers, const std::vector< std::vector< int64_t >> &frag_offsets, const std::vector< int64_t > &consistent_frag_sizes, const ExecutorDeviceType device_type, const int device_id, const int thread_idx, const QueryMemoryDescriptor &query_mem_desc, const std::shared_ptr< RowSetMemoryOwner > row_set_mem_owner, const unsigned block_size, const unsigned grid_size)
 
 ResultSet (const std::shared_ptr< const Analyzer::Estimator >, const ExecutorDeviceType device_type, const int device_id, Data_Namespace::DataMgr *data_mgr)
 
 ResultSet (const std::string &explanation)
 
 ResultSet (int64_t queue_time_ms, int64_t render_time_ms, const std::shared_ptr< RowSetMemoryOwner > row_set_mem_owner)
 
 ~ResultSet ()
 
std::string toString () const
 
std::string summaryToString () const
 
ResultSetRowIterator rowIterator (size_t from_logical_index, bool translate_strings, bool decimal_to_double) const
 
ResultSetRowIterator rowIterator (bool translate_strings, bool decimal_to_double) const
 
ExecutorDeviceType getDeviceType () const
 
const ResultSetStorage * allocateStorage () const
 
const ResultSetStorage * allocateStorage (int8_t *, const std::vector< int64_t > &, std::shared_ptr< VarlenOutputInfo >=nullptr) const
 
const ResultSetStorage * allocateStorage (const std::vector< int64_t > &) const
 
void updateStorageEntryCount (const size_t new_entry_count)
 
std::vector< TargetValue > getNextRow (const bool translate_strings, const bool decimal_to_double) const
 
size_t getCurrentRowBufferIndex () const
 
std::vector< TargetValue > getRowAt (const size_t index) const
 
TargetValue getRowAt (const size_t row_idx, const size_t col_idx, const bool translate_strings, const bool decimal_to_double=true) const
 
OneIntegerColumnRow getOneColRow (const size_t index) const
 
std::vector< TargetValue > getRowAtNoTranslations (const size_t index, const std::vector< bool > &targets_to_skip={}) const
 
bool isRowAtEmpty (const size_t index) const
 
void sort (const std::list< Analyzer::OrderEntry > &order_entries, size_t top_n, const ExecutorDeviceType device_type, const Executor *executor)
 
void keepFirstN (const size_t n)
 
void dropFirstN (const size_t n)
 
void append (ResultSet &that)
 
const ResultSetStorage * getStorage () const
 
size_t colCount () const
 
SQLTypeInfo getColType (const size_t col_idx) const
 
size_t rowCount (const bool force_parallel=false) const
 Returns the number of valid entries in the result set (i.e., those that will be returned from the SQL query or input into the next query step). More...
 
void invalidateCachedRowCount () const
 
void setCachedRowCount (const size_t row_count) const
 
bool isEmpty () const
 Returns a boolean signifying whether there are valid entries in the result set. More...
 
size_t entryCount () const
 Returns the number of entries the result set is allocated to hold. More...
 
size_t getBufferSizeBytes (const ExecutorDeviceType device_type) const
 
bool definitelyHasNoRows () const
 
const QueryMemoryDescriptor & getQueryMemDesc () const
 
const std::vector< TargetInfo > & getTargetInfos () const
 
const std::vector< int64_t > & getTargetInitVals () const
 
int8_t * getDeviceEstimatorBuffer () const
 
int8_t * getHostEstimatorBuffer () const
 
void syncEstimatorBuffer () const
 
size_t getNDVEstimator () const
 
void setQueueTime (const int64_t queue_time)
 
void setKernelQueueTime (const int64_t kernel_queue_time)
 
void addCompilationQueueTime (const int64_t compilation_queue_time)
 
int64_t getQueueTime () const
 
int64_t getRenderTime () const
 
void moveToBegin () const
 
bool isTruncated () const
 
bool isExplain () const
 
void setValidationOnlyRes ()
 
bool isValidationOnlyRes () const
 
std::string getExplanation () const
 
bool isGeoColOnGpu (const size_t col_idx) const
 
int getDeviceId () const
 
int getThreadIdx () const
 
std::string getString (SQLTypeInfo const &, int64_t const ival) const
 
ScalarTargetValue convertToScalarTargetValue (SQLTypeInfo const &, bool const translate_strings, int64_t const val) const
 
bool isLessThan (SQLTypeInfo const &, int64_t const lhs, int64_t const rhs) const
 
void fillOneEntry (const std::vector< int64_t > &entry)
 
void initializeStorage () const
 
void holdChunks (const std::list< std::shared_ptr< Chunk_NS::Chunk >> &chunks)
 
void holdChunkIterators (const std::shared_ptr< std::list< ChunkIter >> chunk_iters)
 
void holdLiterals (std::vector< int8_t > &literal_buff)
 
std::shared_ptr
< RowSetMemoryOwner
getRowSetMemOwner () const
 
const Permutation & getPermutationBuffer () const
 
const bool isPermutationBufferEmpty () const
 
void serialize (TSerializedRows &serialized_rows) const
 
size_t getLimit () const
 
ResultSetPtr copy ()
 
void clearPermutation ()
 
void initStatus ()
 
void invalidateResultSetChunks ()
 
const bool isEstimator () const
 
void setCached (bool val)
 
const bool isCached () const
 
void setExecTime (const long exec_time)
 
const long getExecTime () const
 
void setQueryPlanHash (const QueryPlanHash query_plan)
 
const QueryPlanHash getQueryPlanHash ()
 
std::unordered_set< size_t > getInputTableKeys () const
 
void setInputTableKeys (std::unordered_set< size_t > &&intput_table_keys)
 
void setTargetMetaInfo (const std::vector< TargetMetaInfo > &target_meta_info)
 
std::vector< TargetMetaInfo > getTargetMetaInfo ()
 
std::optional< bool > canUseSpeculativeTopNSort () const
 
void setUseSpeculativeTopNSort (bool value)
 
const bool hasValidBuffer () const
 
unsigned getBlockSize () const
 
unsigned getGridSize () const
 
GeoReturnType getGeoReturnType () const
 
void setGeoReturnType (const GeoReturnType val)
 
void copyColumnIntoBuffer (const size_t column_idx, int8_t *output_buffer, const size_t output_buffer_size) const
 
bool isDirectColumnarConversionPossible () const
 
bool didOutputColumnar () const
 
bool isZeroCopyColumnarConversionPossible (size_t column_idx) const
 
const int8_t * getColumnarBuffer (size_t column_idx) const
 
const size_t getColumnarBufferSize (size_t column_idx) const
 
QueryDescriptionType getQueryDescriptionType () const
 
const int8_t getPaddedSlotWidthBytes (const size_t slot_idx) const
 
std::tuple< std::vector< bool >
, size_t > 
getSingleSlotTargetBitmap () const
 
std::tuple< std::vector< bool >
, size_t > 
getSupportedSingleSlotTargetBitmap () const
 
std::vector< size_t > getSlotIndicesForTargetIndices () const
 
const std::vector
< ColumnLazyFetchInfo > & 
getLazyFetchInfo () const
 
bool areAnyColumnsLazyFetched () const
 
size_t getNumColumnsLazyFetched () const
 
void setSeparateVarlenStorageValid (const bool val)
 
const std::vector< std::string > getStringDictionaryPayloadCopy (const shared::StringDictKey &dict_key) const
 
const std::pair< std::vector
< int32_t >, std::vector
< std::string > > 
getUniqueStringsForDictEncodedTargetCol (const size_t col_idx) const
 
StringDictionaryProxy * getStringDictionaryProxy (const shared::StringDictKey &dict_key) const
 
template<typename ENTRY_TYPE , QueryDescriptionType QUERY_TYPE, bool COLUMNAR_FORMAT>
ENTRY_TYPE getEntryAt (const size_t row_idx, const size_t target_idx, const size_t slot_idx) const
 
ChunkStats getTableFunctionChunkStats (const size_t target_idx) const
 
void translateDictEncodedColumns (std::vector< TargetInfo > const &, size_t const start_idx)
 
void eachCellInColumn (RowIterationState &, CellCallback const &)
 
const Executor * getExecutor () const
 
bool checkSlotUsesFlatBufferFormat (const size_t slot_idx) const
 
template<typename ENTRY_TYPE , QueryDescriptionType QUERY_TYPE, bool COLUMNAR_FORMAT>
ENTRY_TYPE getEntryAt (const size_t row_idx, const size_t target_idx, const size_t slot_idx) const
 
template<typename ENTRY_TYPE >
ENTRY_TYPE getColumnarPerfectHashEntryAt (const size_t row_idx, const size_t target_idx, const size_t slot_idx) const
 
template<typename ENTRY_TYPE >
ENTRY_TYPE getRowWisePerfectHashEntryAt (const size_t row_idx, const size_t target_idx, const size_t slot_idx) const
 
template<typename ENTRY_TYPE >
ENTRY_TYPE getRowWiseBaselineEntryAt (const size_t row_idx, const size_t target_idx, const size_t slot_idx) const
 
template<typename ENTRY_TYPE >
ENTRY_TYPE getColumnarBaselineEntryAt (const size_t row_idx, const size_t target_idx, const size_t slot_idx) const
 

Static Public Member Functions

static QueryMemoryDescriptor fixupQueryMemoryDescriptor (const QueryMemoryDescriptor &)
 
static bool isNullIval (SQLTypeInfo const &, bool const translate_strings, int64_t const ival)
 
static ScalarTargetValue nullScalarTargetValue (SQLTypeInfo const &, bool const translate_strings)
 
static std::unique_ptr< ResultSet > unserialize (const TSerializedRows &serialized_rows, const Executor *)
 
static double calculateQuantile (quantile::TDigest *const t_digest)
 

Public Attributes

friend ResultSetBuilder
 

Private Types

using ApproxQuantileBuffers = std::vector< std::vector< double >>
 
using ModeBuffers = std::vector< std::vector< int64_t >>
 
using SerializedVarlenBufferStorage = std::vector< std::string >
 

Private Member Functions

void advanceCursorToNextEntry (ResultSetRowIterator &iter) const
 
std::vector< TargetValue > getNextRowImpl (const bool translate_strings, const bool decimal_to_double) const
 
std::vector< TargetValue > getNextRowUnlocked (const bool translate_strings, const bool decimal_to_double) const
 
std::vector< TargetValue > getRowAt (const size_t index, const bool translate_strings, const bool decimal_to_double, const bool fixup_count_distinct_pointers, const std::vector< bool > &targets_to_skip={}) const
 
template<typename ENTRY_TYPE >
ENTRY_TYPE getColumnarPerfectHashEntryAt (const size_t row_idx, const size_t target_idx, const size_t slot_idx) const
 
template<typename ENTRY_TYPE >
ENTRY_TYPE getRowWisePerfectHashEntryAt (const size_t row_idx, const size_t target_idx, const size_t slot_idx) const
 
template<typename ENTRY_TYPE >
ENTRY_TYPE getRowWiseBaselineEntryAt (const size_t row_idx, const size_t target_idx, const size_t slot_idx) const
 
template<typename ENTRY_TYPE >
ENTRY_TYPE getColumnarBaselineEntryAt (const size_t row_idx, const size_t target_idx, const size_t slot_idx) const
 
size_t binSearchRowCount () const
 
size_t parallelRowCount () const
 
size_t advanceCursorToNextEntry () const
 
void radixSortOnGpu (const std::list< Analyzer::OrderEntry > &order_entries) const
 
void radixSortOnCpu (const std::list< Analyzer::OrderEntry > &order_entries) const
 
TargetValue getTargetValueFromBufferRowwise (int8_t *rowwise_target_ptr, int8_t *keys_ptr, const size_t entry_buff_idx, const TargetInfo &target_info, const size_t target_logical_idx, const size_t slot_idx, const bool translate_strings, const bool decimal_to_double, const bool fixup_count_distinct_pointers) const
 
TargetValue getTargetValueFromBufferColwise (const int8_t *col_ptr, const int8_t *keys_ptr, const QueryMemoryDescriptor &query_mem_desc, const size_t local_entry_idx, const size_t global_entry_idx, const TargetInfo &target_info, const size_t target_logical_idx, const size_t slot_idx, const bool translate_strings, const bool decimal_to_double) const
 
TargetValue makeTargetValue (const int8_t *ptr, const int8_t compact_sz, const TargetInfo &target_info, const size_t target_logical_idx, const bool translate_strings, const bool decimal_to_double, const size_t entry_buff_idx) const
 
ScalarTargetValue makeStringTargetValue (SQLTypeInfo const &chosen_type, bool const translate_strings, int64_t const ival) const
 
TargetValue makeVarlenTargetValue (const int8_t *ptr1, const int8_t compact_sz1, const int8_t *ptr2, const int8_t compact_sz2, const TargetInfo &target_info, const size_t target_logical_idx, const bool translate_strings, const size_t entry_buff_idx) const
 
TargetValue makeGeoTargetValue (const int8_t *geo_target_ptr, const size_t slot_idx, const TargetInfo &target_info, const size_t target_logical_idx, const size_t entry_buff_idx) const
 
InternalTargetValue getVarlenOrderEntry (const int64_t str_ptr, const size_t str_len) const
 
int64_t lazyReadInt (const int64_t ival, const size_t target_logical_idx, const StorageLookupResult &storage_lookup_result) const
 
std::pair< size_t, size_t > getStorageIndex (const size_t entry_idx) const
 
const std::vector< const
int8_t * > & 
getColumnFrag (const size_t storge_idx, const size_t col_logical_idx, int64_t &global_idx) const
 
const VarlenOutputInfo * getVarlenOutputInfo (const size_t entry_idx) const
 
StorageLookupResult findStorage (const size_t entry_idx) const
 
Comparator createComparator (const std::list< Analyzer::OrderEntry > &order_entries, const PermutationView permutation, const Executor *executor, const bool single_threaded)
 
PermutationView initPermutationBuffer (PermutationView permutation, PermutationIdx const begin, PermutationIdx const end) const
 
void parallelTop (const std::list< Analyzer::OrderEntry > &order_entries, const size_t top_n, const Executor *executor)
 
void baselineSort (const std::list< Analyzer::OrderEntry > &order_entries, const size_t top_n, const ExecutorDeviceType device_type, const Executor *executor)
 
void doBaselineSort (const ExecutorDeviceType device_type, const std::list< Analyzer::OrderEntry > &order_entries, const size_t top_n, const Executor *executor)
 
bool canUseFastBaselineSort (const std::list< Analyzer::OrderEntry > &order_entries, const size_t top_n)
 
size_t rowCountImpl (const bool force_parallel) const
 
Data_Namespace::DataMgr * getDataManager () const
 
int getGpuCount () const
 
void serializeProjection (TSerializedRows &serialized_rows) const
 
void serializeVarlenAggColumn (int8_t *buf, std::vector< std::string > &varlen_bufer) const
 
void serializeCountDistinctColumns (TSerializedRows &) const
 
void unserializeCountDistinctColumns (const TSerializedRows &)
 
void fixupCountDistinctPointers ()
 
void create_active_buffer_set (CountDistinctSet &count_distinct_active_buffer_set) const
 
int64_t getDistinctBufferRefFromBufferRowwise (int8_t *rowwise_target_ptr, const TargetInfo &target_info) const
 

Static Private Member Functions

static bool isNull (const SQLTypeInfo &ti, const InternalTargetValue &val, const bool float_argument_input)
 
static PermutationView topPermutation (PermutationView, const size_t n, const Comparator &)
 

Private Attributes

const std::vector< TargetInfo > targets_
 
const ExecutorDeviceType device_type_
 
const int device_id_
 
const int thread_idx_
 
QueryMemoryDescriptor query_mem_desc_
 
std::unique_ptr< ResultSetStorage > storage_
 
AppendedStorage appended_storage_
 
size_t crt_row_buff_idx_
 
size_t fetched_so_far_
 
size_t drop_first_
 
size_t keep_first_
 
std::shared_ptr
< RowSetMemoryOwner
row_set_mem_owner_
 
Permutation permutation_
 
unsigned block_size_ {0}
 
unsigned grid_size_ {0}
 
QueryExecutionTimings timings_
 
std::list< std::shared_ptr
< Chunk_NS::Chunk > > 
chunks_
 
std::vector< std::shared_ptr
< std::list< ChunkIter > > > 
chunk_iters_
 
std::vector< std::vector
< int8_t > > 
literal_buffers_
 
std::vector< ColumnLazyFetchInfo > lazy_fetch_info_
 
std::vector< std::vector
< std::vector< const int8_t * > > > 
col_buffers_
 
std::vector< std::vector
< std::vector< int64_t > > > 
frag_offsets_
 
std::vector< std::vector
< int64_t > > 
consistent_frag_sizes_
 
const std::shared_ptr< const
Analyzer::Estimator
estimator_
 
Data_Namespace::AbstractBuffer * device_estimator_buffer_ {nullptr}
 
int8_t * host_estimator_buffer_ {nullptr}
 
Data_Namespace::DataMgr * data_mgr_
 
std::vector
< SerializedVarlenBufferStorage
serialized_varlen_buffer_
 
bool separate_varlen_storage_valid_
 
std::string explanation_
 
const bool just_explain_
 
bool for_validation_only_
 
std::atomic< int64_t > cached_row_count_
 
std::mutex row_iteration_mutex_
 
GeoReturnType geo_return_type_
 
bool cached_
 
size_t query_exec_time_
 
QueryPlanHash query_plan_
 
std::unordered_set< size_t > input_table_keys_
 
std::vector< TargetMetaInfo > target_meta_info_
 
std::optional< bool > can_use_speculative_top_n_sort
 

Friends

class ResultSetManager
 
class ResultSetRowIterator
 
class ColumnarResults
 

Detailed Description

Definition at line 157 of file ResultSet.h.

Member Typedef Documentation

using ResultSet::ApproxQuantileBuffers = std::vector<std::vector<double>>
private

Definition at line 829 of file ResultSet.h.

using ResultSet::ModeBuffers = std::vector<std::vector<int64_t>>
private

Definition at line 830 of file ResultSet.h.

using ResultSet::SerializedVarlenBufferStorage = std::vector<std::string>
private

Definition at line 977 of file ResultSet.h.

Member Enumeration Documentation

Geo return type options when accessing geo columns from a result set.

Enumerator
GeoTargetValue 

Copies the geo data into a struct of vectors - coords are uncompressed

WktString 

Returns the geo data as a WKT string

GeoTargetValuePtr 

Returns only the pointers of the underlying buffers for the geo data.

GeoTargetValueGpuPtr 

If geo data is currently on a device, keep the data on the device and return the device ptrs

Definition at line 542 of file ResultSet.h.

542  {
545  WktString,
548  GeoTargetValueGpuPtr
550  };
boost::optional< boost::variant< GeoPointTargetValue, GeoMultiPointTargetValue, GeoLineStringTargetValue, GeoMultiLineStringTargetValue, GeoPolyTargetValue, GeoMultiPolyTargetValue >> GeoTargetValue
Definition: TargetValue.h:187
boost::variant< GeoPointTargetValuePtr, GeoMultiPointTargetValuePtr, GeoLineStringTargetValuePtr, GeoMultiLineStringTargetValuePtr, GeoPolyTargetValuePtr, GeoMultiPolyTargetValuePtr > GeoTargetValuePtr
Definition: TargetValue.h:193

Constructor & Destructor Documentation

ResultSet::ResultSet ( const std::vector< TargetInfo > &  targets,
const ExecutorDeviceType  device_type,
const QueryMemoryDescriptor query_mem_desc,
const std::shared_ptr< RowSetMemoryOwner row_set_mem_owner,
const unsigned  block_size,
const unsigned  grid_size 
)

Definition at line 64 of file ResultSet.cpp.

70  : targets_(targets)
71  , device_type_(device_type)
72  , device_id_(-1)
73  , thread_idx_(-1)
74  , query_mem_desc_(query_mem_desc)
76  , fetched_so_far_(0)
77  , drop_first_(0)
78  , keep_first_(0)
79  , row_set_mem_owner_(row_set_mem_owner)
80  , block_size_(block_size)
81  , grid_size_(grid_size)
82  , data_mgr_(nullptr)
84  , just_explain_(false)
85  , for_validation_only_(false)
88  , cached_(false)
89  , query_exec_time_(0)
91  , can_use_speculative_top_n_sort(std::nullopt) {}
bool for_validation_only_
Definition: ResultSet.h:983
GeoReturnType geo_return_type_
Definition: ResultSet.h:988
constexpr QueryPlanHash EMPTY_HASHED_PLAN_DAG_KEY
std::optional< bool > can_use_speculative_top_n_sort
Definition: ResultSet.h:1001
QueryMemoryDescriptor query_mem_desc_
Definition: ResultSet.h:947
size_t query_exec_time_
Definition: ResultSet.h:993
size_t keep_first_
Definition: ResultSet.h:953
const bool just_explain_
Definition: ResultSet.h:982
unsigned block_size_
Definition: ResultSet.h:957
std::atomic< int64_t > cached_row_count_
Definition: ResultSet.h:984
const std::vector< TargetInfo > targets_
Definition: ResultSet.h:943
std::shared_ptr< RowSetMemoryOwner > row_set_mem_owner_
Definition: ResultSet.h:954
size_t drop_first_
Definition: ResultSet.h:952
bool cached_
Definition: ResultSet.h:991
unsigned grid_size_
Definition: ResultSet.h:958
Data_Namespace::DataMgr * data_mgr_
Definition: ResultSet.h:974
const int thread_idx_
Definition: ResultSet.h:946
const ExecutorDeviceType device_type_
Definition: ResultSet.h:944
size_t fetched_so_far_
Definition: ResultSet.h:951
size_t crt_row_buff_idx_
Definition: ResultSet.h:950
QueryPlanHash query_plan_
Definition: ResultSet.h:994
bool separate_varlen_storage_valid_
Definition: ResultSet.h:980
constexpr int64_t uninitialized_cached_row_count
Definition: ResultSet.cpp:52
const int device_id_
Definition: ResultSet.h:945
ResultSet::ResultSet ( const std::vector< TargetInfo > &  targets,
const std::vector< ColumnLazyFetchInfo > &  lazy_fetch_info,
const std::vector< std::vector< const int8_t * >> &  col_buffers,
const std::vector< std::vector< int64_t >> &  frag_offsets,
const std::vector< int64_t > &  consistent_frag_sizes,
const ExecutorDeviceType  device_type,
const int  device_id,
const int  thread_idx,
const QueryMemoryDescriptor query_mem_desc,
const std::shared_ptr< RowSetMemoryOwner row_set_mem_owner,
const unsigned  block_size,
const unsigned  grid_size 
)

Definition at line 93 of file ResultSet.cpp.

105  : targets_(targets)
106  , device_type_(device_type)
107  , device_id_(device_id)
108  , thread_idx_(thread_idx)
109  , query_mem_desc_(query_mem_desc)
110  , crt_row_buff_idx_(0)
111  , fetched_so_far_(0)
112  , drop_first_(0)
113  , keep_first_(0)
114  , row_set_mem_owner_(row_set_mem_owner)
115  , block_size_(block_size)
116  , grid_size_(grid_size)
117  , lazy_fetch_info_(lazy_fetch_info)
118  , col_buffers_{col_buffers}
119  , frag_offsets_{frag_offsets}
120  , consistent_frag_sizes_{consistent_frag_sizes}
121  , data_mgr_(nullptr)
123  , just_explain_(false)
124  , for_validation_only_(false)
127  , cached_(false)
128  , query_exec_time_(0)
130  , can_use_speculative_top_n_sort(std::nullopt) {}
bool for_validation_only_
Definition: ResultSet.h:983
GeoReturnType geo_return_type_
Definition: ResultSet.h:988
constexpr QueryPlanHash EMPTY_HASHED_PLAN_DAG_KEY
std::optional< bool > can_use_speculative_top_n_sort
Definition: ResultSet.h:1001
QueryMemoryDescriptor query_mem_desc_
Definition: ResultSet.h:947
size_t query_exec_time_
Definition: ResultSet.h:993
size_t keep_first_
Definition: ResultSet.h:953
const bool just_explain_
Definition: ResultSet.h:982
unsigned block_size_
Definition: ResultSet.h:957
std::atomic< int64_t > cached_row_count_
Definition: ResultSet.h:984
const std::vector< TargetInfo > targets_
Definition: ResultSet.h:943
std::shared_ptr< RowSetMemoryOwner > row_set_mem_owner_
Definition: ResultSet.h:954
size_t drop_first_
Definition: ResultSet.h:952
bool cached_
Definition: ResultSet.h:991
unsigned grid_size_
Definition: ResultSet.h:958
Data_Namespace::DataMgr * data_mgr_
Definition: ResultSet.h:974
std::vector< std::vector< std::vector< const int8_t * > > > col_buffers_
Definition: ResultSet.h:967
const int thread_idx_
Definition: ResultSet.h:946
std::vector< std::vector< int64_t > > consistent_frag_sizes_
Definition: ResultSet.h:969
const ExecutorDeviceType device_type_
Definition: ResultSet.h:944
size_t fetched_so_far_
Definition: ResultSet.h:951
std::vector< ColumnLazyFetchInfo > lazy_fetch_info_
Definition: ResultSet.h:966
size_t crt_row_buff_idx_
Definition: ResultSet.h:950
QueryPlanHash query_plan_
Definition: ResultSet.h:994
std::vector< std::vector< std::vector< int64_t > > > frag_offsets_
Definition: ResultSet.h:968
bool separate_varlen_storage_valid_
Definition: ResultSet.h:980
constexpr int64_t uninitialized_cached_row_count
Definition: ResultSet.cpp:52
const int device_id_
Definition: ResultSet.h:945
ResultSet::ResultSet ( const std::shared_ptr< const Analyzer::Estimator ,
const ExecutorDeviceType  device_type,
const int  device_id,
Data_Namespace::DataMgr data_mgr 
)
ResultSet::ResultSet ( const std::string &  explanation)

Definition at line 165 of file ResultSet.cpp.

References CPU.

167  , device_id_(-1)
168  , thread_idx_(-1)
169  , fetched_so_far_(0)
171  , explanation_(explanation)
172  , just_explain_(true)
173  , for_validation_only_(false)
176  , cached_(false)
177  , query_exec_time_(0)
179  , can_use_speculative_top_n_sort(std::nullopt) {}
bool for_validation_only_
Definition: ResultSet.h:983
GeoReturnType geo_return_type_
Definition: ResultSet.h:988
constexpr QueryPlanHash EMPTY_HASHED_PLAN_DAG_KEY
std::optional< bool > can_use_speculative_top_n_sort
Definition: ResultSet.h:1001
size_t query_exec_time_
Definition: ResultSet.h:993
const bool just_explain_
Definition: ResultSet.h:982
std::atomic< int64_t > cached_row_count_
Definition: ResultSet.h:984
bool cached_
Definition: ResultSet.h:991
const int thread_idx_
Definition: ResultSet.h:946
std::string explanation_
Definition: ResultSet.h:981
const ExecutorDeviceType device_type_
Definition: ResultSet.h:944
size_t fetched_so_far_
Definition: ResultSet.h:951
QueryPlanHash query_plan_
Definition: ResultSet.h:994
bool separate_varlen_storage_valid_
Definition: ResultSet.h:980
constexpr int64_t uninitialized_cached_row_count
Definition: ResultSet.cpp:52
const int device_id_
Definition: ResultSet.h:945
ResultSet::ResultSet ( int64_t  queue_time_ms,
int64_t  render_time_ms,
const std::shared_ptr< RowSetMemoryOwner row_set_mem_owner 
)

Definition at line 181 of file ResultSet.cpp.

References CPU.

185  , device_id_(-1)
186  , thread_idx_(-1)
187  , fetched_so_far_(0)
188  , row_set_mem_owner_(row_set_mem_owner)
189  , timings_(QueryExecutionTimings{queue_time_ms, render_time_ms, 0, 0})
191  , just_explain_(true)
192  , for_validation_only_(false)
195  , cached_(false)
196  , query_exec_time_(0)
198  , can_use_speculative_top_n_sort(std::nullopt) {}
bool for_validation_only_
Definition: ResultSet.h:983
GeoReturnType geo_return_type_
Definition: ResultSet.h:988
constexpr QueryPlanHash EMPTY_HASHED_PLAN_DAG_KEY
std::optional< bool > can_use_speculative_top_n_sort
Definition: ResultSet.h:1001
size_t query_exec_time_
Definition: ResultSet.h:993
const bool just_explain_
Definition: ResultSet.h:982
std::atomic< int64_t > cached_row_count_
Definition: ResultSet.h:984
std::shared_ptr< RowSetMemoryOwner > row_set_mem_owner_
Definition: ResultSet.h:954
bool cached_
Definition: ResultSet.h:991
QueryExecutionTimings timings_
Definition: ResultSet.h:959
const int thread_idx_
Definition: ResultSet.h:946
const ExecutorDeviceType device_type_
Definition: ResultSet.h:944
size_t fetched_so_far_
Definition: ResultSet.h:951
QueryPlanHash query_plan_
Definition: ResultSet.h:994
bool separate_varlen_storage_valid_
Definition: ResultSet.h:980
constexpr int64_t uninitialized_cached_row_count
Definition: ResultSet.cpp:52
const int device_id_
Definition: ResultSet.h:945
ResultSet::~ResultSet ( )

Definition at line 200 of file ResultSet.cpp.

References CHECK, CPU, and data_mgr_().

200  {
201  if (storage_) {
202  if (!storage_->buff_is_provided_) {
203  CHECK(storage_->getUnderlyingBuffer());
204  free(storage_->getUnderlyingBuffer());
205  }
206  }
207  for (auto& storage : appended_storage_) {
208  if (storage && !storage->buff_is_provided_) {
209  free(storage->getUnderlyingBuffer());
210  }
211  }
215  }
217  CHECK(data_mgr_);
219  }
220 }
AppendedStorage appended_storage_
Definition: ResultSet.h:949
std::unique_ptr< ResultSetStorage > storage_
Definition: ResultSet.h:948
Data_Namespace::DataMgr * data_mgr_
Definition: ResultSet.h:974
int8_t * host_estimator_buffer_
Definition: ResultSet.h:973
const ExecutorDeviceType device_type_
Definition: ResultSet.h:944
#define CHECK(condition)
Definition: Logger.h:291
void free(AbstractBuffer *buffer)
Definition: DataMgr.cpp:614
Data_Namespace::AbstractBuffer * device_estimator_buffer_
Definition: ResultSet.h:972

+ Here is the call graph for this function:

Member Function Documentation

void ResultSet::addCompilationQueueTime ( const int64_t  compilation_queue_time)

Definition at line 724 of file ResultSet.cpp.

724  {
725  timings_.compilation_queue_time += compilation_queue_time;
726 }
QueryExecutionTimings timings_
Definition: ResultSet.h:959
void ResultSet::advanceCursorToNextEntry ( ResultSetRowIterator iter) const
private
size_t ResultSet::advanceCursorToNextEntry ( ) const
private
const ResultSetStorage* ResultSet::allocateStorage ( ) const
const ResultSetStorage* ResultSet::allocateStorage ( int8_t *  ,
const std::vector< int64_t > &  ,
std::shared_ptr< VarlenOutputInfo = nullptr 
) const
const ResultSetStorage* ResultSet::allocateStorage ( const std::vector< int64_t > &  ) const
void ResultSet::append ( ResultSet that)

Definition at line 303 of file ResultSet.cpp.

References CHECK.

303  {
305  if (!that.storage_) {
306  return;
307  }
308  appended_storage_.push_back(std::move(that.storage_));
311  appended_storage_.back()->query_mem_desc_.getEntryCount());
312  chunks_.insert(chunks_.end(), that.chunks_.begin(), that.chunks_.end());
313  col_buffers_.insert(
314  col_buffers_.end(), that.col_buffers_.begin(), that.col_buffers_.end());
315  frag_offsets_.insert(
316  frag_offsets_.end(), that.frag_offsets_.begin(), that.frag_offsets_.end());
318  that.consistent_frag_sizes_.begin(),
319  that.consistent_frag_sizes_.end());
320  chunk_iters_.insert(
321  chunk_iters_.end(), that.chunk_iters_.begin(), that.chunk_iters_.end());
323  CHECK(that.separate_varlen_storage_valid_);
325  that.serialized_varlen_buffer_.begin(),
326  that.serialized_varlen_buffer_.end());
327  }
328  for (auto& buff : that.literal_buffers_) {
329  literal_buffers_.push_back(std::move(buff));
330  }
331 }
void setEntryCount(const size_t val)
AppendedStorage appended_storage_
Definition: ResultSet.h:949
QueryMemoryDescriptor query_mem_desc_
Definition: ResultSet.h:947
std::vector< std::shared_ptr< std::list< ChunkIter > > > chunk_iters_
Definition: ResultSet.h:962
std::vector< SerializedVarlenBufferStorage > serialized_varlen_buffer_
Definition: ResultSet.h:979
void invalidateCachedRowCount() const
Definition: ResultSet.cpp:611
std::list< std::shared_ptr< Chunk_NS::Chunk > > chunks_
Definition: ResultSet.h:961
std::vector< std::vector< int8_t > > literal_buffers_
Definition: ResultSet.h:965
std::vector< std::vector< std::vector< const int8_t * > > > col_buffers_
Definition: ResultSet.h:967
std::vector< std::vector< int64_t > > consistent_frag_sizes_
Definition: ResultSet.h:969
#define CHECK(condition)
Definition: Logger.h:291
std::vector< std::vector< std::vector< int64_t > > > frag_offsets_
Definition: ResultSet.h:968
bool separate_varlen_storage_valid_
Definition: ResultSet.h:980
bool ResultSet::areAnyColumnsLazyFetched ( ) const
inline

Definition at line 585 of file ResultSet.h.

References anonymous_namespace{QueryMemoryDescriptor.cpp}::any_of(), and lazy_fetch_info_.

585  {
586  auto is_lazy = [](auto const& info) { return info.is_lazily_fetched; };
587  return std::any_of(lazy_fetch_info_.begin(), lazy_fetch_info_.end(), is_lazy);
588  }
std::vector< ColumnLazyFetchInfo > lazy_fetch_info_
Definition: ResultSet.h:966
bool any_of(std::vector< Analyzer::Expr * > const &target_exprs)

+ Here is the call graph for this function:

void ResultSet::baselineSort ( const std::list< Analyzer::OrderEntry > &  order_entries,
const size_t  top_n,
const ExecutorDeviceType  device_type,
const Executor executor 
)
private
size_t ResultSet::binSearchRowCount ( ) const
private

Definition at line 622 of file ResultSet.cpp.

References anonymous_namespace{ResultSet.cpp}::get_truncated_row_count().

622  {
623  if (!storage_) {
624  return 0;
625  }
626 
627  size_t row_count = storage_->binSearchRowCount();
628  for (auto& s : appended_storage_) {
629  row_count += s->binSearchRowCount();
630  }
631 
632  return get_truncated_row_count(row_count, getLimit(), drop_first_);
633 }
AppendedStorage appended_storage_
Definition: ResultSet.h:949
std::unique_ptr< ResultSetStorage > storage_
Definition: ResultSet.h:948
size_t getLimit() const
Definition: ResultSet.cpp:1409
size_t get_truncated_row_count(size_t total_row_count, size_t limit, size_t offset)
Definition: ResultSet.cpp:545
size_t drop_first_
Definition: ResultSet.h:952

+ Here is the call graph for this function:

double ResultSet::calculateQuantile ( quantile::TDigest *const  t_digest)
static

Definition at line 1047 of file ResultSet.cpp.

References CHECK, quantile::detail::TDigest< RealType, IndexType >::mergeBufferFinal(), NULL_DOUBLE, and quantile::detail::TDigest< RealType, IndexType >::quantile().

Referenced by makeTargetValue().

1047  {
1048  static_assert(sizeof(int64_t) == sizeof(quantile::TDigest*));
1049  CHECK(t_digest);
1050  t_digest->mergeBufferFinal();
1051  double const quantile = t_digest->quantile();
1052  return boost::math::isnan(quantile) ? NULL_DOUBLE : quantile;
1053 }
#define NULL_DOUBLE
DEVICE RealType quantile(VectorView< IndexType const > const partial_sum, RealType const q) const
Definition: quantile.h:858
DEVICE void mergeBufferFinal()
Definition: quantile.h:683
#define CHECK(condition)
Definition: Logger.h:291

+ Here is the call graph for this function:

+ Here is the caller graph for this function:

bool ResultSet::canUseFastBaselineSort ( const std::list< Analyzer::OrderEntry > &  order_entries,
const size_t  top_n 
)
private
std::optional<bool> ResultSet::canUseSpeculativeTopNSort ( ) const
inline

Definition at line 522 of file ResultSet.h.

References can_use_speculative_top_n_sort.

522  {
524  }
std::optional< bool > can_use_speculative_top_n_sort
Definition: ResultSet.h:1001
bool ResultSet::checkSlotUsesFlatBufferFormat ( const size_t  slot_idx) const
inline

Definition at line 633 of file ResultSet.h.

References QueryMemoryDescriptor::checkSlotUsesFlatBufferFormat(), and query_mem_desc_.

633  {
635  }
QueryMemoryDescriptor query_mem_desc_
Definition: ResultSet.h:947
bool checkSlotUsesFlatBufferFormat(const size_t slot_idx) const

+ Here is the call graph for this function:

void ResultSet::clearPermutation ( )
inline

Definition at line 467 of file ResultSet.h.

References permutation_.

Referenced by initStatus().

467  {
468  if (!permutation_.empty()) {
469  permutation_.clear();
470  }
471  }
Permutation permutation_
Definition: ResultSet.h:955

+ Here is the caller graph for this function:

size_t ResultSet::colCount ( ) const

Definition at line 416 of file ResultSet.cpp.

416  {
417  return just_explain_ ? 1 : targets_.size();
418 }
const bool just_explain_
Definition: ResultSet.h:982
const std::vector< TargetInfo > targets_
Definition: ResultSet.h:943
ScalarTargetValue ResultSet::convertToScalarTargetValue ( SQLTypeInfo const &  ti,
bool const  translate_strings,
int64_t const  val 
) const

Definition at line 1088 of file ResultSetIteration.cpp.

References CHECK_EQ, SQLTypeInfo::get_compression(), SQLTypeInfo::is_any(), SQLTypeInfo::is_string(), kDOUBLE, kENCODING_DICT, kFLOAT, and makeStringTargetValue().

Referenced by makeTargetValue().

1090  {
1091  if (ti.is_string()) {
1092  CHECK_EQ(kENCODING_DICT, ti.get_compression());
1093  return makeStringTargetValue(ti, translate_strings, val);
1094  } else {
1095  return ti.is_any<kDOUBLE>() ? ScalarTargetValue(shared::bit_cast<double>(val))
1096  : ti.is_any<kFLOAT>() ? ScalarTargetValue(shared::bit_cast<float>(val))
1097  : ScalarTargetValue(val);
1098  }
1099 }
#define CHECK_EQ(x, y)
Definition: Logger.h:301
ScalarTargetValue makeStringTargetValue(SQLTypeInfo const &chosen_type, bool const translate_strings, int64_t const ival) const
boost::variant< int64_t, double, float, NullableString > ScalarTargetValue
Definition: TargetValue.h:180

+ Here is the call graph for this function:

+ Here is the caller graph for this function:

ResultSetPtr ResultSet::copy ( )

Definition at line 333 of file ResultSet.cpp.

References CHECK, gpu_enabled::copy(), and DEBUG_TIMER.

333  {
334  auto timer = DEBUG_TIMER(__func__);
335  if (!storage_) {
336  return nullptr;
337  }
338 
339  auto executor = getExecutor();
340  CHECK(executor);
341  ResultSetPtr copied_rs = std::make_shared<ResultSet>(targets_,
342  device_type_,
345  executor->blockSize(),
346  executor->gridSize());
347 
348  auto allocate_and_copy_storage =
349  [&](const ResultSetStorage* prev_storage) -> std::unique_ptr<ResultSetStorage> {
350  const auto& prev_qmd = prev_storage->query_mem_desc_;
351  const auto storage_size = prev_qmd.getBufferSizeBytes(device_type_);
352  auto buff = row_set_mem_owner_->allocate(storage_size, /*thread_idx=*/0);
353  std::unique_ptr<ResultSetStorage> new_storage;
354  new_storage.reset(new ResultSetStorage(
355  prev_storage->targets_, prev_qmd, buff, /*buff_is_provided=*/true));
356  new_storage->target_init_vals_ = prev_storage->target_init_vals_;
357  if (prev_storage->varlen_output_info_) {
358  new_storage->varlen_output_info_ = prev_storage->varlen_output_info_;
359  }
360  memcpy(new_storage->buff_, prev_storage->buff_, storage_size);
361  new_storage->query_mem_desc_ = prev_qmd;
362  return new_storage;
363  };
364 
365  copied_rs->storage_ = allocate_and_copy_storage(storage_.get());
366  if (!appended_storage_.empty()) {
367  for (const auto& storage : appended_storage_) {
368  copied_rs->appended_storage_.push_back(allocate_and_copy_storage(storage.get()));
369  }
370  }
371  std::copy(chunks_.begin(), chunks_.end(), std::back_inserter(copied_rs->chunks_));
372  std::copy(chunk_iters_.begin(),
373  chunk_iters_.end(),
374  std::back_inserter(copied_rs->chunk_iters_));
375  std::copy(col_buffers_.begin(),
376  col_buffers_.end(),
377  std::back_inserter(copied_rs->col_buffers_));
378  std::copy(frag_offsets_.begin(),
379  frag_offsets_.end(),
380  std::back_inserter(copied_rs->frag_offsets_));
383  std::back_inserter(copied_rs->consistent_frag_sizes_));
387  std::back_inserter(copied_rs->serialized_varlen_buffer_));
388  }
389  std::copy(literal_buffers_.begin(),
390  literal_buffers_.end(),
391  std::back_inserter(copied_rs->literal_buffers_));
392  std::copy(lazy_fetch_info_.begin(),
393  lazy_fetch_info_.end(),
394  std::back_inserter(copied_rs->lazy_fetch_info_));
395 
396  copied_rs->permutation_ = permutation_;
397  copied_rs->drop_first_ = drop_first_;
398  copied_rs->keep_first_ = keep_first_;
399  copied_rs->separate_varlen_storage_valid_ = separate_varlen_storage_valid_;
400  copied_rs->query_exec_time_ = query_exec_time_;
401  copied_rs->input_table_keys_ = input_table_keys_;
402  copied_rs->target_meta_info_ = target_meta_info_;
403  copied_rs->geo_return_type_ = geo_return_type_;
404  copied_rs->query_plan_ = query_plan_;
406  copied_rs->can_use_speculative_top_n_sort = can_use_speculative_top_n_sort;
407  }
408 
409  return copied_rs;
410 }
Permutation permutation_
Definition: ResultSet.h:955
AppendedStorage appended_storage_
Definition: ResultSet.h:949
GeoReturnType geo_return_type_
Definition: ResultSet.h:988
std::optional< bool > can_use_speculative_top_n_sort
Definition: ResultSet.h:1001
QueryMemoryDescriptor query_mem_desc_
Definition: ResultSet.h:947
std::unique_ptr< ResultSetStorage > storage_
Definition: ResultSet.h:948
size_t query_exec_time_
Definition: ResultSet.h:993
std::shared_ptr< ResultSet > ResultSetPtr
size_t keep_first_
Definition: ResultSet.h:953
std::vector< std::shared_ptr< std::list< ChunkIter > > > chunk_iters_
Definition: ResultSet.h:962
std::vector< SerializedVarlenBufferStorage > serialized_varlen_buffer_
Definition: ResultSet.h:979
const std::vector< TargetInfo > targets_
Definition: ResultSet.h:943
std::shared_ptr< RowSetMemoryOwner > row_set_mem_owner_
Definition: ResultSet.h:954
const Executor * getExecutor() const
Definition: ResultSet.h:631
size_t drop_first_
Definition: ResultSet.h:952
std::list< std::shared_ptr< Chunk_NS::Chunk > > chunks_
Definition: ResultSet.h:961
DEVICE auto copy(ARGS &&...args)
Definition: gpu_enabled.h:51
std::vector< std::vector< int8_t > > literal_buffers_
Definition: ResultSet.h:965
std::unordered_set< size_t > input_table_keys_
Definition: ResultSet.h:995
std::vector< TargetMetaInfo > target_meta_info_
Definition: ResultSet.h:996
std::vector< std::vector< std::vector< const int8_t * > > > col_buffers_
Definition: ResultSet.h:967
std::vector< std::vector< int64_t > > consistent_frag_sizes_
Definition: ResultSet.h:969
const ExecutorDeviceType device_type_
Definition: ResultSet.h:944
#define CHECK(condition)
Definition: Logger.h:291
#define DEBUG_TIMER(name)
Definition: Logger.h:412
std::vector< ColumnLazyFetchInfo > lazy_fetch_info_
Definition: ResultSet.h:966
QueryPlanHash query_plan_
Definition: ResultSet.h:994
std::vector< std::vector< std::vector< int64_t > > > frag_offsets_
Definition: ResultSet.h:968
bool separate_varlen_storage_valid_
Definition: ResultSet.h:980

+ Here is the call graph for this function:

void ResultSet::copyColumnIntoBuffer ( const size_t  column_idx,
int8_t *  output_buffer,
const size_t  output_buffer_size 
) const

For each specified column, this function goes through all available storages and copies that column's contents from each storage into a single contiguous output_buffer

Definition at line 1171 of file ResultSetIteration.cpp.

References appended_storage_, CHECK, CHECK_LT, QueryMemoryDescriptor::getPaddedSlotWidthBytes(), QueryMemoryDescriptor::getSlotCount(), isDirectColumnarConversionPossible(), query_mem_desc_, and storage_.

1173  {
1175  CHECK_LT(column_idx, query_mem_desc_.getSlotCount());
1176  CHECK(output_buffer_size > 0);
1177  CHECK(output_buffer);
1178  const auto column_width_size = query_mem_desc_.getPaddedSlotWidthBytes(column_idx);
1179  size_t out_buff_offset = 0;
1180 
1181  // the main storage:
1182  const size_t crt_storage_row_count = storage_->query_mem_desc_.getEntryCount();
1183  const size_t crt_buffer_size = crt_storage_row_count * column_width_size;
1184  const size_t column_offset = storage_->query_mem_desc_.getColOffInBytes(column_idx);
1185  const int8_t* storage_buffer = storage_->getUnderlyingBuffer() + column_offset;
1186  CHECK(crt_buffer_size <= output_buffer_size);
1187  std::memcpy(output_buffer, storage_buffer, crt_buffer_size);
1188 
1189  out_buff_offset += crt_buffer_size;
1190 
1191  // the appended storages:
1192  for (size_t i = 0; i < appended_storage_.size(); i++) {
1193  const size_t crt_storage_row_count =
1194  appended_storage_[i]->query_mem_desc_.getEntryCount();
1195  if (crt_storage_row_count == 0) {
1196  // skip an empty appended storage
1197  continue;
1198  }
1199  CHECK_LT(out_buff_offset, output_buffer_size);
1200  const size_t crt_buffer_size = crt_storage_row_count * column_width_size;
1201  const size_t column_offset =
1202  appended_storage_[i]->query_mem_desc_.getColOffInBytes(column_idx);
1203  const int8_t* storage_buffer =
1204  appended_storage_[i]->getUnderlyingBuffer() + column_offset;
1205  CHECK(out_buff_offset + crt_buffer_size <= output_buffer_size);
1206  std::memcpy(output_buffer + out_buff_offset, storage_buffer, crt_buffer_size);
1207 
1208  out_buff_offset += crt_buffer_size;
1209  }
1210 }
AppendedStorage appended_storage_
Definition: ResultSet.h:949
QueryMemoryDescriptor query_mem_desc_
Definition: ResultSet.h:947
std::unique_ptr< ResultSetStorage > storage_
Definition: ResultSet.h:948
const int8_t getPaddedSlotWidthBytes(const size_t slot_idx) const
#define CHECK_LT(x, y)
Definition: Logger.h:303
#define CHECK(condition)
Definition: Logger.h:291
bool isDirectColumnarConversionPossible() const
Definition: ResultSet.cpp:1477

+ Here is the call graph for this function:

void ResultSet::create_active_buffer_set ( CountDistinctSet count_distinct_active_buffer_set) const
private
Comparator ResultSet::createComparator ( const std::list< Analyzer::OrderEntry > &  order_entries,
const PermutationView  permutation,
const Executor executor,
const bool  single_threaded 
)
inlineprivate

Definition at line 877 of file ResultSet.h.

References DEBUG_TIMER, QueryMemoryDescriptor::didOutputColumnar(), and query_mem_desc_.

880  {
881  auto timer = DEBUG_TIMER(__func__);
883  return [rsc = ResultSetComparator<ColumnWiseTargetAccessor>(
884  order_entries, this, permutation, executor, single_threaded)](
885  const PermutationIdx lhs, const PermutationIdx rhs) {
886  return rsc(lhs, rhs);
887  };
888  } else {
889  return [rsc = ResultSetComparator<RowWiseTargetAccessor>(
890  order_entries, this, permutation, executor, single_threaded)](
891  const PermutationIdx lhs, const PermutationIdx rhs) {
892  return rsc(lhs, rhs);
893  };
894  }
895  }
QueryMemoryDescriptor query_mem_desc_
Definition: ResultSet.h:947
uint32_t PermutationIdx
Definition: ResultSet.h:152
#define DEBUG_TIMER(name)
Definition: Logger.h:412

+ Here is the call graph for this function:

bool ResultSet::definitelyHasNoRows ( ) const

Definition at line 674 of file ResultSet.cpp.

674  {
675  return (!storage_ && !estimator_ && !just_explain_) || cached_row_count_ == 0;
676 }
std::unique_ptr< ResultSetStorage > storage_
Definition: ResultSet.h:948
const bool just_explain_
Definition: ResultSet.h:982
std::atomic< int64_t > cached_row_count_
Definition: ResultSet.h:984
const std::shared_ptr< const Analyzer::Estimator > estimator_
Definition: ResultSet.h:971
bool ResultSet::didOutputColumnar ( ) const
inline

Definition at line 560 of file ResultSet.h.

References QueryMemoryDescriptor::didOutputColumnar(), and query_mem_desc_.

560 { return this->query_mem_desc_.didOutputColumnar(); }
QueryMemoryDescriptor query_mem_desc_
Definition: ResultSet.h:947

+ Here is the call graph for this function:

void ResultSet::doBaselineSort ( const ExecutorDeviceType  device_type,
const std::list< Analyzer::OrderEntry > &  order_entries,
const size_t  top_n,
const Executor executor 
)
private
void ResultSet::dropFirstN ( const size_t  n)

Definition at line 59 of file ResultSet.cpp.

References anonymous_namespace{Utm.h}::n.

59  {
61  drop_first_ = n;
62 }
void invalidateCachedRowCount() const
Definition: ResultSet.cpp:611
size_t drop_first_
Definition: ResultSet.h:952
constexpr double n
Definition: Utm.h:38
void ResultSet::eachCellInColumn ( RowIterationState state,
CellCallback const &  func 
)

Definition at line 491 of file ResultSet.cpp.

References advance_slot(), advance_to_next_columnar_target_buff(), ResultSet::RowIterationState::agg_idx_, align_to_int64(), ResultSet::RowIterationState::buf_ptr_, CHECK, CHECK_GE, CHECK_LT, ResultSet::RowIterationState::compact_sz1_, ResultSet::RowIterationState::cur_target_idx_, QueryMemoryDescriptor::didOutputColumnar(), get_cols_ptr(), get_key_bytes_rowwise(), QueryMemoryDescriptor::getPaddedSlotWidthBytes(), ResultSet::RowIterationState::prev_target_idx_, read_int_from_buff(), and row_ptr_rowwise().

491  {
492  size_t const target_idx = state.cur_target_idx_;
493  QueryMemoryDescriptor& storage_qmd = storage_->query_mem_desc_;
494  CHECK_LT(target_idx, lazy_fetch_info_.size());
495  auto& col_lazy_fetch = lazy_fetch_info_[target_idx];
496  CHECK(col_lazy_fetch.is_lazily_fetched);
497  int const target_size = storage_->targets_[target_idx].sql_type.get_size();
498  CHECK_LT(0, target_size) << storage_->targets_[target_idx].toString();
499  size_t const nrows = storage_->binSearchRowCount();
500  if (storage_qmd.didOutputColumnar()) {
501  // Logic based on ResultSet::ColumnWiseTargetAccessor::initializeOffsetsForStorage()
502  if (state.buf_ptr_ == nullptr) {
503  state.buf_ptr_ = get_cols_ptr(storage_->buff_, storage_qmd);
504  state.compact_sz1_ = storage_qmd.getPaddedSlotWidthBytes(state.agg_idx_)
505  ? storage_qmd.getPaddedSlotWidthBytes(state.agg_idx_)
507  }
508  for (size_t j = state.prev_target_idx_; j < state.cur_target_idx_; ++j) {
509  size_t const next_target_idx = j + 1; // Set state to reflect next target_idx j+1
510  state.buf_ptr_ = advance_to_next_columnar_target_buff(
511  state.buf_ptr_, storage_qmd, state.agg_idx_);
512  auto const& next_agg_info = storage_->targets_[next_target_idx];
513  state.agg_idx_ =
514  advance_slot(state.agg_idx_, next_agg_info, separate_varlen_storage_valid_);
515  state.compact_sz1_ = storage_qmd.getPaddedSlotWidthBytes(state.agg_idx_)
516  ? storage_qmd.getPaddedSlotWidthBytes(state.agg_idx_)
518  }
519  for (size_t i = 0; i < nrows; ++i) {
520  int8_t const* const pos_ptr = state.buf_ptr_ + i * state.compact_sz1_;
521  int64_t pos = read_int_from_buff(pos_ptr, target_size);
522  CHECK_GE(pos, 0);
523  auto& frag_col_buffers = getColumnFrag(0, target_idx, pos);
524  CHECK_LT(size_t(col_lazy_fetch.local_col_id), frag_col_buffers.size());
525  int8_t const* const col_frag = frag_col_buffers[col_lazy_fetch.local_col_id];
526  func(col_frag + pos * target_size);
527  }
528  } else {
529  size_t const key_bytes_with_padding =
531  for (size_t i = 0; i < nrows; ++i) {
532  int8_t const* const keys_ptr = row_ptr_rowwise(storage_->buff_, storage_qmd, i);
533  int8_t const* const rowwise_target_ptr = keys_ptr + key_bytes_with_padding;
534  int64_t pos = *reinterpret_cast<int64_t const*>(rowwise_target_ptr);
535  auto& frag_col_buffers = getColumnFrag(0, target_idx, pos);
536  CHECK_LT(size_t(col_lazy_fetch.local_col_id), frag_col_buffers.size());
537  int8_t const* const col_frag = frag_col_buffers[col_lazy_fetch.local_col_id];
538  func(col_frag + pos * target_size);
539  }
540  }
541 }
T advance_to_next_columnar_target_buff(T target_ptr, const QueryMemoryDescriptor &query_mem_desc, const size_t target_slot_idx)
QueryMemoryDescriptor query_mem_desc_
Definition: ResultSet.h:947
#define CHECK_GE(x, y)
Definition: Logger.h:306
std::unique_ptr< ResultSetStorage > storage_
Definition: ResultSet.h:948
size_t getEffectiveKeyWidth() const
int64_t read_int_from_buff(const int8_t *ptr, const int8_t compact_sz)
size_t advance_slot(const size_t j, const TargetInfo &target_info, const bool separate_varlen_storage)
const int8_t getPaddedSlotWidthBytes(const size_t slot_idx) const
T row_ptr_rowwise(T buff, const QueryMemoryDescriptor &query_mem_desc, const size_t entry_idx)
#define CHECK_LT(x, y)
Definition: Logger.h:303
#define CHECK(condition)
Definition: Logger.h:291
std::vector< ColumnLazyFetchInfo > lazy_fetch_info_
Definition: ResultSet.h:966
bool separate_varlen_storage_valid_
Definition: ResultSet.h:980
T get_cols_ptr(T buff, const QueryMemoryDescriptor &query_mem_desc)
size_t get_key_bytes_rowwise(const QueryMemoryDescriptor &query_mem_desc)
FORCE_INLINE HOST DEVICE T align_to_int64(T addr)
const std::vector< const int8_t * > & getColumnFrag(const size_t storge_idx, const size_t col_logical_idx, int64_t &global_idx) const

+ Here is the call graph for this function:

size_t ResultSet::entryCount ( ) const

Returns the number of entries the result set is allocated to hold.

Note that this can be greater than or equal to the actual number of valid rows in the result set, whether due to an applied SQL LIMIT/OFFSET or because the result set representation is inherently sparse (i.e. baseline hash group by)

For getting the number of valid rows in the result set (inclusive of any applied LIMIT and/or OFFSET), use ResultSet::rowCount(). Or to just test whether there are any valid rows, use ResultSet::isEmpty(), as a return value from entryCount() greater than 0 does not necessarily mean the result set is non-empty.

Definition at line 752 of file ResultSetIteration.cpp.

References QueryMemoryDescriptor::getEntryCount(), permutation_, and query_mem_desc_.

752  {
753  return permutation_.empty() ? query_mem_desc_.getEntryCount() : permutation_.size();
754 }
Permutation permutation_
Definition: ResultSet.h:955
QueryMemoryDescriptor query_mem_desc_
Definition: ResultSet.h:947

+ Here is the call graph for this function:

void ResultSet::fillOneEntry ( const std::vector< int64_t > &  entry)
inline

Definition at line 429 of file ResultSet.h.

References CHECK, and storage_.

429  {
430  CHECK(storage_);
431  if (storage_->query_mem_desc_.didOutputColumnar()) {
432  storage_->fillOneEntryColWise(entry);
433  } else {
434  storage_->fillOneEntryRowWise(entry);
435  }
436  }
std::unique_ptr< ResultSetStorage > storage_
Definition: ResultSet.h:948
#define CHECK(condition)
Definition: Logger.h:291
ResultSet::StorageLookupResult ResultSet::findStorage ( const size_t  entry_idx) const
private

Definition at line 951 of file ResultSet.cpp.

Referenced by getVarlenOutputInfo(), and makeGeoTargetValue().

951  {
952  auto [stg_idx, fixedup_entry_idx] = getStorageIndex(entry_idx);
953  return {stg_idx ? appended_storage_[stg_idx - 1].get() : storage_.get(),
954  fixedup_entry_idx,
955  stg_idx};
956 }
std::pair< size_t, size_t > getStorageIndex(const size_t entry_idx) const
Definition: ResultSet.cpp:926
AppendedStorage appended_storage_
Definition: ResultSet.h:949
std::unique_ptr< ResultSetStorage > storage_
Definition: ResultSet.h:948

+ Here is the caller graph for this function:

void ResultSet::fixupCountDistinctPointers ( )
private
QueryMemoryDescriptor ResultSet::fixupQueryMemoryDescriptor ( const QueryMemoryDescriptor query_mem_desc)
static

Definition at line 766 of file ResultSet.cpp.

References QueryMemoryDescriptor::didOutputColumnar(), and query_mem_desc.

Referenced by GpuSharedMemCodeBuilder::codegenInitialization(), GpuSharedMemCodeBuilder::codegenReduction(), Executor::executeTableFunction(), QueryExecutionContext::groupBufferToDeinterleavedResults(), QueryMemoryInitializer::initRowGroups(), QueryMemoryInitializer::QueryMemoryInitializer(), and Executor::reduceMultiDeviceResults().

767  {
768  auto query_mem_desc_copy = query_mem_desc;
769  query_mem_desc_copy.resetGroupColWidths(
770  std::vector<int8_t>(query_mem_desc_copy.getGroupbyColCount(), 8));
771  if (query_mem_desc.didOutputColumnar()) {
772  return query_mem_desc_copy;
773  }
774  query_mem_desc_copy.alignPaddedSlots();
775  return query_mem_desc_copy;
776 }

+ Here is the call graph for this function:

+ Here is the caller graph for this function:

unsigned ResultSet::getBlockSize ( ) const
inline

Definition at line 535 of file ResultSet.h.

References block_size_.

535 { return block_size_; }
unsigned block_size_
Definition: ResultSet.h:957
size_t ResultSet::getBufferSizeBytes ( const ExecutorDeviceType  device_type) const

Definition at line 756 of file ResultSetIteration.cpp.

References CHECK, and storage_.

756  {
757  CHECK(storage_);
758  return storage_->query_mem_desc_.getBufferSizeBytes(device_type);
759 }
std::unique_ptr< ResultSetStorage > storage_
Definition: ResultSet.h:948
#define CHECK(condition)
Definition: Logger.h:291
SQLTypeInfo ResultSet::getColType ( const size_t  col_idx) const

Definition at line 420 of file ResultSet.cpp.

References CHECK_LT, kAVG, kDOUBLE, and kTEXT.

420  {
421  if (just_explain_) {
422  return SQLTypeInfo(kTEXT, false);
423  }
424  CHECK_LT(col_idx, targets_.size());
425  return targets_[col_idx].agg_kind == kAVG ? SQLTypeInfo(kDOUBLE, false)
426  : targets_[col_idx].sql_type;
427 }
const bool just_explain_
Definition: ResultSet.h:982
const std::vector< TargetInfo > targets_
Definition: ResultSet.h:943
#define CHECK_LT(x, y)
Definition: Logger.h:303
Definition: sqltypes.h:79
Definition: sqldefs.h:77
template<typename ENTRY_TYPE >
ENTRY_TYPE ResultSet::getColumnarBaselineEntryAt ( const size_t  row_idx,
const size_t  target_idx,
const size_t  slot_idx 
) const
private
template<typename ENTRY_TYPE >
ENTRY_TYPE ResultSet::getColumnarBaselineEntryAt ( const size_t  row_idx,
const size_t  target_idx,
const size_t  slot_idx 
) const

Directly accesses the result set's storage buffer for a particular data type (row-wise output, baseline hash group by)

NOTE: Currently, only used in direct columnarization

Definition at line 1343 of file ResultSetIteration.cpp.

References CHECK_NE, and storage_.

1345  {
1346  CHECK_NE(storage_->query_mem_desc_.targetGroupbyIndicesSize(), size_t(0));
1347  const auto key_width = storage_->query_mem_desc_.getEffectiveKeyWidth();
1348  const auto column_offset =
1349  (storage_->query_mem_desc_.getTargetGroupbyIndex(target_idx) < 0)
1350  ? storage_->query_mem_desc_.getColOffInBytes(slot_idx)
1351  : storage_->query_mem_desc_.getTargetGroupbyIndex(target_idx) * key_width *
1352  storage_->query_mem_desc_.getEntryCount();
1353  const auto column_buffer = storage_->getUnderlyingBuffer() + column_offset;
1354  return reinterpret_cast<const ENTRY_TYPE*>(column_buffer)[row_idx];
1355 }
std::unique_ptr< ResultSetStorage > storage_
Definition: ResultSet.h:948
#define CHECK_NE(x, y)
Definition: Logger.h:302
const int8_t * ResultSet::getColumnarBuffer ( size_t  column_idx) const

Definition at line 1508 of file ResultSet.cpp.

References CHECK.

1508  {
1510  return storage_->getUnderlyingBuffer() + query_mem_desc_.getColOffInBytes(column_idx);
1511 }
QueryMemoryDescriptor query_mem_desc_
Definition: ResultSet.h:947
std::unique_ptr< ResultSetStorage > storage_
Definition: ResultSet.h:948
bool isZeroCopyColumnarConversionPossible(size_t column_idx) const
Definition: ResultSet.cpp:1499
#define CHECK(condition)
Definition: Logger.h:291
size_t getColOffInBytes(const size_t col_idx) const
const size_t ResultSet::getColumnarBufferSize ( size_t  column_idx) const

Definition at line 1513 of file ResultSet.cpp.

1513  {
1514  const auto col_context = query_mem_desc_.getColSlotContext();
1515  const auto idx = col_context.getSlotsForCol(column_idx).front();
1517  if (checkSlotUsesFlatBufferFormat(idx)) {
1518  return query_mem_desc_.getFlatBufferSize(idx);
1519  }
1520  const size_t padded_slot_width = static_cast<size_t>(getPaddedSlotWidthBytes(idx));
1521  return padded_slot_width * entryCount();
1522 }
bool checkSlotUsesFlatBufferFormat(const size_t slot_idx) const
Definition: ResultSet.h:633
QueryMemoryDescriptor query_mem_desc_
Definition: ResultSet.h:947
int64_t getPaddedSlotBufferSize(const size_t slot_idx) const
const int8_t getPaddedSlotWidthBytes(const size_t slot_idx) const
Definition: ResultSet.h:570
size_t entryCount() const
Returns the number of entries the result set is allocated to hold.
const ColSlotContext & getColSlotContext() const
const std::vector< size_t > & getSlotsForCol(const size_t col_idx) const
int64_t getFlatBufferSize(const size_t slot_idx) const
template<typename ENTRY_TYPE >
ENTRY_TYPE ResultSet::getColumnarPerfectHashEntryAt ( const size_t  row_idx,
const size_t  target_idx,
const size_t  slot_idx 
) const
private
template<typename ENTRY_TYPE >
ENTRY_TYPE ResultSet::getColumnarPerfectHashEntryAt ( const size_t  row_idx,
const size_t  target_idx,
const size_t  slot_idx 
) const

Directly accesses the result set's storage buffer for a particular data type (columnar output, perfect hash group by)

NOTE: Currently, only used in direct columnarization

Definition at line 1289 of file ResultSetIteration.cpp.

References storage_.

1291  {
1292  const size_t column_offset = storage_->query_mem_desc_.getColOffInBytes(slot_idx);
1293  const int8_t* storage_buffer = storage_->getUnderlyingBuffer() + column_offset;
1294  return reinterpret_cast<const ENTRY_TYPE*>(storage_buffer)[row_idx];
1295 }
std::unique_ptr< ResultSetStorage > storage_
Definition: ResultSet.h:948
const std::vector< const int8_t * > & ResultSet::getColumnFrag ( const size_t  storge_idx,
const size_t  col_logical_idx,
int64_t &  global_idx 
) const
private

Definition at line 1136 of file ResultSetIteration.cpp.

References CHECK_EQ, CHECK_GE, CHECK_LE, CHECK_LT, col_buffers_, consistent_frag_sizes_, frag_offsets_, and anonymous_namespace{ResultSetIteration.cpp}::get_frag_id_and_local_idx().

Referenced by lazyReadInt(), makeGeoTargetValue(), makeTargetValue(), and makeVarlenTargetValue().

1138  {
1139  CHECK_LT(static_cast<size_t>(storage_idx), col_buffers_.size());
1140  if (col_buffers_[storage_idx].size() > 1) {
1141  int64_t frag_id = 0;
1142  int64_t local_idx = global_idx;
1143  if (consistent_frag_sizes_[storage_idx][col_logical_idx] != -1) {
1144  frag_id = global_idx / consistent_frag_sizes_[storage_idx][col_logical_idx];
1145  local_idx = global_idx % consistent_frag_sizes_[storage_idx][col_logical_idx];
1146  } else {
1147  std::tie(frag_id, local_idx) = get_frag_id_and_local_idx(
1148  frag_offsets_[storage_idx], col_logical_idx, global_idx);
1149  CHECK_LE(local_idx, global_idx);
1150  }
1151  CHECK_GE(frag_id, int64_t(0));
1152  CHECK_LT(static_cast<size_t>(frag_id), col_buffers_[storage_idx].size());
1153  global_idx = local_idx;
1154  return col_buffers_[storage_idx][frag_id];
1155  } else {
1156  CHECK_EQ(size_t(1), col_buffers_[storage_idx].size());
1157  return col_buffers_[storage_idx][0];
1158  }
1159 }
#define CHECK_EQ(x, y)
Definition: Logger.h:301
#define CHECK_GE(x, y)
Definition: Logger.h:306
#define CHECK_LT(x, y)
Definition: Logger.h:303
#define CHECK_LE(x, y)
Definition: Logger.h:304
std::vector< std::vector< std::vector< const int8_t * > > > col_buffers_
Definition: ResultSet.h:967
std::vector< std::vector< int64_t > > consistent_frag_sizes_
Definition: ResultSet.h:969
std::vector< std::vector< std::vector< int64_t > > > frag_offsets_
Definition: ResultSet.h:968
std::pair< int64_t, int64_t > get_frag_id_and_local_idx(const std::vector< std::vector< T >> &frag_offsets, const size_t tab_or_col_idx, const int64_t global_idx)

+ Here is the call graph for this function:

+ Here is the caller graph for this function:

size_t ResultSet::getCurrentRowBufferIndex ( ) const

Definition at line 295 of file ResultSet.cpp.

295  {
296  if (crt_row_buff_idx_ == 0) {
297  throw std::runtime_error("current row buffer iteration index is undefined");
298  }
299  return crt_row_buff_idx_ - 1;
300 }
size_t crt_row_buff_idx_
Definition: ResultSet.h:950
Data_Namespace::DataMgr* ResultSet::getDataManager ( ) const
private
int8_t * ResultSet::getDeviceEstimatorBuffer ( ) const

Definition at line 692 of file ResultSet.cpp.

References CHECK, and GPU.

692  {
696 }
virtual int8_t * getMemoryPtr()=0
const ExecutorDeviceType device_type_
Definition: ResultSet.h:944
#define CHECK(condition)
Definition: Logger.h:291
Data_Namespace::AbstractBuffer * device_estimator_buffer_
Definition: ResultSet.h:972
int ResultSet::getDeviceId ( ) const

Definition at line 758 of file ResultSet.cpp.

758  {
759  return device_id_;
760 }
const int device_id_
Definition: ResultSet.h:945
ExecutorDeviceType ResultSet::getDeviceType ( ) const

Definition at line 254 of file ResultSet.cpp.

254  {
255  return device_type_;
256 }
const ExecutorDeviceType device_type_
Definition: ResultSet.h:944
int64_t ResultSet::getDistinctBufferRefFromBufferRowwise ( int8_t *  rowwise_target_ptr,
const TargetInfo target_info 
) const
private
template<typename ENTRY_TYPE , QueryDescriptionType QUERY_TYPE, bool COLUMNAR_FORMAT>
ENTRY_TYPE ResultSet::getEntryAt ( const size_t  row_idx,
const size_t  target_idx,
const size_t  slot_idx 
) const
template<typename ENTRY_TYPE , QueryDescriptionType QUERY_TYPE, bool COLUMNAR_FORMAT>
ENTRY_TYPE ResultSet::getEntryAt ( const size_t  row_idx,
const size_t  target_idx,
const size_t  slot_idx 
) const

Definition at line 1213 of file ResultSetIteration.cpp.

References heavyai::GroupByBaselineHash, heavyai::GroupByPerfectHash, and UNREACHABLE.

1215  {
1216  if constexpr (QUERY_TYPE == QueryDescriptionType::GroupByPerfectHash) { // NOLINT
1217  if constexpr (COLUMNAR_FORMAT) { // NOLINT
1218  return getColumnarPerfectHashEntryAt<ENTRY_TYPE>(row_idx, target_idx, slot_idx);
1219  } else {
1220  return getRowWisePerfectHashEntryAt<ENTRY_TYPE>(row_idx, target_idx, slot_idx);
1221  }
1222  } else if constexpr (QUERY_TYPE == QueryDescriptionType::GroupByBaselineHash) {
1223  if constexpr (COLUMNAR_FORMAT) { // NOLINT
1224  return getColumnarBaselineEntryAt<ENTRY_TYPE>(row_idx, target_idx, slot_idx);
1225  } else {
1226  return getRowWiseBaselineEntryAt<ENTRY_TYPE>(row_idx, target_idx, slot_idx);
1227  }
1228  } else {
1229  UNREACHABLE() << "Invalid query type is used";
1230  return 0;
1231  }
1232 }
GroupByPerfectHash
Definition: enums.h:58
#define UNREACHABLE()
Definition: Logger.h:338
GroupByBaselineHash
Definition: enums.h:58
const long ResultSet::getExecTime ( ) const
inline

Definition at line 502 of file ResultSet.h.

References query_exec_time_.

502 { return query_exec_time_; }
size_t query_exec_time_
Definition: ResultSet.h:993
const Executor* ResultSet::getExecutor ( ) const
inline

Definition at line 631 of file ResultSet.h.

References QueryMemoryDescriptor::getExecutor(), and query_mem_desc_.

631 { return query_mem_desc_.getExecutor(); }
QueryMemoryDescriptor query_mem_desc_
Definition: ResultSet.h:947
const Executor * getExecutor() const

+ Here is the call graph for this function:

std::string ResultSet::getExplanation ( ) const
inline

Definition at line 393 of file ResultSet.h.

References explanation_, and just_explain_.

393  {
394  if (just_explain_) {
395  return explanation_;
396  }
397  return {};
398  }
const bool just_explain_
Definition: ResultSet.h:982
std::string explanation_
Definition: ResultSet.h:981
GeoReturnType ResultSet::getGeoReturnType ( ) const
inline

Definition at line 551 of file ResultSet.h.

References geo_return_type_.

551 { return geo_return_type_; }
GeoReturnType geo_return_type_
Definition: ResultSet.h:988
int ResultSet::getGpuCount ( ) const
private
unsigned ResultSet::getGridSize ( ) const
inline

Definition at line 537 of file ResultSet.h.

References grid_size_.

537 { return grid_size_; }
unsigned grid_size_
Definition: ResultSet.h:958
int8_t * ResultSet::getHostEstimatorBuffer ( ) const

Definition at line 698 of file ResultSet.cpp.

698  {
699  return host_estimator_buffer_;
700 }
int8_t * host_estimator_buffer_
Definition: ResultSet.h:973
std::unordered_set<size_t> ResultSet::getInputTableKeys ( ) const
inline

Definition at line 508 of file ResultSet.h.

References input_table_keys_.

508 { return input_table_keys_; }
std::unordered_set< size_t > input_table_keys_
Definition: ResultSet.h:995
const std::vector<ColumnLazyFetchInfo>& ResultSet::getLazyFetchInfo ( ) const
inline

Definition at line 581 of file ResultSet.h.

References lazy_fetch_info_.

581  {
582  return lazy_fetch_info_;
583  }
std::vector< ColumnLazyFetchInfo > lazy_fetch_info_
Definition: ResultSet.h:966
size_t ResultSet::getLimit ( ) const

Definition at line 1409 of file ResultSet.cpp.

1409  {
1410  return keep_first_;
1411 }
size_t keep_first_
Definition: ResultSet.h:953
size_t ResultSet::getNDVEstimator ( ) const

Definition at line 33 of file CardinalityEstimator.cpp.

References bitmap_set_size(), CHECK, CHECK_LE, LOG, and logger::WARNING.

33  {
 34  CHECK(dynamic_cast<const Analyzer::NDVEstimator*>(estimator_.get()));
 35  CHECK(host_estimator_buffer_);
 36  auto bits_set = bitmap_set_size(host_estimator_buffer_, estimator_->getBufferSize());
37  if (bits_set == 0) {
38  // empty result set, return 1 for a groups buffer size of 1
39  return 1;
40  }
41  const auto total_bits = estimator_->getBufferSize() * 8;
42  CHECK_LE(bits_set, total_bits);
43  const auto unset_bits = total_bits - bits_set;
44  const auto ratio = static_cast<double>(unset_bits) / total_bits;
45  if (ratio == 0.) {
46  LOG(WARNING)
47  << "Failed to get a high quality cardinality estimation, falling back to "
48  "approximate group by buffer size guess.";
49  return 0;
50  }
51  return -static_cast<double>(total_bits) * log(ratio);
52 }
#define LOG(tag)
Definition: Logger.h:285
const std::shared_ptr< const Analyzer::Estimator > estimator_
Definition: ResultSet.h:971
#define CHECK_LE(x, y)
Definition: Logger.h:304
int8_t * host_estimator_buffer_
Definition: ResultSet.h:973
#define CHECK(condition)
Definition: Logger.h:291
size_t bitmap_set_size(const int8_t *bitmap, const size_t bitmap_byte_sz)
Definition: CountDistinct.h:37

+ Here is the call graph for this function:

std::vector< TargetValue > ResultSet::getNextRow ( const bool  translate_strings,
const bool  decimal_to_double 
) const

Definition at line 296 of file ResultSetIteration.cpp.

297  {
298  std::lock_guard<std::mutex> lock(row_iteration_mutex_);
299  if (!storage_ && !just_explain_) {
300  return {};
301  }
302  return getNextRowUnlocked(translate_strings, decimal_to_double);
303 }
std::mutex row_iteration_mutex_
Definition: ResultSet.h:985
double decimal_to_double(const SQLTypeInfo &otype, int64_t oval)
std::unique_ptr< ResultSetStorage > storage_
Definition: ResultSet.h:948
const bool just_explain_
Definition: ResultSet.h:982
std::vector< TargetValue > getNextRowUnlocked(const bool translate_strings, const bool decimal_to_double) const
std::vector< TargetValue > ResultSet::getNextRowImpl ( const bool  translate_strings,
const bool  decimal_to_double 
) const
private

Definition at line 318 of file ResultSetIteration.cpp.

References CHECK, and CHECK_EQ.

319  {
320  size_t entry_buff_idx = 0;
 321  do {
 322  if (keep_first_ && fetched_so_far_ >= drop_first_ + keep_first_) {
 323  return {};
 324  }
 325 
 326  entry_buff_idx = advanceCursorToNextEntry();
 327 
 328  if (crt_row_buff_idx_ >= entryCount()) {
 329  CHECK_EQ(entryCount(), crt_row_buff_idx_);
 330  return {};
 331  }
 332  ++crt_row_buff_idx_;
 333  ++fetched_so_far_;
334 
335  } while (drop_first_ && fetched_so_far_ <= drop_first_);
336 
337  auto row = getRowAt(entry_buff_idx, translate_strings, decimal_to_double, false);
338  CHECK(!row.empty());
339 
340  return row;
341 }
#define CHECK_EQ(x, y)
Definition: Logger.h:301
double decimal_to_double(const SQLTypeInfo &otype, int64_t oval)
size_t keep_first_
Definition: ResultSet.h:953
size_t drop_first_
Definition: ResultSet.h:952
std::vector< TargetValue > getRowAt(const size_t index) const
size_t entryCount() const
Returns the number of entries the result set is allocated to hold.
#define CHECK(condition)
Definition: Logger.h:291
size_t fetched_so_far_
Definition: ResultSet.h:951
size_t crt_row_buff_idx_
Definition: ResultSet.h:950
size_t advanceCursorToNextEntry() const
std::vector< TargetValue > ResultSet::getNextRowUnlocked ( const bool  translate_strings,
const bool  decimal_to_double 
) const
private

Definition at line 305 of file ResultSetIteration.cpp.

307  {
308  if (just_explain_) {
309  if (fetched_so_far_) {
310  return {};
311  }
312  fetched_so_far_ = 1;
313  return {explanation_};
314  }
315  return getNextRowImpl(translate_strings, decimal_to_double);
316 }
double decimal_to_double(const SQLTypeInfo &otype, int64_t oval)
const bool just_explain_
Definition: ResultSet.h:982
std::string explanation_
Definition: ResultSet.h:981
std::vector< TargetValue > getNextRowImpl(const bool translate_strings, const bool decimal_to_double) const
size_t fetched_so_far_
Definition: ResultSet.h:951
size_t ResultSet::getNumColumnsLazyFetched ( ) const
inline

Definition at line 590 of file ResultSet.h.

References lazy_fetch_info_.

590  {
591  auto is_lazy = [](auto const& info) { return info.is_lazily_fetched; };
592  return std::count_if(lazy_fetch_info_.begin(), lazy_fetch_info_.end(), is_lazy);
593  }
std::vector< ColumnLazyFetchInfo > lazy_fetch_info_
Definition: ResultSet.h:966
OneIntegerColumnRow ResultSet::getOneColRow ( const size_t  index) const

Definition at line 234 of file ResultSetIteration.cpp.

References align_to_int64(), CHECK, get_key_bytes_rowwise(), and row_ptr_rowwise().

234  {
235  const auto storage_lookup_result = findStorage(global_entry_idx);
236  const auto storage = storage_lookup_result.storage_ptr;
237  const auto local_entry_idx = storage_lookup_result.fixedup_entry_idx;
238  if (storage->isEmptyEntry(local_entry_idx)) {
239  return {0, false};
240  }
241  const auto buff = storage->buff_;
 242  CHECK(buff);
 243 
 244  const auto keys_ptr = row_ptr_rowwise(buff, query_mem_desc_, local_entry_idx);
 245  const auto key_bytes_with_padding =
 246  align_to_int64(get_key_bytes_rowwise(query_mem_desc_));
 247  const auto rowwise_target_ptr = keys_ptr + key_bytes_with_padding;
248  const auto tv = getTargetValueFromBufferRowwise(rowwise_target_ptr,
249  keys_ptr,
250  global_entry_idx,
251  targets_.front(),
252  0,
253  0,
254  false,
255  false,
256  false);
257  const auto scalar_tv = boost::get<ScalarTargetValue>(&tv);
258  CHECK(scalar_tv);
259  const auto ival_ptr = boost::get<int64_t>(scalar_tv);
260  CHECK(ival_ptr);
261  return {*ival_ptr, true};
262 }
QueryMemoryDescriptor query_mem_desc_
Definition: ResultSet.h:947
TargetValue getTargetValueFromBufferRowwise(int8_t *rowwise_target_ptr, int8_t *keys_ptr, const size_t entry_buff_idx, const TargetInfo &target_info, const size_t target_logical_idx, const size_t slot_idx, const bool translate_strings, const bool decimal_to_double, const bool fixup_count_distinct_pointers) const
const std::vector< TargetInfo > targets_
Definition: ResultSet.h:943
StorageLookupResult findStorage(const size_t entry_idx) const
Definition: ResultSet.cpp:951
T row_ptr_rowwise(T buff, const QueryMemoryDescriptor &query_mem_desc, const size_t entry_idx)
#define CHECK(condition)
Definition: Logger.h:291
size_t get_key_bytes_rowwise(const QueryMemoryDescriptor &query_mem_desc)
FORCE_INLINE HOST DEVICE T align_to_int64(T addr)

+ Here is the call graph for this function:

const int8_t ResultSet::getPaddedSlotWidthBytes ( const size_t  slot_idx) const
inline

Definition at line 570 of file ResultSet.h.

References QueryMemoryDescriptor::getPaddedSlotWidthBytes(), and query_mem_desc_.

570  {
571  return query_mem_desc_.getPaddedSlotWidthBytes(slot_idx);
572  }
QueryMemoryDescriptor query_mem_desc_
Definition: ResultSet.h:947
const int8_t getPaddedSlotWidthBytes(const size_t slot_idx) const

+ Here is the call graph for this function:

const Permutation & ResultSet::getPermutationBuffer ( ) const

Definition at line 874 of file ResultSet.cpp.

874  {
875  return permutation_;
876 }
Permutation permutation_
Definition: ResultSet.h:955
QueryDescriptionType ResultSet::getQueryDescriptionType ( ) const
inline

Definition at line 566 of file ResultSet.h.

References QueryMemoryDescriptor::getQueryDescriptionType(), and query_mem_desc_.

 566  {
 567  return query_mem_desc_.getQueryDescriptionType();
 568  }
QueryMemoryDescriptor query_mem_desc_
Definition: ResultSet.h:947
QueryDescriptionType getQueryDescriptionType() const

+ Here is the call graph for this function:

const QueryMemoryDescriptor & ResultSet::getQueryMemDesc ( ) const

Definition at line 678 of file ResultSet.cpp.

References CHECK.

678  {
679  CHECK(storage_);
680  return storage_->query_mem_desc_;
681 }
std::unique_ptr< ResultSetStorage > storage_
Definition: ResultSet.h:948
#define CHECK(condition)
Definition: Logger.h:291
const QueryPlanHash ResultSet::getQueryPlanHash ( )
inline

Definition at line 506 of file ResultSet.h.

References query_plan_.

506 { return query_plan_; }
QueryPlanHash query_plan_
Definition: ResultSet.h:994
int64_t ResultSet::getQueueTime ( ) const

Definition at line 728 of file ResultSet.cpp.

int64_t ResultSet::getRenderTime ( ) const

Definition at line 733 of file ResultSet.cpp.

733  {
734  return timings_.render_time;
735 }
QueryExecutionTimings timings_
Definition: ResultSet.h:959
std::vector<TargetValue> ResultSet::getRowAt ( const size_t  index) const
TargetValue ResultSet::getRowAt ( const size_t  row_idx,
const size_t  col_idx,
const bool  translate_strings,
const bool  decimal_to_double = true 
) const
std::vector<TargetValue> ResultSet::getRowAt ( const size_t  index,
const bool  translate_strings,
const bool  decimal_to_double,
const bool  fixup_count_distinct_pointers,
const std::vector< bool > &  targets_to_skip = {} 
) const
private
std::vector< TargetValue > ResultSet::getRowAtNoTranslations ( const size_t  index,
const std::vector< bool > &  targets_to_skip = {} 
) const

Definition at line 273 of file ResultSetIteration.cpp.

275  {
276  if (logical_index >= entryCount()) {
277  return {};
278  }
279  const auto entry_idx =
280  permutation_.empty() ? logical_index : permutation_[logical_index];
281  return getRowAt(entry_idx, false, false, false, targets_to_skip);
282 }
Permutation permutation_
Definition: ResultSet.h:955
std::vector< TargetValue > getRowAt(const size_t index) const
size_t entryCount() const
Returns the number of entries the result set is allocated to hold.
std::shared_ptr<RowSetMemoryOwner> ResultSet::getRowSetMemOwner ( ) const
inline

Definition at line 450 of file ResultSet.h.

References row_set_mem_owner_.

450  {
451  return row_set_mem_owner_;
452  }
std::shared_ptr< RowSetMemoryOwner > row_set_mem_owner_
Definition: ResultSet.h:954
template<typename ENTRY_TYPE >
ENTRY_TYPE ResultSet::getRowWiseBaselineEntryAt ( const size_t  row_idx,
const size_t  target_idx,
const size_t  slot_idx 
) const
private
template<typename ENTRY_TYPE >
ENTRY_TYPE ResultSet::getRowWiseBaselineEntryAt ( const size_t  row_idx,
const size_t  target_idx,
const size_t  slot_idx 
) const

Directly accesses the result set's storage buffer for a particular data type (columnar output, baseline hash group by)

NOTE: Currently, only used in direct columnarization

Definition at line 1321 of file ResultSetIteration.cpp.

References CHECK_NE, row_ptr_rowwise(), and storage_.

1323  {
1324  CHECK_NE(storage_->query_mem_desc_.targetGroupbyIndicesSize(), size_t(0));
1325  const auto key_width = storage_->query_mem_desc_.getEffectiveKeyWidth();
1326  auto keys_ptr = row_ptr_rowwise(
1327  storage_->getUnderlyingBuffer(), storage_->query_mem_desc_, row_idx);
1328  const auto column_offset =
1329  (storage_->query_mem_desc_.getTargetGroupbyIndex(target_idx) < 0)
1330  ? storage_->query_mem_desc_.getColOffInBytes(slot_idx)
1331  : storage_->query_mem_desc_.getTargetGroupbyIndex(target_idx) * key_width;
1332  const auto storage_buffer = keys_ptr + column_offset;
1333  return *reinterpret_cast<const ENTRY_TYPE*>(storage_buffer);
1334 }
std::unique_ptr< ResultSetStorage > storage_
Definition: ResultSet.h:948
#define CHECK_NE(x, y)
Definition: Logger.h:302
T row_ptr_rowwise(T buff, const QueryMemoryDescriptor &query_mem_desc, const size_t entry_idx)

+ Here is the call graph for this function:

template<typename ENTRY_TYPE >
ENTRY_TYPE ResultSet::getRowWisePerfectHashEntryAt ( const size_t  row_idx,
const size_t  target_idx,
const size_t  slot_idx 
) const
private
template<typename ENTRY_TYPE >
ENTRY_TYPE ResultSet::getRowWisePerfectHashEntryAt ( const size_t  row_idx,
const size_t  target_idx,
const size_t  slot_idx 
) const

Directly accesses the result set's storage buffer for a particular data type (row-wise output, perfect hash group by)

NOTE: Currently, only used in direct columnarization

Definition at line 1304 of file ResultSetIteration.cpp.

References storage_.

1306  {
1307  const size_t row_offset = storage_->query_mem_desc_.getRowSize() * row_idx;
1308  const size_t column_offset = storage_->query_mem_desc_.getColOffInBytes(slot_idx);
1309  const int8_t* storage_buffer =
1310  storage_->getUnderlyingBuffer() + row_offset + column_offset;
1311  return *reinterpret_cast<const ENTRY_TYPE*>(storage_buffer);
1312 }
std::unique_ptr< ResultSetStorage > storage_
Definition: ResultSet.h:948
std::tuple< std::vector< bool >, size_t > ResultSet::getSingleSlotTargetBitmap ( ) const

Definition at line 1525 of file ResultSet.cpp.

References anonymous_namespace{RelAlgExecutor.cpp}::is_agg(), and kAVG.

1525  {
1526  std::vector<bool> target_bitmap(targets_.size(), true);
1527  size_t num_single_slot_targets = 0;
1528  for (size_t target_idx = 0; target_idx < targets_.size(); target_idx++) {
1529  const auto& sql_type = targets_[target_idx].sql_type;
1530  if (targets_[target_idx].is_agg && targets_[target_idx].agg_kind == kAVG) {
1531  target_bitmap[target_idx] = false;
1532  } else if (sql_type.is_varlen()) {
1533  target_bitmap[target_idx] = false;
1534  } else {
1535  num_single_slot_targets++;
1536  }
1537  }
1538  return std::make_tuple(std::move(target_bitmap), num_single_slot_targets);
1539 }
bool is_agg(const Analyzer::Expr *expr)
const std::vector< TargetInfo > targets_
Definition: ResultSet.h:943
Definition: sqldefs.h:77

+ Here is the call graph for this function:

std::vector< size_t > ResultSet::getSlotIndicesForTargetIndices ( ) const

Definition at line 1569 of file ResultSet.cpp.

References advance_slot().

1569  {
1570  std::vector<size_t> slot_indices(targets_.size(), 0);
1571  size_t slot_index = 0;
1572  for (size_t target_idx = 0; target_idx < targets_.size(); target_idx++) {
1573  slot_indices[target_idx] = slot_index;
1574  slot_index = advance_slot(slot_index, targets_[target_idx], false);
1575  }
1576  return slot_indices;
1577 }
const std::vector< TargetInfo > targets_
Definition: ResultSet.h:943
size_t advance_slot(const size_t j, const TargetInfo &target_info, const bool separate_varlen_storage)

+ Here is the call graph for this function:

const ResultSetStorage * ResultSet::getStorage ( ) const

Definition at line 412 of file ResultSet.cpp.

412  {
413  return storage_.get();
414 }
std::unique_ptr< ResultSetStorage > storage_
Definition: ResultSet.h:948
std::pair< size_t, size_t > ResultSet::getStorageIndex ( const size_t  entry_idx) const
private

Returns a (storageIdx, entryIdx) pair, where: storageIdx : 0 means the entry resides in storage_; otherwise storageIdx-1 is the index into appended_storage_. entryIdx : local index into that storage object.

Definition at line 926 of file ResultSet.cpp.

References CHECK_NE, and UNREACHABLE.

Referenced by makeGeoTargetValue(), makeTargetValue(), and makeVarlenTargetValue().

926  {
927  size_t fixedup_entry_idx = entry_idx;
928  auto entry_count = storage_->query_mem_desc_.getEntryCount();
929  const bool is_rowwise_layout = !storage_->query_mem_desc_.didOutputColumnar();
930  if (fixedup_entry_idx < entry_count) {
931  return {0, fixedup_entry_idx};
932  }
933  fixedup_entry_idx -= entry_count;
934  for (size_t i = 0; i < appended_storage_.size(); ++i) {
935  const auto& desc = appended_storage_[i]->query_mem_desc_;
936  CHECK_NE(is_rowwise_layout, desc.didOutputColumnar());
937  entry_count = desc.getEntryCount();
938  if (fixedup_entry_idx < entry_count) {
939  return {i + 1, fixedup_entry_idx};
940  }
941  fixedup_entry_idx -= entry_count;
942  }
 943  UNREACHABLE() << "entry_idx = " << entry_idx << ", query_mem_desc_.getEntryCount() = "
 944  << query_mem_desc_.getEntryCount();
 945  return {};
946 }
AppendedStorage appended_storage_
Definition: ResultSet.h:949
QueryMemoryDescriptor query_mem_desc_
Definition: ResultSet.h:947
#define UNREACHABLE()
Definition: Logger.h:338
std::unique_ptr< ResultSetStorage > storage_
Definition: ResultSet.h:948
#define CHECK_NE(x, y)
Definition: Logger.h:302

+ Here is the caller graph for this function:

std::string ResultSet::getString ( SQLTypeInfo const &  ti,
int64_t const  ival 
) const

Definition at line 2057 of file ResultSetIteration.cpp.

References StringDictionaryProxy::getString(), SQLTypeInfo::getStringDictKey(), and row_set_mem_owner_.

Referenced by anonymous_namespace{ResultSetIteration.cpp}::build_string_array_target_value(), isLessThan(), and makeStringTargetValue().

2057  {
2058  const auto& dict_key = ti.getStringDictKey();
2059  StringDictionaryProxy* sdp;
2060  if (dict_key.dict_id) {
2061  constexpr bool with_generation = false;
2062  sdp = dict_key.db_id > 0
2063  ? row_set_mem_owner_->getOrAddStringDictProxy(dict_key, with_generation)
2064  : row_set_mem_owner_->getStringDictProxy(
2065  dict_key); // unit tests bypass the catalog
2066  } else {
2067  sdp = row_set_mem_owner_->getLiteralStringDictProxy();
2068  }
2069  return sdp->getString(ival);
2070 }
std::string getString(int32_t string_id) const
std::shared_ptr< RowSetMemoryOwner > row_set_mem_owner_
Definition: ResultSet.h:954

+ Here is the call graph for this function:

+ Here is the caller graph for this function:

const std::vector< std::string > ResultSet::getStringDictionaryPayloadCopy ( const shared::StringDictKey dict_key) const

Definition at line 1413 of file ResultSet.cpp.

References CHECK.

1414  {
1415  const auto sdp =
1416  row_set_mem_owner_->getOrAddStringDictProxy(dict_key, /*with_generation=*/true);
1417  CHECK(sdp);
1418  return sdp->getDictionary()->copyStrings();
1419 }
std::shared_ptr< RowSetMemoryOwner > row_set_mem_owner_
Definition: ResultSet.h:954
#define CHECK(condition)
Definition: Logger.h:291
StringDictionaryProxy * ResultSet::getStringDictionaryProxy ( const shared::StringDictKey dict_key) const

Definition at line 429 of file ResultSet.cpp.

References shared::StringDictKey::db_id, shared::StringDictKey::dict_id, and dict_ref_t::literalsDictId.

430  {
431  constexpr bool with_generation = true;
432  return (dict_key.db_id > 0 || dict_key.dict_id == DictRef::literalsDictId)
433  ? row_set_mem_owner_->getOrAddStringDictProxy(dict_key, with_generation)
434  : row_set_mem_owner_->getStringDictProxy(dict_key);
435 }
std::shared_ptr< RowSetMemoryOwner > row_set_mem_owner_
Definition: ResultSet.h:954
static constexpr int32_t literalsDictId
Definition: DictRef.h:18
std::tuple< std::vector< bool >, size_t > ResultSet::getSupportedSingleSlotTargetBitmap ( ) const

This function returns a bitmap and population count of it, where it denotes all supported single-column targets suitable for direct columnarization.

The final goal is to remove the need for such selection, but at the moment for any target that doesn't qualify for direct columnarization, we use the traditional result set's iteration to handle it (e.g., count distinct, approximate count distinct)

Definition at line 1549 of file ResultSet.cpp.

References CHECK, CHECK_GE, is_distinct_target(), kFLOAT, and kSAMPLE.

 1550  {
 1551  CHECK(isDirectColumnarConversionPossible());
 1552  auto [single_slot_targets, num_single_slot_targets] = getSingleSlotTargetBitmap();
1553 
1554  for (size_t target_idx = 0; target_idx < single_slot_targets.size(); target_idx++) {
1555  const auto& target = targets_[target_idx];
1556  if (single_slot_targets[target_idx] &&
1557  (is_distinct_target(target) ||
1558  shared::is_any<kAPPROX_QUANTILE, kMODE>(target.agg_kind) ||
1559  (target.is_agg && target.agg_kind == kSAMPLE && target.sql_type == kFLOAT))) {
1560  single_slot_targets[target_idx] = false;
1561  num_single_slot_targets--;
1562  }
1563  }
1564  CHECK_GE(num_single_slot_targets, size_t(0));
1565  return std::make_tuple(std::move(single_slot_targets), num_single_slot_targets);
1566 }
#define CHECK_GE(x, y)
Definition: Logger.h:306
const std::vector< TargetInfo > targets_
Definition: ResultSet.h:943
std::tuple< std::vector< bool >, size_t > getSingleSlotTargetBitmap() const
Definition: ResultSet.cpp:1525
bool is_distinct_target(const TargetInfo &target_info)
Definition: TargetInfo.h:102
#define CHECK(condition)
Definition: Logger.h:291
bool isDirectColumnarConversionPossible() const
Definition: ResultSet.cpp:1477

+ Here is the call graph for this function:

ChunkStats ResultSet::getTableFunctionChunkStats ( const size_t  target_idx) const
const std::vector< TargetInfo > & ResultSet::getTargetInfos ( ) const

Definition at line 683 of file ResultSet.cpp.

683  {
684  return targets_;
685 }
const std::vector< TargetInfo > targets_
Definition: ResultSet.h:943
const std::vector< int64_t > & ResultSet::getTargetInitVals ( ) const

Definition at line 687 of file ResultSet.cpp.

References CHECK.

687  {
688  CHECK(storage_);
689  return storage_->target_init_vals_;
690 }
std::unique_ptr< ResultSetStorage > storage_
Definition: ResultSet.h:948
#define CHECK(condition)
Definition: Logger.h:291
std::vector<TargetMetaInfo> ResultSet::getTargetMetaInfo ( )
inline

Definition at line 520 of file ResultSet.h.

References target_meta_info_.

520 { return target_meta_info_; }
std::vector< TargetMetaInfo > target_meta_info_
Definition: ResultSet.h:996
TargetValue ResultSet::getTargetValueFromBufferColwise ( const int8_t *  col_ptr,
const int8_t *  keys_ptr,
const QueryMemoryDescriptor query_mem_desc,
const size_t  local_entry_idx,
const size_t  global_entry_idx,
const TargetInfo target_info,
const size_t  target_logical_idx,
const size_t  slot_idx,
const bool  translate_strings,
const bool  decimal_to_double 
) const
private

Definition at line 2266 of file ResultSetIteration.cpp.

References advance_to_next_columnar_target_buff(), TargetInfo::agg_kind, CHECK, CHECK_GE, anonymous_namespace{ResultSetIteration.cpp}::columnar_elem_ptr(), QueryMemoryDescriptor::didOutputColumnar(), QueryMemoryDescriptor::getEffectiveKeyWidth(), QueryMemoryDescriptor::getEntryCount(), QueryMemoryDescriptor::getPaddedSlotWidthBytes(), QueryMemoryDescriptor::getTargetGroupbyIndex(), getTargetValueFromFlatBuffer(), TargetInfo::is_agg, SQLTypeInfo::is_geometry(), is_real_str_or_array(), FlatBufferManager::isFlatBuffer(), kAVG, anonymous_namespace{ResultSetIteration.cpp}::make_avg_target_value(), makeGeoTargetValue(), makeTargetValue(), makeVarlenTargetValue(), query_mem_desc_, TargetInfo::sql_type, QueryMemoryDescriptor::targetGroupbyIndicesSize(), and SQLTypeInfo::usesFlatBuffer().

 2276  {
 2277  CHECK(query_mem_desc.didOutputColumnar());
 2278  const auto col1_ptr = col_ptr;
 2279  if (target_info.sql_type.usesFlatBuffer()) {
 2280  CHECK(FlatBufferManager::isFlatBuffer(col_ptr))
 2281  << "target_info.sql_type=" << target_info.sql_type;
2282  return getTargetValueFromFlatBuffer(col_ptr,
2283  target_info,
2284  slot_idx,
2285  target_logical_idx,
2286  global_entry_idx,
2287  local_entry_idx,
 2288  translate_strings,
 2289  row_set_mem_owner_);
 2290  }
2291  const auto compact_sz1 = query_mem_desc.getPaddedSlotWidthBytes(slot_idx);
2292  const auto next_col_ptr =
2293  advance_to_next_columnar_target_buff(col1_ptr, query_mem_desc, slot_idx);
2294  const auto col2_ptr = ((target_info.is_agg && target_info.agg_kind == kAVG) ||
2295  is_real_str_or_array(target_info))
2296  ? next_col_ptr
2297  : nullptr;
2298  const auto compact_sz2 = ((target_info.is_agg && target_info.agg_kind == kAVG) ||
2299  is_real_str_or_array(target_info))
2300  ? query_mem_desc.getPaddedSlotWidthBytes(slot_idx + 1)
2301  : 0;
2302  // TODO(Saman): add required logics for count distinct
2303  // geospatial target values:
2304  if (target_info.sql_type.is_geometry()) {
2305  return makeGeoTargetValue(
2306  col1_ptr, slot_idx, target_info, target_logical_idx, global_entry_idx);
2307  }
2308 
2309  const auto ptr1 = columnar_elem_ptr(local_entry_idx, col1_ptr, compact_sz1);
2310  if (target_info.agg_kind == kAVG || is_real_str_or_array(target_info)) {
2311  CHECK(col2_ptr);
2312  CHECK(compact_sz2);
2313  const auto ptr2 = columnar_elem_ptr(local_entry_idx, col2_ptr, compact_sz2);
2314  return target_info.agg_kind == kAVG
2315  ? make_avg_target_value(ptr1, compact_sz1, ptr2, compact_sz2, target_info)
2316  : makeVarlenTargetValue(ptr1,
2317  compact_sz1,
2318  ptr2,
2319  compact_sz2,
2320  target_info,
2321  target_logical_idx,
2322  translate_strings,
2323  global_entry_idx);
2324  }
 2325  if (query_mem_desc_.targetGroupbyIndicesSize() == 0 ||
 2326  query_mem_desc_.getTargetGroupbyIndex(target_logical_idx) < 0) {
2327  return makeTargetValue(ptr1,
2328  compact_sz1,
2329  target_info,
2330  target_logical_idx,
 2331  translate_strings,
 2332  decimal_to_double,
 2333  global_entry_idx);
2334  }
2335  const auto key_width = query_mem_desc_.getEffectiveKeyWidth();
2336  const auto key_idx = query_mem_desc_.getTargetGroupbyIndex(target_logical_idx);
2337  CHECK_GE(key_idx, 0);
2338  auto key_col_ptr = keys_ptr + key_idx * query_mem_desc_.getEntryCount() * key_width;
2339  return makeTargetValue(columnar_elem_ptr(local_entry_idx, key_col_ptr, key_width),
2340  key_width,
2341  target_info,
2342  target_logical_idx,
 2343  translate_strings,
 2344  decimal_to_double,
 2345  global_entry_idx);
2346 }
double decimal_to_double(const SQLTypeInfo &otype, int64_t oval)
int64_t getTargetGroupbyIndex(const size_t target_idx) const
T advance_to_next_columnar_target_buff(T target_ptr, const QueryMemoryDescriptor &query_mem_desc, const size_t target_slot_idx)
SQLTypeInfo sql_type
Definition: TargetInfo.h:52
QueryMemoryDescriptor query_mem_desc_
Definition: ResultSet.h:947
#define CHECK_GE(x, y)
Definition: Logger.h:306
size_t getEffectiveKeyWidth() const
TargetValue makeGeoTargetValue(const int8_t *geo_target_ptr, const size_t slot_idx, const TargetInfo &target_info, const size_t target_logical_idx, const size_t entry_buff_idx) const
TargetValue make_avg_target_value(const int8_t *ptr1, const int8_t compact_sz1, const int8_t *ptr2, const int8_t compact_sz2, const TargetInfo &target_info)
TargetValue getTargetValueFromFlatBuffer(const int8_t *col_ptr, const TargetInfo &target_info, const size_t slot_idx, const size_t target_logical_idx, const size_t global_entry_idx, const size_t local_entry_idx, const bool translate_strings, const std::shared_ptr< RowSetMemoryOwner > &row_set_mem_owner_)
std::shared_ptr< RowSetMemoryOwner > row_set_mem_owner_
Definition: ResultSet.h:954
bool is_agg
Definition: TargetInfo.h:50
size_t targetGroupbyIndicesSize() const
bool usesFlatBuffer() const
Definition: sqltypes.h:1083
const int8_t getPaddedSlotWidthBytes(const size_t slot_idx) const
SQLAgg agg_kind
Definition: TargetInfo.h:51
bool is_real_str_or_array(const TargetInfo &target_info)
TargetValue makeTargetValue(const int8_t *ptr, const int8_t compact_sz, const TargetInfo &target_info, const size_t target_logical_idx, const bool translate_strings, const bool decimal_to_double, const size_t entry_buff_idx) const
#define CHECK(condition)
Definition: Logger.h:291
bool is_geometry() const
Definition: sqltypes.h:597
HOST static DEVICE bool isFlatBuffer(const void *buffer)
Definition: FlatBuffer.h:528
Definition: sqldefs.h:77
const int8_t * columnar_elem_ptr(const size_t entry_idx, const int8_t *col1_ptr, const int8_t compact_sz1)
TargetValue makeVarlenTargetValue(const int8_t *ptr1, const int8_t compact_sz1, const int8_t *ptr2, const int8_t compact_sz2, const TargetInfo &target_info, const size_t target_logical_idx, const bool translate_strings, const size_t entry_buff_idx) const

+ Here is the call graph for this function:

TargetValue ResultSet::getTargetValueFromBufferRowwise ( int8_t *  rowwise_target_ptr,
int8_t *  keys_ptr,
const size_t  entry_buff_idx,
const TargetInfo target_info,
const size_t  target_logical_idx,
const size_t  slot_idx,
const bool  translate_strings,
const bool  decimal_to_double,
const bool  fixup_count_distinct_pointers 
) const
private

Definition at line 2350 of file ResultSetIteration.cpp.

References TargetInfo::agg_kind, CHECK, QueryMemoryDescriptor::count_distinct_descriptors_, SQLTypeInfo::get_compression(), QueryMemoryDescriptor::getEffectiveKeyWidth(), QueryMemoryDescriptor::getLogicalSlotWidthBytes(), QueryMemoryDescriptor::getPaddedSlotWidthBytes(), QueryMemoryDescriptor::getTargetGroupbyIndex(), QueryMemoryDescriptor::hasKeylessHash(), TargetInfo::is_agg, SQLTypeInfo::is_array(), is_distinct_target(), SQLTypeInfo::is_geometry(), is_real_str_or_array(), SQLTypeInfo::is_string(), QueryMemoryDescriptor::isSingleColumnGroupByWithPerfectHash(), kAVG, kENCODING_NONE, anonymous_namespace{ResultSetIteration.cpp}::make_avg_target_value(), makeGeoTargetValue(), makeTargetValue(), makeVarlenTargetValue(), query_mem_desc_, separate_varlen_storage_valid_, TargetInfo::sql_type, storage_, QueryMemoryDescriptor::targetGroupbyIndicesSize(), UNLIKELY, and SQLTypeInfo::usesFlatBuffer().

2359  {
2360  // A FlatBuffer can exist only in columnar storage. If the
2361  // following check fails, it means that storage-specific attributes
2362  // of the type info have leaked.
2363  CHECK(!target_info.sql_type.usesFlatBuffer());
2364 
2365  if (UNLIKELY(fixup_count_distinct_pointers)) {
2366  if (is_distinct_target(target_info)) {
2367  auto count_distinct_ptr_ptr = reinterpret_cast<int64_t*>(rowwise_target_ptr);
2368  const auto remote_ptr = *count_distinct_ptr_ptr;
2369  if (remote_ptr) {
2370  const auto ptr = storage_->mappedPtr(remote_ptr);
2371  if (ptr) {
2372  *count_distinct_ptr_ptr = ptr;
2373  } else {
2374  // need to create a zero filled buffer for this remote_ptr
2375  const auto& count_distinct_desc =
2376  query_mem_desc_.count_distinct_descriptors_[target_logical_idx];
2377  const auto bitmap_byte_sz = count_distinct_desc.sub_bitmap_count == 1
2378  ? count_distinct_desc.bitmapSizeBytes()
2379  : count_distinct_desc.bitmapPaddedSizeBytes();
2380  constexpr size_t thread_idx{0};
2381  row_set_mem_owner_->initCountDistinctBufferAllocator(bitmap_byte_sz,
2382  thread_idx);
2383  auto count_distinct_buffer =
2384  row_set_mem_owner_->allocateCountDistinctBuffer(bitmap_byte_sz, thread_idx);
2385  *count_distinct_ptr_ptr = reinterpret_cast<int64_t>(count_distinct_buffer);
2386  }
2387  }
2388  }
2389  return int64_t(0);
2390  }
2391  if (target_info.sql_type.is_geometry()) {
2392  return makeGeoTargetValue(
2393  rowwise_target_ptr, slot_idx, target_info, target_logical_idx, entry_buff_idx);
2394  }
2395 
2396  auto ptr1 = rowwise_target_ptr;
2397  int8_t compact_sz1 = query_mem_desc_.getPaddedSlotWidthBytes(slot_idx);
2399  !query_mem_desc_.hasKeylessHash() && !target_info.is_agg) {
2400  // Single column perfect hash group by can utilize one slot for both the key and the
2401  // target value if both values fit in 8 bytes. Use the target value's actual size for
2402  // this case. If they don't, the target value should be 8 bytes, so we can still use
2403  // the actual size rather than the compact size.
2404  compact_sz1 = query_mem_desc_.getLogicalSlotWidthBytes(slot_idx);
2405  }
2406 
2407  // logic for deciding width of column
2408  if (target_info.agg_kind == kAVG || is_real_str_or_array(target_info)) {
2409  const auto ptr2 =
2410  rowwise_target_ptr + query_mem_desc_.getPaddedSlotWidthBytes(slot_idx);
2411  int8_t compact_sz2 = 0;
2412  // Skip reading the second slot if we have a none encoded string and are using
2413  // the none encoded strings buffer attached to ResultSetStorage
2415  (target_info.sql_type.is_array() ||
2416  (target_info.sql_type.is_string() &&
2417  target_info.sql_type.get_compression() == kENCODING_NONE)))) {
2418  compact_sz2 = query_mem_desc_.getPaddedSlotWidthBytes(slot_idx + 1);
2419  }
2420  if (separate_varlen_storage_valid_ && target_info.is_agg) {
2421  compact_sz2 = 8; // TODO(adb): is there a better way to do this?
2422  }
2423  CHECK(ptr2);
2424  return target_info.agg_kind == kAVG
2425  ? make_avg_target_value(ptr1, compact_sz1, ptr2, compact_sz2, target_info)
2426  : makeVarlenTargetValue(ptr1,
2427  compact_sz1,
2428  ptr2,
2429  compact_sz2,
2430  target_info,
2431  target_logical_idx,
2432  translate_strings,
2433  entry_buff_idx);
2434  }
2436  query_mem_desc_.getTargetGroupbyIndex(target_logical_idx) < 0) {
2437  return makeTargetValue(ptr1,
2438  compact_sz1,
2439  target_info,
2440  target_logical_idx,
2441  translate_strings,
2443  entry_buff_idx);
2444  }
2445  const auto key_width = query_mem_desc_.getEffectiveKeyWidth();
2446  ptr1 = keys_ptr + query_mem_desc_.getTargetGroupbyIndex(target_logical_idx) * key_width;
2447  return makeTargetValue(ptr1,
2448  key_width,
2449  target_info,
2450  target_logical_idx,
2451  translate_strings,
2453  entry_buff_idx);
2454 }
double decimal_to_double(const SQLTypeInfo &otype, int64_t oval)
int64_t getTargetGroupbyIndex(const size_t target_idx) const
SQLTypeInfo sql_type
Definition: TargetInfo.h:52
QueryMemoryDescriptor query_mem_desc_
Definition: ResultSet.h:947
std::unique_ptr< ResultSetStorage > storage_
Definition: ResultSet.h:948
size_t getEffectiveKeyWidth() const
TargetValue makeGeoTargetValue(const int8_t *geo_target_ptr, const size_t slot_idx, const TargetInfo &target_info, const size_t target_logical_idx, const size_t entry_buff_idx) const
TargetValue make_avg_target_value(const int8_t *ptr1, const int8_t compact_sz1, const int8_t *ptr2, const int8_t compact_sz2, const TargetInfo &target_info)
std::shared_ptr< RowSetMemoryOwner > row_set_mem_owner_
Definition: ResultSet.h:954
bool is_agg
Definition: TargetInfo.h:50
size_t targetGroupbyIndicesSize() const
CountDistinctDescriptors count_distinct_descriptors_
bool is_distinct_target(const TargetInfo &target_info)
Definition: TargetInfo.h:102
bool usesFlatBuffer() const
Definition: sqltypes.h:1083
const int8_t getPaddedSlotWidthBytes(const size_t slot_idx) const
SQLAgg agg_kind
Definition: TargetInfo.h:51
#define UNLIKELY(x)
Definition: likely.h:25
bool is_real_str_or_array(const TargetInfo &target_info)
bool isSingleColumnGroupByWithPerfectHash() const
HOST DEVICE EncodingType get_compression() const
Definition: sqltypes.h:399
TargetValue makeTargetValue(const int8_t *ptr, const int8_t compact_sz, const TargetInfo &target_info, const size_t target_logical_idx, const bool translate_strings, const bool decimal_to_double, const size_t entry_buff_idx) const
#define CHECK(condition)
Definition: Logger.h:291
bool is_geometry() const
Definition: sqltypes.h:597
bool separate_varlen_storage_valid_
Definition: ResultSet.h:980
bool is_string() const
Definition: sqltypes.h:561
const int8_t getLogicalSlotWidthBytes(const size_t slot_idx) const
Definition: sqldefs.h:77
bool is_array() const
Definition: sqltypes.h:585
TargetValue makeVarlenTargetValue(const int8_t *ptr1, const int8_t compact_sz1, const int8_t *ptr2, const int8_t compact_sz2, const TargetInfo &target_info, const size_t target_logical_idx, const bool translate_strings, const size_t entry_buff_idx) const

+ Here is the call graph for this function:

int ResultSet::getThreadIdx ( ) const

Definition at line 762 of file ResultSet.cpp.

References thread_idx_().

762  {
763  return thread_idx_;
764 }
const int thread_idx_
Definition: ResultSet.h:946

+ Here is the call graph for this function:

const std::pair< std::vector< int32_t >, std::vector< std::string > > ResultSet::getUniqueStringsForDictEncodedTargetCol ( const size_t  col_idx) const

Definition at line 1422 of file ResultSet.cpp.

References CHECK, and inline_fixed_encoding_null_val().

1422  {
1423  const auto col_type_info = getColType(col_idx);
1424  std::unordered_set<int32_t> unique_string_ids_set;
1425  const size_t num_entries = entryCount();
1426  std::vector<bool> targets_to_skip(colCount(), true);
1427  targets_to_skip[col_idx] = false;
1428  CHECK(col_type_info.is_dict_encoded_type()); // Array<Text> or Text
1429  const int64_t null_val = inline_fixed_encoding_null_val(
1430  col_type_info.is_array() ? col_type_info.get_elem_type() : col_type_info);
1431 
1432  for (size_t row_idx = 0; row_idx < num_entries; ++row_idx) {
1433  const auto result_row = getRowAtNoTranslations(row_idx, targets_to_skip);
1434  if (!result_row.empty()) {
1435  if (const auto scalar_col_val =
1436  boost::get<ScalarTargetValue>(&result_row[col_idx])) {
1437  const int32_t string_id =
1438  static_cast<int32_t>(boost::get<int64_t>(*scalar_col_val));
1439  if (string_id != null_val) {
1440  unique_string_ids_set.emplace(string_id);
1441  }
1442  } else if (const auto array_col_val =
1443  boost::get<ArrayTargetValue>(&result_row[col_idx])) {
1444  if (*array_col_val) {
1445  for (const ScalarTargetValue& scalar : array_col_val->value()) {
1446  const int32_t string_id = static_cast<int32_t>(boost::get<int64_t>(scalar));
1447  if (string_id != null_val) {
1448  unique_string_ids_set.emplace(string_id);
1449  }
1450  }
1451  }
1452  }
1453  }
1454  }
1455 
1456  const size_t num_unique_strings = unique_string_ids_set.size();
1457  std::vector<int32_t> unique_string_ids(num_unique_strings);
1458  size_t string_idx{0};
1459  for (const auto unique_string_id : unique_string_ids_set) {
1460  unique_string_ids[string_idx++] = unique_string_id;
1461  }
1462 
1463  const auto sdp = row_set_mem_owner_->getOrAddStringDictProxy(
1464  col_type_info.getStringDictKey(), /*with_generation=*/true);
1465  CHECK(sdp);
1466 
1467  return std::make_pair(unique_string_ids, sdp->getStrings(unique_string_ids));
1468 }
size_t colCount() const
Definition: ResultSet.cpp:416
std::vector< TargetValue > getRowAtNoTranslations(const size_t index, const std::vector< bool > &targets_to_skip={}) const
std::shared_ptr< RowSetMemoryOwner > row_set_mem_owner_
Definition: ResultSet.h:954
SQLTypeInfo getColType(const size_t col_idx) const
Definition: ResultSet.cpp:420
size_t entryCount() const
Returns the number of entries the result set is allocated to hold.
#define CHECK(condition)
Definition: Logger.h:291
int64_t inline_fixed_encoding_null_val(const SQL_TYPE_INFO &ti)
boost::variant< int64_t, double, float, NullableString > ScalarTargetValue
Definition: TargetValue.h:180

+ Here is the call graph for this function:

InternalTargetValue ResultSet::getVarlenOrderEntry ( const int64_t  str_ptr,
const size_t  str_len 
) const
private

Definition at line 627 of file ResultSetIteration.cpp.

References CHECK, CPU, device_id_, device_type_, QueryMemoryDescriptor::getExecutor(), getQueryEngineCudaStreamForDevice(), GPU, query_mem_desc_, and row_set_mem_owner_.

628  {
629  char* host_str_ptr{nullptr};
630  std::vector<int8_t> cpu_buffer;
632  cpu_buffer.resize(str_len);
633  const auto executor = query_mem_desc_.getExecutor();
634  CHECK(executor);
635  auto data_mgr = executor->getDataMgr();
636  auto allocator = std::make_unique<CudaAllocator>(
637  data_mgr, device_id_, getQueryEngineCudaStreamForDevice(device_id_));
638  allocator->copyFromDevice(
639  &cpu_buffer[0], reinterpret_cast<int8_t*>(str_ptr), str_len);
640  host_str_ptr = reinterpret_cast<char*>(&cpu_buffer[0]);
641  } else {
643  host_str_ptr = reinterpret_cast<char*>(str_ptr);
644  }
645  std::string str(host_str_ptr, str_len);
646  return InternalTargetValue(row_set_mem_owner_->addString(str));
647 }
QueryMemoryDescriptor query_mem_desc_
Definition: ResultSet.h:947
std::shared_ptr< RowSetMemoryOwner > row_set_mem_owner_
Definition: ResultSet.h:954
CUstream getQueryEngineCudaStreamForDevice(int device_num)
Definition: QueryEngine.cpp:7
const ExecutorDeviceType device_type_
Definition: ResultSet.h:944
#define CHECK(condition)
Definition: Logger.h:291
const Executor * getExecutor() const
const int device_id_
Definition: ResultSet.h:945

+ Here is the call graph for this function:

const VarlenOutputInfo * ResultSet::getVarlenOutputInfo ( const size_t  entry_idx) const
private

Definition at line 1161 of file ResultSetIteration.cpp.

References CHECK, and findStorage().

Referenced by makeGeoTargetValue().

1161  {
1162  auto storage_lookup_result = findStorage(entry_idx);
1163  CHECK(storage_lookup_result.storage_ptr);
1164  return storage_lookup_result.storage_ptr->getVarlenOutputInfo();
1165 }
StorageLookupResult findStorage(const size_t entry_idx) const
Definition: ResultSet.cpp:951
#define CHECK(condition)
Definition: Logger.h:291

+ Here is the call graph for this function:

+ Here is the caller graph for this function:

const bool ResultSet::hasValidBuffer ( ) const
inline

Definition at line 528 of file ResultSet.h.

References storage_.

528  {
529  if (storage_) {
530  return true;
531  }
532  return false;
533  }
std::unique_ptr< ResultSetStorage > storage_
Definition: ResultSet.h:948
void ResultSet::holdChunkIterators ( const std::shared_ptr< std::list< ChunkIter >>  chunk_iters)
inline

Definition at line 443 of file ResultSet.h.

References chunk_iters_.

443  {
444  chunk_iters_.push_back(chunk_iters);
445  }
std::vector< std::shared_ptr< std::list< ChunkIter > > > chunk_iters_
Definition: ResultSet.h:962
void ResultSet::holdChunks ( const std::list< std::shared_ptr< Chunk_NS::Chunk >> &  chunks)
inline

Definition at line 440 of file ResultSet.h.

References chunks_.

440  {
441  chunks_ = chunks;
442  }
std::list< std::shared_ptr< Chunk_NS::Chunk > > chunks_
Definition: ResultSet.h:961
void ResultSet::holdLiterals ( std::vector< int8_t > &  literal_buff)
inline

Definition at line 446 of file ResultSet.h.

References literal_buffers_.

446  {
447  literal_buffers_.push_back(std::move(literal_buff));
448  }
std::vector< std::vector< int8_t > > literal_buffers_
Definition: ResultSet.h:965
void ResultSet::initializeStorage ( ) const

Definition at line 1042 of file ResultSetReduction.cpp.

1042  {
1044  storage_->initializeColWise();
1045  } else {
1046  storage_->initializeRowWise();
1047  }
1048 }
QueryMemoryDescriptor query_mem_desc_
Definition: ResultSet.h:947
std::unique_ptr< ResultSetStorage > storage_
Definition: ResultSet.h:948
PermutationView ResultSet::initPermutationBuffer ( PermutationView  permutation,
PermutationIdx const  begin,
PermutationIdx const  end 
) const
private

Definition at line 858 of file ResultSet.cpp.

References CHECK, DEBUG_TIMER, and VectorView< T >::push_back().

860  {
861  auto timer = DEBUG_TIMER(__func__);
862  for (PermutationIdx i = begin; i < end; ++i) {
863  const auto storage_lookup_result = findStorage(i);
864  const auto lhs_storage = storage_lookup_result.storage_ptr;
865  const auto off = storage_lookup_result.fixedup_entry_idx;
866  CHECK(lhs_storage);
867  if (!lhs_storage->isEmptyEntry(off)) {
868  permutation.push_back(i);
869  }
870  }
871  return permutation;
872 }
DEVICE void push_back(T const &value)
Definition: VectorView.h:73
StorageLookupResult findStorage(const size_t entry_idx) const
Definition: ResultSet.cpp:951
uint32_t PermutationIdx
Definition: ResultSet.h:152
#define CHECK(condition)
Definition: Logger.h:291
#define DEBUG_TIMER(name)
Definition: Logger.h:412

+ Here is the call graph for this function:

void ResultSet::initStatus ( )
inline

Definition at line 473 of file ResultSet.h.

References clearPermutation(), crt_row_buff_idx_, drop_first_, fetched_so_far_, invalidateCachedRowCount(), keep_first_, setGeoReturnType(), and WktString.

473  {
 474  // todo(yoonmin): what else do we additionally need to consider
 475  // to make the status of the result set completely clear for reuse?
476  crt_row_buff_idx_ = 0;
477  fetched_so_far_ = 0;
481  drop_first_ = 0;
482  keep_first_ = 0;
483  }
void setGeoReturnType(const GeoReturnType val)
Definition: ResultSet.h:552
size_t keep_first_
Definition: ResultSet.h:953
void invalidateCachedRowCount() const
Definition: ResultSet.cpp:611
size_t drop_first_
Definition: ResultSet.h:952
size_t fetched_so_far_
Definition: ResultSet.h:951
size_t crt_row_buff_idx_
Definition: ResultSet.h:950
void clearPermutation()
Definition: ResultSet.h:467

+ Here is the call graph for this function:

void ResultSet::invalidateCachedRowCount ( ) const

Definition at line 611 of file ResultSet.cpp.

References uninitialized_cached_row_count.

Referenced by initStatus().

611  {
613 }
std::atomic< int64_t > cached_row_count_
Definition: ResultSet.h:984
constexpr int64_t uninitialized_cached_row_count
Definition: ResultSet.cpp:52

+ Here is the caller graph for this function:

void ResultSet::invalidateResultSetChunks ( )
inline

Definition at line 485 of file ResultSet.h.

References chunk_iters_, and chunks_.

485  {
486  if (!chunks_.empty()) {
487  chunks_.clear();
488  }
489  if (!chunk_iters_.empty()) {
490  chunk_iters_.clear();
491  }
492  };
std::vector< std::shared_ptr< std::list< ChunkIter > > > chunk_iters_
Definition: ResultSet.h:962
std::list< std::shared_ptr< Chunk_NS::Chunk > > chunks_
Definition: ResultSet.h:961
const bool ResultSet::isCached ( ) const
inline

Definition at line 498 of file ResultSet.h.

References cached_.

498 { return cached_; }
bool cached_
Definition: ResultSet.h:991
bool ResultSet::isDirectColumnarConversionPossible ( ) const

Determines if it is possible to directly form a ColumnarResults class from this result set, bypassing the default columnarization.

NOTE: If there exists a permutation vector (i.e., in some ORDER BY queries), it becomes equivalent to the row-wise columnarization.

Definition at line 1477 of file ResultSet.cpp.

References CHECK, g_enable_direct_columnarization, heavyai::GroupByBaselineHash, heavyai::GroupByPerfectHash, heavyai::Projection, and heavyai::TableFunction.

Referenced by copyColumnIntoBuffer().

1477  {
1479  return false;
1480  } else if (query_mem_desc_.didOutputColumnar()) {
1481  return permutation_.empty() && (query_mem_desc_.getQueryDescriptionType() ==
1489  } else {
1492  return permutation_.empty() && (query_mem_desc_.getQueryDescriptionType() ==
1496  }
1497 }
GroupByPerfectHash
Definition: enums.h:58
Permutation permutation_
Definition: ResultSet.h:955
bool g_enable_direct_columnarization
Definition: Execute.cpp:134
QueryMemoryDescriptor query_mem_desc_
Definition: ResultSet.h:947
Projection
Definition: enums.h:58
TableFunction
Definition: enums.h:58
QueryDescriptionType getQueryDescriptionType() const
GroupByBaselineHash
Definition: enums.h:58
#define CHECK(condition)
Definition: Logger.h:291

+ Here is the caller graph for this function:

bool ResultSet::isEmpty ( ) const

Returns a boolean signifying whether there are valid entries in the result set.

Note a result set can be logically empty even if the value returned by ResultSet::entryCount() is > 0, whether due to a SQL LIMIT/OFFSET applied or because the result set representation is inherently sparse (i.e. baseline hash group by).

Internally this function is just implemented as ResultSet::rowCount() == 0, which caches its value so the row count will only be computed once per finalized result set.

Definition at line 655 of file ResultSet.cpp.

655  {
656  // To simplify this function and de-dup logic with ResultSet::rowCount()
657  // (mismatches between the two were causing bugs), we modified this function
658  // to simply fetch rowCount(). The potential downside of this approach is that
659  // in some cases more work will need to be done, as we can't just stop at the first row.
660  // Mitigating that for most cases is the following:
661  // 1) rowCount() is cached, so the logic for actually computing row counts will run only
662  // once
663  // per result set.
664  // 2) If the cache is empty (cached_row_count_ == -1), rowCount() will use parallel
665  // methods if deemed appropriate, which in many cases could be faster for a sparse
 666  // large result set than single-threaded iteration from the beginning
667  // 3) Often where isEmpty() is needed, rowCount() is also needed. Since the first call
668  // to rowCount()
669  // will be cached, there is no extra overhead in these cases
670 
671  return rowCount() == size_t(0);
672 }
size_t rowCount(const bool force_parallel=false) const
Returns the number of valid entries in the result set (i.e that will be returned from the SQL query o...
Definition: ResultSet.cpp:599
const bool ResultSet::isEstimator ( ) const
inline

Definition at line 494 of file ResultSet.h.

References estimator_.

494 { return !estimator_; }
const std::shared_ptr< const Analyzer::Estimator > estimator_
Definition: ResultSet.h:971
bool ResultSet::isExplain ( ) const

Definition at line 746 of file ResultSet.cpp.

746  {
747  return just_explain_;
748 }
const bool just_explain_
Definition: ResultSet.h:982
bool ResultSet::isGeoColOnGpu ( const size_t  col_idx) const

Definition at line 1489 of file ResultSetIteration.cpp.

References CHECK_LT, device_type_, GPU, IS_GEO, lazy_fetch_info_, separate_varlen_storage_valid_, targets_, and to_string().

1489  {
1490  // This should match the logic in makeGeoTargetValue which ultimately calls
1491  // fetch_data_from_gpu when the geo column is on the device.
1492  // TODO(croot): somehow find a way to refactor this and makeGeoTargetValue to use a
1493  // utility function that handles this logic in one place
1494  CHECK_LT(col_idx, targets_.size());
1495  if (!IS_GEO(targets_[col_idx].sql_type.get_type())) {
1496  throw std::runtime_error("Column target at index " + std::to_string(col_idx) +
1497  " is not a geo column. It is of type " +
1498  targets_[col_idx].sql_type.get_type_name() + ".");
1499  }
1500 
1501  const auto& target_info = targets_[col_idx];
1502  if (separate_varlen_storage_valid_ && !target_info.is_agg) {
1503  return false;
1504  }
1505 
1506  if (!lazy_fetch_info_.empty()) {
1507  CHECK_LT(col_idx, lazy_fetch_info_.size());
1508  if (lazy_fetch_info_[col_idx].is_lazily_fetched) {
1509  return false;
1510  }
1511  }
1512 
1514 }
std::string to_string(char const *&&v)
const std::vector< TargetInfo > targets_
Definition: ResultSet.h:943
#define CHECK_LT(x, y)
Definition: Logger.h:303
const ExecutorDeviceType device_type_
Definition: ResultSet.h:944
std::vector< ColumnLazyFetchInfo > lazy_fetch_info_
Definition: ResultSet.h:966
bool separate_varlen_storage_valid_
Definition: ResultSet.h:980
#define IS_GEO(T)
Definition: sqltypes.h:310

+ Here is the call graph for this function:

bool ResultSet::isLessThan ( SQLTypeInfo const &  ti,
int64_t const  lhs,
int64_t const  rhs 
) const

Definition at line 1111 of file ResultSetIteration.cpp.

References shared::bit_cast(), CHECK_EQ, SQLTypeInfo::get_compression(), getString(), SQLTypeInfo::is_any(), SQLTypeInfo::is_string(), kDOUBLE, kENCODING_DICT, and kFLOAT.

1113  {
1114  if (ti.is_string()) {
1115  CHECK_EQ(kENCODING_DICT, ti.get_compression());
1116  return getString(ti, lhs) < getString(ti, rhs);
1117  } else {
1118  return ti.is_any<kDOUBLE>()
1119  ? shared::bit_cast<double>(lhs) < shared::bit_cast<double>(rhs)
1120  : ti.is_any<kFLOAT>()
1121  ? shared::bit_cast<float>(lhs) < shared::bit_cast<float>(rhs)
1122  : lhs < rhs;
1123  }
1124 }
#define CHECK_EQ(x, y)
Definition: Logger.h:301
TO bit_cast(FROM &&from)
Definition: misc.h:307
std::string getString(SQLTypeInfo const &, int64_t const ival) const

+ Here is the call graph for this function:

bool ResultSet::isNull ( const SQLTypeInfo ti,
const InternalTargetValue val,
const bool  float_argument_input 
)
staticprivate

Definition at line 2601 of file ResultSetIteration.cpp.

References CHECK, SQLTypeInfo::get_notnull(), InternalTargetValue::i1, InternalTargetValue::i2, InternalTargetValue::isInt(), InternalTargetValue::isNull(), InternalTargetValue::isPair(), InternalTargetValue::isStr(), and null_val_bit_pattern().

2603  {
2604  if (ti.get_notnull()) {
2605  return false;
2606  }
2607  if (val.isInt()) {
2608  return val.i1 == null_val_bit_pattern(ti, float_argument_input);
2609  }
2610  if (val.isPair()) {
2611  return !val.i2;
2612  }
2613  if (val.isStr()) {
2614  return !val.i1;
2615  }
2616  CHECK(val.isNull());
2617  return true;
2618 }
bool isPair() const
Definition: TargetValue.h:65
bool isStr() const
Definition: TargetValue.h:69
int64_t null_val_bit_pattern(const SQLTypeInfo &ti, const bool float_argument_input)
bool isNull() const
Definition: TargetValue.h:67
bool isInt() const
Definition: TargetValue.h:63
#define CHECK(condition)
Definition: Logger.h:291
HOST DEVICE bool get_notnull() const
Definition: sqltypes.h:398

+ Here is the call graph for this function:

bool ResultSet::isNullIval ( SQLTypeInfo const &  ti,
bool const  translate_strings,
int64_t const  ival 
)
static

Definition at line 1126 of file ResultSetIteration.cpp.

References inline_int_null_val(), SQLTypeInfo::is_any(), SQLTypeInfo::is_string(), kDOUBLE, kFLOAT, NULL_DOUBLE, NULL_FLOAT, and NULL_INT.

Referenced by makeTargetValue().

1128  {
1129  return ti.is_any<kDOUBLE>() ? shared::bit_cast<double>(ival) == NULL_DOUBLE
1130  : ti.is_any<kFLOAT>() ? shared::bit_cast<float>(ival) == NULL_FLOAT
1131  : ti.is_string() ? translate_strings ? ival == NULL_INT : ival == 0
1132  : ival == inline_int_null_val(ti);
1133 }
#define NULL_DOUBLE
#define NULL_FLOAT
#define NULL_INT
int64_t inline_int_null_val(const SQL_TYPE_INFO &ti)

+ Here is the call graph for this function:

+ Here is the caller graph for this function:

const bool ResultSet::isPermutationBufferEmpty ( ) const
inline

Definition at line 455 of file ResultSet.h.

References permutation_.

455 { return permutation_.empty(); };
Permutation permutation_
Definition: ResultSet.h:955
bool ResultSet::isRowAtEmpty ( const size_t  index) const

Definition at line 284 of file ResultSetIteration.cpp.

284  {
285  if (logical_index >= entryCount()) {
286  return true;
287  }
288  const auto entry_idx =
289  permutation_.empty() ? logical_index : permutation_[logical_index];
290  const auto storage_lookup_result = findStorage(entry_idx);
291  const auto storage = storage_lookup_result.storage_ptr;
292  const auto local_entry_idx = storage_lookup_result.fixedup_entry_idx;
293  return storage->isEmptyEntry(local_entry_idx);
294 }
Permutation permutation_
Definition: ResultSet.h:955
StorageLookupResult findStorage(const size_t entry_idx) const
Definition: ResultSet.cpp:951
size_t entryCount() const
Returns the number of entries the result set is allocated to hold.
bool ResultSet::isTruncated ( ) const

Definition at line 742 of file ResultSet.cpp.

742  {
743  return keep_first_ + drop_first_;
744 }
size_t keep_first_
Definition: ResultSet.h:953
size_t drop_first_
Definition: ResultSet.h:952
bool ResultSet::isValidationOnlyRes ( ) const

Definition at line 754 of file ResultSet.cpp.

754  {
755  return for_validation_only_;
756 }
bool for_validation_only_
Definition: ResultSet.h:983
bool ResultSet::isZeroCopyColumnarConversionPossible ( size_t  column_idx) const

Definition at line 1499 of file ResultSet.cpp.

References heavyai::Projection, and heavyai::TableFunction.

1499  {
1504  appended_storage_.empty() && storage_ &&
1505  (lazy_fetch_info_.empty() || !lazy_fetch_info_[column_idx].is_lazily_fetched);
1506 }
AppendedStorage appended_storage_
Definition: ResultSet.h:949
QueryMemoryDescriptor query_mem_desc_
Definition: ResultSet.h:947
std::unique_ptr< ResultSetStorage > storage_
Definition: ResultSet.h:948
Projection
Definition: enums.h:58
TableFunction
Definition: enums.h:58
QueryDescriptionType getQueryDescriptionType() const
std::vector< ColumnLazyFetchInfo > lazy_fetch_info_
Definition: ResultSet.h:966
void ResultSet::keepFirstN ( const size_t  n)

Definition at line 54 of file ResultSet.cpp.

References anonymous_namespace{Utm.h}::n.

54  {
56  keep_first_ = n;
57 }
size_t keep_first_
Definition: ResultSet.h:953
void invalidateCachedRowCount() const
Definition: ResultSet.cpp:611
constexpr double n
Definition: Utm.h:38
int64_t ResultSet::lazyReadInt ( const int64_t  ival,
const size_t  target_logical_idx,
const StorageLookupResult storage_lookup_result 
) const
private

Definition at line 649 of file ResultSetIteration.cpp.

References CHECK, CHECK_LT, ChunkIter_get_nth(), col_buffers_, ResultSet::StorageLookupResult::fixedup_entry_idx, getColumnFrag(), VarlenDatum::is_null, kENCODING_NONE, result_set::lazy_decode(), lazy_fetch_info_, VarlenDatum::length, VarlenDatum::pointer, row_set_mem_owner_, ResultSet::StorageLookupResult::storage_idx, and targets_.

651  {
652  if (!lazy_fetch_info_.empty()) {
653  CHECK_LT(target_logical_idx, lazy_fetch_info_.size());
654  const auto& col_lazy_fetch = lazy_fetch_info_[target_logical_idx];
655  if (col_lazy_fetch.is_lazily_fetched) {
656  CHECK_LT(static_cast<size_t>(storage_lookup_result.storage_idx),
657  col_buffers_.size());
658  int64_t ival_copy = ival;
659  auto& frag_col_buffers =
660  getColumnFrag(static_cast<size_t>(storage_lookup_result.storage_idx),
661  target_logical_idx,
662  ival_copy);
663  auto& frag_col_buffer = frag_col_buffers[col_lazy_fetch.local_col_id];
664  CHECK_LT(target_logical_idx, targets_.size());
665  const TargetInfo& target_info = targets_[target_logical_idx];
666  CHECK(!target_info.is_agg);
667  if (target_info.sql_type.is_string() &&
668  target_info.sql_type.get_compression() == kENCODING_NONE) {
669  VarlenDatum vd;
670  bool is_end{false};
672  reinterpret_cast<ChunkIter*>(const_cast<int8_t*>(frag_col_buffer)),
673  storage_lookup_result.fixedup_entry_idx,
674  false,
675  &vd,
676  &is_end);
677  CHECK(!is_end);
678  if (vd.is_null) {
679  return 0;
680  }
681  std::string fetched_str(reinterpret_cast<char*>(vd.pointer), vd.length);
682  return reinterpret_cast<int64_t>(row_set_mem_owner_->addString(fetched_str));
683  }
684  return result_set::lazy_decode(col_lazy_fetch, frag_col_buffer, ival_copy);
685  }
686  }
687  return ival;
688 }
bool is_null
Definition: Datum.h:59
DEVICE void ChunkIter_get_nth(ChunkIter *it, int n, bool uncompress, VarlenDatum *result, bool *is_end)
Definition: ChunkIter.cpp:182
int8_t * pointer
Definition: Datum.h:58
const std::vector< TargetInfo > targets_
Definition: ResultSet.h:943
std::shared_ptr< RowSetMemoryOwner > row_set_mem_owner_
Definition: ResultSet.h:954
int64_t lazy_decode(const ColumnLazyFetchInfo &col_lazy_fetch, const int8_t *byte_stream, const int64_t pos)
#define CHECK_LT(x, y)
Definition: Logger.h:303
std::vector< std::vector< std::vector< const int8_t * > > > col_buffers_
Definition: ResultSet.h:967
#define CHECK(condition)
Definition: Logger.h:291
std::vector< ColumnLazyFetchInfo > lazy_fetch_info_
Definition: ResultSet.h:966
const std::vector< const int8_t * > & getColumnFrag(const size_t storge_idx, const size_t col_logical_idx, int64_t &global_idx) const
size_t length
Definition: Datum.h:57

+ Here is the call graph for this function:

TargetValue ResultSet::makeGeoTargetValue ( const int8_t *  geo_target_ptr,
const size_t  slot_idx,
const TargetInfo target_info,
const size_t  target_logical_idx,
const size_t  entry_buff_idx 
) const
private

Definition at line 1652 of file ResultSetIteration.cpp.

References advance_to_next_columnar_target_buff(), CHECK, CHECK_EQ, CHECK_LT, col_buffers_, device_id_, device_type_, QueryMemoryDescriptor::didOutputColumnar(), findStorage(), geo_return_type_, SQLTypeInfo::get_compression(), SQLTypeInfo::get_type(), SQLTypeInfo::get_type_name(), getColumnFrag(), QueryMemoryDescriptor::getExecutor(), QueryMemoryDescriptor::getPaddedColWidthForRange(), QueryMemoryDescriptor::getPaddedSlotWidthBytes(), getStorageIndex(), getVarlenOutputInfo(), GPU, TargetInfo::is_agg, SQLTypeInfo::is_geometry(), ColumnLazyFetchInfo::is_lazily_fetched, FlatBufferManager::isFlatBuffer(), kENCODING_GEOINT, kLINESTRING, kMULTILINESTRING, kMULTIPOINT, kMULTIPOLYGON, kPOINT, kPOLYGON, lazy_fetch_info_, ColumnLazyFetchInfo::local_col_id, NestedArrayToGeoTargetValue(), query_mem_desc_, read_int_from_buff(), separate_varlen_storage_valid_, serialized_varlen_buffer_, QueryMemoryDescriptor::slotIsVarlenOutput(), TargetInfo::sql_type, and UNREACHABLE.

Referenced by getTargetValueFromBufferColwise(), and getTargetValueFromBufferRowwise().

1656  {
1657  CHECK(target_info.sql_type.is_geometry());
1658 
1659  auto getNextTargetBufferRowWise = [&](const size_t slot_idx, const size_t range) {
1660  return geo_target_ptr + query_mem_desc_.getPaddedColWidthForRange(slot_idx, range);
1661  };
1662 
1663  auto getNextTargetBufferColWise = [&](const size_t slot_idx, const size_t range) {
1664  const auto storage_info = findStorage(entry_buff_idx);
1665  auto crt_geo_col_ptr = geo_target_ptr;
1666  for (size_t i = slot_idx; i < slot_idx + range; i++) {
1667  crt_geo_col_ptr = advance_to_next_columnar_target_buff(
1668  crt_geo_col_ptr, storage_info.storage_ptr->query_mem_desc_, i);
1669  }
1670  // adjusting the column pointer to represent a pointer to the geo target value
1671  return crt_geo_col_ptr +
1672  storage_info.fixedup_entry_idx *
1673  storage_info.storage_ptr->query_mem_desc_.getPaddedSlotWidthBytes(
1674  slot_idx + range);
1675  };
1676 
1677  auto getNextTargetBuffer = [&](const size_t slot_idx, const size_t range) {
1679  ? getNextTargetBufferColWise(slot_idx, range)
1680  : getNextTargetBufferRowWise(slot_idx, range);
1681  };
1682 
1683  auto getCoordsDataPtr = [&](const int8_t* geo_target_ptr) {
1684  return read_int_from_buff(getNextTargetBuffer(slot_idx, 0),
1686  };
1687 
1688  auto getCoordsLength = [&](const int8_t* geo_target_ptr) {
1689  return read_int_from_buff(getNextTargetBuffer(slot_idx, 1),
1691  };
1692 
1693  auto getRingSizesPtr = [&](const int8_t* geo_target_ptr) {
1694  return read_int_from_buff(getNextTargetBuffer(slot_idx, 2),
1696  };
1697 
1698  auto getRingSizesLength = [&](const int8_t* geo_target_ptr) {
1699  return read_int_from_buff(getNextTargetBuffer(slot_idx, 3),
1701  };
1702 
1703  auto getPolyRingsPtr = [&](const int8_t* geo_target_ptr) {
1704  return read_int_from_buff(getNextTargetBuffer(slot_idx, 4),
1706  };
1707 
1708  auto getPolyRingsLength = [&](const int8_t* geo_target_ptr) {
1709  return read_int_from_buff(getNextTargetBuffer(slot_idx, 5),
1711  };
1712 
1713  auto getFragColBuffers = [&]() -> decltype(auto) {
1714  const auto storage_idx = getStorageIndex(entry_buff_idx);
1715  CHECK_LT(storage_idx.first, col_buffers_.size());
1716  auto global_idx = getCoordsDataPtr(geo_target_ptr);
1717  return getColumnFrag(storage_idx.first, target_logical_idx, global_idx);
1718  };
1719 
1720  const bool is_gpu_fetch = device_type_ == ExecutorDeviceType::GPU;
1721 
1722  auto getDataMgr = [&]() {
1723  auto executor = query_mem_desc_.getExecutor();
1724  CHECK(executor);
1725  return executor->getDataMgr();
1726  };
1727 
1728  auto getSeparateVarlenStorage = [&]() -> decltype(auto) {
1729  const auto storage_idx = getStorageIndex(entry_buff_idx);
1730  CHECK_LT(storage_idx.first, serialized_varlen_buffer_.size());
1731  const auto& varlen_buffer = serialized_varlen_buffer_[storage_idx.first];
1732  return varlen_buffer;
1733  };
1734 
1735  if (separate_varlen_storage_valid_ && getCoordsDataPtr(geo_target_ptr) < 0) {
1736  CHECK_EQ(-1, getCoordsDataPtr(geo_target_ptr));
1737  return TargetValue(nullptr);
1738  }
1739 
1740  const ColumnLazyFetchInfo* col_lazy_fetch = nullptr;
1741  if (!lazy_fetch_info_.empty()) {
1742  CHECK_LT(target_logical_idx, lazy_fetch_info_.size());
1743  col_lazy_fetch = &lazy_fetch_info_[target_logical_idx];
1744  }
1745 
1746  switch (target_info.sql_type.get_type()) {
1747  case kPOINT: {
1748  if (query_mem_desc_.slotIsVarlenOutput(slot_idx)) {
1749  auto varlen_output_info = getVarlenOutputInfo(entry_buff_idx);
1750  CHECK(varlen_output_info);
1751  auto geo_data_ptr = read_int_from_buff(
1752  geo_target_ptr, query_mem_desc_.getPaddedSlotWidthBytes(slot_idx));
1753  auto cpu_data_ptr =
1754  reinterpret_cast<int64_t>(varlen_output_info->computeCpuOffset(geo_data_ptr));
1755  return GeoTargetValueBuilder<kPOINT, GeoQueryOutputFetchHandler>::build(
1756  target_info.sql_type,
1758  /*data_mgr=*/nullptr,
1759  /*is_gpu_fetch=*/false,
1760  device_id_,
1761  cpu_data_ptr,
1762  target_info.sql_type.get_compression() == kENCODING_GEOINT ? 8 : 16);
1763  } else if (separate_varlen_storage_valid_ && !target_info.is_agg) {
1764  const auto& varlen_buffer = getSeparateVarlenStorage();
1765  CHECK_LT(static_cast<size_t>(getCoordsDataPtr(geo_target_ptr)),
1766  varlen_buffer.size());
1767 
1768  return GeoTargetValueBuilder<kPOINT, GeoQueryOutputFetchHandler>::build(
1769  target_info.sql_type,
1771  nullptr,
1772  false,
1773  device_id_,
1774  reinterpret_cast<int64_t>(
1775  varlen_buffer[getCoordsDataPtr(geo_target_ptr)].data()),
1776  static_cast<int64_t>(varlen_buffer[getCoordsDataPtr(geo_target_ptr)].size()));
1777  } else if (col_lazy_fetch && col_lazy_fetch->is_lazily_fetched) {
1778  const auto& frag_col_buffers = getFragColBuffers();
1779  return GeoTargetValueBuilder<kPOINT, GeoLazyFetchHandler>::build(
1780  target_info.sql_type,
1782  frag_col_buffers[col_lazy_fetch->local_col_id],
1783  getCoordsDataPtr(geo_target_ptr));
1784  } else {
1785  return GeoTargetValueBuilder<kPOINT, GeoQueryOutputFetchHandler>::build(
1786  target_info.sql_type,
1788  is_gpu_fetch ? getDataMgr() : nullptr,
1789  is_gpu_fetch,
1790  device_id_,
1791  getCoordsDataPtr(geo_target_ptr),
1792  getCoordsLength(geo_target_ptr));
1793  }
1794  break;
1795  }
1796  case kMULTIPOINT: {
1797  if (separate_varlen_storage_valid_ && !target_info.is_agg) {
1798  const auto& varlen_buffer = getSeparateVarlenStorage();
1799  CHECK_LT(static_cast<size_t>(getCoordsDataPtr(geo_target_ptr)),
1800  varlen_buffer.size());
1801 
1802  return GeoTargetValueBuilder<kMULTIPOINT, GeoQueryOutputFetchHandler>::build(
1803  target_info.sql_type,
1805  nullptr,
1806  false,
1807  device_id_,
1808  reinterpret_cast<int64_t>(
1809  varlen_buffer[getCoordsDataPtr(geo_target_ptr)].data()),
1810  static_cast<int64_t>(varlen_buffer[getCoordsDataPtr(geo_target_ptr)].size()));
1811  } else if (col_lazy_fetch && col_lazy_fetch->is_lazily_fetched) {
1812  const auto& frag_col_buffers = getFragColBuffers();
1813 
1814  auto ptr = frag_col_buffers[col_lazy_fetch->local_col_id];
1816  int64_t index = getCoordsDataPtr(geo_target_ptr);
1817  return NestedArrayToGeoTargetValue<1,
1821  ptr, index, target_info.sql_type, geo_return_type_);
1822  }
1823  return GeoTargetValueBuilder<kMULTIPOINT, GeoLazyFetchHandler>::build(
1824  target_info.sql_type,
1826  frag_col_buffers[col_lazy_fetch->local_col_id],
1827  getCoordsDataPtr(geo_target_ptr));
1828  } else {
1829  return GeoTargetValueBuilder<kMULTIPOINT, GeoQueryOutputFetchHandler>::build(
1830  target_info.sql_type,
1832  is_gpu_fetch ? getDataMgr() : nullptr,
1833  is_gpu_fetch,
1834  device_id_,
1835  getCoordsDataPtr(geo_target_ptr),
1836  getCoordsLength(geo_target_ptr));
1837  }
1838  break;
1839  }
1840  case kLINESTRING: {
1841  if (separate_varlen_storage_valid_ && !target_info.is_agg) {
1842  const auto& varlen_buffer = getSeparateVarlenStorage();
1843  CHECK_LT(static_cast<size_t>(getCoordsDataPtr(geo_target_ptr)),
1844  varlen_buffer.size());
1845 
1846  return GeoTargetValueBuilder<kLINESTRING, GeoQueryOutputFetchHandler>::build(
1847  target_info.sql_type,
1849  nullptr,
1850  false,
1851  device_id_,
1852  reinterpret_cast<int64_t>(
1853  varlen_buffer[getCoordsDataPtr(geo_target_ptr)].data()),
1854  static_cast<int64_t>(varlen_buffer[getCoordsDataPtr(geo_target_ptr)].size()));
1855  } else if (col_lazy_fetch && col_lazy_fetch->is_lazily_fetched) {
1856  const auto& frag_col_buffers = getFragColBuffers();
1857 
1858  auto ptr = frag_col_buffers[col_lazy_fetch->local_col_id];
1860  int64_t index = getCoordsDataPtr(geo_target_ptr);
1861  return NestedArrayToGeoTargetValue<1,
1865  ptr, index, target_info.sql_type, geo_return_type_);
1866  }
1867  return GeoTargetValueBuilder<kLINESTRING, GeoLazyFetchHandler>::build(
1868  target_info.sql_type,
1870  frag_col_buffers[col_lazy_fetch->local_col_id],
1871  getCoordsDataPtr(geo_target_ptr));
1872  } else {
1873  return GeoTargetValueBuilder<kLINESTRING, GeoQueryOutputFetchHandler>::build(
1874  target_info.sql_type,
1876  is_gpu_fetch ? getDataMgr() : nullptr,
1877  is_gpu_fetch,
1878  device_id_,
1879  getCoordsDataPtr(geo_target_ptr),
1880  getCoordsLength(geo_target_ptr));
1881  }
1882  break;
1883  }
1884  case kMULTILINESTRING: {
1885  if (separate_varlen_storage_valid_ && !target_info.is_agg) {
1886  const auto& varlen_buffer = getSeparateVarlenStorage();
1887  CHECK_LT(static_cast<size_t>(getCoordsDataPtr(geo_target_ptr) + 1),
1888  varlen_buffer.size());
1889 
1890  return GeoTargetValueBuilder<kMULTILINESTRING, GeoQueryOutputFetchHandler>::build(
1891  target_info.sql_type,
1893  nullptr,
1894  false,
1895  device_id_,
1896  reinterpret_cast<int64_t>(
1897  varlen_buffer[getCoordsDataPtr(geo_target_ptr)].data()),
1898  static_cast<int64_t>(varlen_buffer[getCoordsDataPtr(geo_target_ptr)].size()),
1899  reinterpret_cast<int64_t>(
1900  varlen_buffer[getCoordsDataPtr(geo_target_ptr) + 1].data()),
1901  static_cast<int64_t>(
1902  varlen_buffer[getCoordsDataPtr(geo_target_ptr) + 1].size()));
1903  } else if (col_lazy_fetch && col_lazy_fetch->is_lazily_fetched) {
1904  const auto& frag_col_buffers = getFragColBuffers();
1905 
1906  auto ptr = frag_col_buffers[col_lazy_fetch->local_col_id];
1908  int64_t index = getCoordsDataPtr(geo_target_ptr);
1909  return NestedArrayToGeoTargetValue<2,
1913  ptr, index, target_info.sql_type, geo_return_type_);
1914  }
1915 
1916  return GeoTargetValueBuilder<kMULTILINESTRING, GeoLazyFetchHandler>::build(
1917  target_info.sql_type,
1919  frag_col_buffers[col_lazy_fetch->local_col_id],
1920  getCoordsDataPtr(geo_target_ptr),
1921  frag_col_buffers[col_lazy_fetch->local_col_id + 1],
1922  getCoordsDataPtr(geo_target_ptr));
1923  } else {
1924  return GeoTargetValueBuilder<kMULTILINESTRING, GeoQueryOutputFetchHandler>::build(
1925  target_info.sql_type,
1927  is_gpu_fetch ? getDataMgr() : nullptr,
1928  is_gpu_fetch,
1929  device_id_,
1930  getCoordsDataPtr(geo_target_ptr),
1931  getCoordsLength(geo_target_ptr),
1932  getRingSizesPtr(geo_target_ptr),
1933  getRingSizesLength(geo_target_ptr) * 4);
1934  }
1935  break;
1936  }
1937  case kPOLYGON: {
1938  if (separate_varlen_storage_valid_ && !target_info.is_agg) {
1939  const auto& varlen_buffer = getSeparateVarlenStorage();
1940  CHECK_LT(static_cast<size_t>(getCoordsDataPtr(geo_target_ptr) + 1),
1941  varlen_buffer.size());
1942 
1943  return GeoTargetValueBuilder<kPOLYGON, GeoQueryOutputFetchHandler>::build(
1944  target_info.sql_type,
1946  nullptr,
1947  false,
1948  device_id_,
1949  reinterpret_cast<int64_t>(
1950  varlen_buffer[getCoordsDataPtr(geo_target_ptr)].data()),
1951  static_cast<int64_t>(varlen_buffer[getCoordsDataPtr(geo_target_ptr)].size()),
1952  reinterpret_cast<int64_t>(
1953  varlen_buffer[getCoordsDataPtr(geo_target_ptr) + 1].data()),
1954  static_cast<int64_t>(
1955  varlen_buffer[getCoordsDataPtr(geo_target_ptr) + 1].size()));
1956  } else if (col_lazy_fetch && col_lazy_fetch->is_lazily_fetched) {
1957  const auto& frag_col_buffers = getFragColBuffers();
1958  auto ptr = frag_col_buffers[col_lazy_fetch->local_col_id];
1960  int64_t index = getCoordsDataPtr(geo_target_ptr);
1961  return NestedArrayToGeoTargetValue<2,
1965  ptr, index, target_info.sql_type, geo_return_type_);
1966  }
1967 
1968  return GeoTargetValueBuilder<kPOLYGON, GeoLazyFetchHandler>::build(
1969  target_info.sql_type,
1971  frag_col_buffers[col_lazy_fetch->local_col_id],
1972  getCoordsDataPtr(geo_target_ptr),
1973  frag_col_buffers[col_lazy_fetch->local_col_id + 1],
1974  getCoordsDataPtr(geo_target_ptr));
1975  } else {
1976  return GeoTargetValueBuilder<kPOLYGON, GeoQueryOutputFetchHandler>::build(
1977  target_info.sql_type,
1979  is_gpu_fetch ? getDataMgr() : nullptr,
1980  is_gpu_fetch,
1981  device_id_,
1982  getCoordsDataPtr(geo_target_ptr),
1983  getCoordsLength(geo_target_ptr),
1984  getRingSizesPtr(geo_target_ptr),
1985  getRingSizesLength(geo_target_ptr) * 4);
1986  }
1987  break;
1988  }
1989  case kMULTIPOLYGON: {
1990  if (separate_varlen_storage_valid_ && !target_info.is_agg) {
1991  const auto& varlen_buffer = getSeparateVarlenStorage();
1992  CHECK_LT(static_cast<size_t>(getCoordsDataPtr(geo_target_ptr) + 2),
1993  varlen_buffer.size());
1994 
1995  return GeoTargetValueBuilder<kMULTIPOLYGON, GeoQueryOutputFetchHandler>::build(
1996  target_info.sql_type,
1998  nullptr,
1999  false,
2000  device_id_,
2001  reinterpret_cast<int64_t>(
2002  varlen_buffer[getCoordsDataPtr(geo_target_ptr)].data()),
2003  static_cast<int64_t>(varlen_buffer[getCoordsDataPtr(geo_target_ptr)].size()),
2004  reinterpret_cast<int64_t>(
2005  varlen_buffer[getCoordsDataPtr(geo_target_ptr) + 1].data()),
2006  static_cast<int64_t>(
2007  varlen_buffer[getCoordsDataPtr(geo_target_ptr) + 1].size()),
2008  reinterpret_cast<int64_t>(
2009  varlen_buffer[getCoordsDataPtr(geo_target_ptr) + 2].data()),
2010  static_cast<int64_t>(
2011  varlen_buffer[getCoordsDataPtr(geo_target_ptr) + 2].size()));
2012  } else if (col_lazy_fetch && col_lazy_fetch->is_lazily_fetched) {
2013  const auto& frag_col_buffers = getFragColBuffers();
2014  auto ptr = frag_col_buffers[col_lazy_fetch->local_col_id];
2016  int64_t index = getCoordsDataPtr(geo_target_ptr);
2017  return NestedArrayToGeoTargetValue<3,
2021  ptr, index, target_info.sql_type, geo_return_type_);
2022  }
2023 
2024  return GeoTargetValueBuilder<kMULTIPOLYGON, GeoLazyFetchHandler>::build(
2025  target_info.sql_type,
2027  frag_col_buffers[col_lazy_fetch->local_col_id],
2028  getCoordsDataPtr(geo_target_ptr),
2029  frag_col_buffers[col_lazy_fetch->local_col_id + 1],
2030  getCoordsDataPtr(geo_target_ptr),
2031  frag_col_buffers[col_lazy_fetch->local_col_id + 2],
2032  getCoordsDataPtr(geo_target_ptr));
2033  } else {
2034  return GeoTargetValueBuilder<kMULTIPOLYGON, GeoQueryOutputFetchHandler>::build(
2035  target_info.sql_type,
2037  is_gpu_fetch ? getDataMgr() : nullptr,
2038  is_gpu_fetch,
2039  device_id_,
2040  getCoordsDataPtr(geo_target_ptr),
2041  getCoordsLength(geo_target_ptr),
2042  getRingSizesPtr(geo_target_ptr),
2043  getRingSizesLength(geo_target_ptr) * 4,
2044  getPolyRingsPtr(geo_target_ptr),
2045  getPolyRingsLength(geo_target_ptr) * 4);
2046  }
2047  break;
2048  }
2049  default:
2050  throw std::runtime_error("Unknown Geometry type encountered: " +
2051  target_info.sql_type.get_type_name());
2052  }
2053  UNREACHABLE();
2054  return TargetValue(nullptr);
2055 }
#define CHECK_EQ(x, y)
Definition: Logger.h:301
bool slotIsVarlenOutput(const size_t slot_idx) const
std::pair< size_t, size_t > getStorageIndex(const size_t entry_idx) const
Definition: ResultSet.cpp:926
GeoReturnType geo_return_type_
Definition: ResultSet.h:988
T advance_to_next_columnar_target_buff(T target_ptr, const QueryMemoryDescriptor &query_mem_desc, const size_t target_slot_idx)
SQLTypeInfo sql_type
Definition: TargetInfo.h:52
TargetValue NestedArrayToGeoTargetValue(const int8_t *buf, const int64_t index, const SQLTypeInfo &ti, const ResultSet::GeoReturnType return_type)
QueryMemoryDescriptor query_mem_desc_
Definition: ResultSet.h:947
#define UNREACHABLE()
Definition: Logger.h:338
int64_t read_int_from_buff(const int8_t *ptr, const int8_t compact_sz)
std::vector< SerializedVarlenBufferStorage > serialized_varlen_buffer_
Definition: ResultSet.h:979
HOST DEVICE SQLTypes get_type() const
Definition: sqltypes.h:391
bool is_agg
Definition: TargetInfo.h:50
size_t getPaddedColWidthForRange(const size_t offset, const size_t range) const
StorageLookupResult findStorage(const size_t entry_idx) const
Definition: ResultSet.cpp:951
const int8_t getPaddedSlotWidthBytes(const size_t slot_idx) const
const VarlenOutputInfo * getVarlenOutputInfo(const size_t entry_idx) const
#define CHECK_LT(x, y)
Definition: Logger.h:303
HOST DEVICE EncodingType get_compression() const
Definition: sqltypes.h:399
std::vector< std::vector< std::vector< const int8_t * > > > col_buffers_
Definition: ResultSet.h:967
std::string get_type_name() const
Definition: sqltypes.h:484
const bool is_lazily_fetched
const ExecutorDeviceType device_type_
Definition: ResultSet.h:944
#define CHECK(condition)
Definition: Logger.h:291
bool is_geometry() const
Definition: sqltypes.h:597
std::vector< ColumnLazyFetchInfo > lazy_fetch_info_
Definition: ResultSet.h:966
bool separate_varlen_storage_valid_
Definition: ResultSet.h:980
boost::variant< ScalarTargetValue, ArrayTargetValue, GeoTargetValue, GeoTargetValuePtr > TargetValue
Definition: TargetValue.h:195
HOST static DEVICE bool isFlatBuffer(const void *buffer)
Definition: FlatBuffer.h:528
const std::vector< const int8_t * > & getColumnFrag(const size_t storge_idx, const size_t col_logical_idx, int64_t &global_idx) const
const Executor * getExecutor() const
const int device_id_
Definition: ResultSet.h:945

+ Here is the call graph for this function:

+ Here is the caller graph for this function:

ScalarTargetValue ResultSet::makeStringTargetValue ( SQLTypeInfo const &  chosen_type,
bool const  translate_strings,
int64_t const  ival 
) const
private

Definition at line 2072 of file ResultSetIteration.cpp.

References getString(), and NULL_INT.

Referenced by convertToScalarTargetValue(), and makeTargetValue().

2074  {
2075  if (translate_strings) {
2076  if (static_cast<int32_t>(ival) == NULL_INT) { // TODO(alex): this isn't nice, fix it
2077  return NullableString(nullptr);
2078  } else {
2079  return NullableString(getString(chosen_type, ival));
2080  }
2081  } else {
2082  return static_cast<int64_t>(static_cast<int32_t>(ival));
2083  }
2084 }
#define NULL_INT
boost::variant< std::string, void * > NullableString
Definition: TargetValue.h:179
std::string getString(SQLTypeInfo const &, int64_t const ival) const

+ Here is the call graph for this function:

+ Here is the caller graph for this function:

TargetValue ResultSet::makeTargetValue ( const int8_t *  ptr,
const int8_t  compact_sz,
const TargetInfo target_info,
const size_t  target_logical_idx,
const bool  translate_strings,
const bool  decimal_to_double,
const size_t  entry_buff_idx 
) const
private

Definition at line 2087 of file ResultSetIteration.cpp.

References TargetInfo::agg_kind, calculateQuantile(), CHECK, CHECK_EQ, CHECK_GE, CHECK_LT, col_buffers_, convertToScalarTargetValue(), count_distinct_set_size(), decimal_to_int_type(), exp_to_scale(), QueryMemoryDescriptor::forceFourByteFloat(), get_compact_type(), getColumnFrag(), QueryMemoryDescriptor::getCountDistinctDescriptor(), getStorageIndex(), inline_int_null_val(), anonymous_namespace{ResultSetIteration.cpp}::int_resize_cast(), TargetInfo::is_agg, SQLTypeInfo::is_date_in_days(), is_distinct_target(), QueryMemoryDescriptor::isLogicalSizedColumnsAllowed(), isNullIval(), kAPPROX_QUANTILE, kAVG, kBIGINT, kENCODING_DICT, kFLOAT, kMAX, kMIN, kMODE, kSINGLE_VALUE, kSUM, kSUM_IF, result_set::lazy_decode(), lazy_fetch_info_, makeStringTargetValue(), NULL_DOUBLE, nullScalarTargetValue(), query_mem_desc_, read_int_from_buff(), and TargetInfo::sql_type.

Referenced by getTargetValueFromBufferColwise(), and getTargetValueFromBufferRowwise().

2093  {
2094  auto actual_compact_sz = compact_sz;
2095  const auto& type_info = target_info.sql_type;
2096  if (type_info.get_type() == kFLOAT && !query_mem_desc_.forceFourByteFloat()) {
2098  actual_compact_sz = sizeof(float);
2099  } else {
2100  actual_compact_sz = sizeof(double);
2101  }
2102  if (target_info.is_agg &&
2103  (target_info.agg_kind == kAVG || target_info.agg_kind == kSUM ||
2104  target_info.agg_kind == kSUM_IF || target_info.agg_kind == kMIN ||
2105  target_info.agg_kind == kMAX || target_info.agg_kind == kSINGLE_VALUE)) {
2106  // The above listed aggregates use two floats in a single 8-byte slot. Set the
2107  // padded size to 4 bytes to properly read each value.
2108  actual_compact_sz = sizeof(float);
2109  }
2110  }
2111  if (get_compact_type(target_info).is_date_in_days()) {
2112  // Dates encoded in days are converted to 8 byte values on read.
2113  actual_compact_sz = sizeof(int64_t);
2114  }
2115 
2116  // String dictionary keys are read as 32-bit values regardless of encoding
2117  if (type_info.is_string() && type_info.get_compression() == kENCODING_DICT &&
2118  type_info.getStringDictKey().dict_id) {
2119  actual_compact_sz = sizeof(int32_t);
2120  }
2121 
2122  auto ival = read_int_from_buff(ptr, actual_compact_sz);
2123  const auto& chosen_type = get_compact_type(target_info);
2124  if (!lazy_fetch_info_.empty()) {
2125  CHECK_LT(target_logical_idx, lazy_fetch_info_.size());
2126  const auto& col_lazy_fetch = lazy_fetch_info_[target_logical_idx];
2127  if (col_lazy_fetch.is_lazily_fetched) {
2128  CHECK_GE(ival, 0);
2129  const auto storage_idx = getStorageIndex(entry_buff_idx);
2130  CHECK_LT(storage_idx.first, col_buffers_.size());
2131  auto& frag_col_buffers = getColumnFrag(storage_idx.first, target_logical_idx, ival);
2132  CHECK_LT(size_t(col_lazy_fetch.local_col_id), frag_col_buffers.size());
2133  ival = result_set::lazy_decode(
2134  col_lazy_fetch, frag_col_buffers[col_lazy_fetch.local_col_id], ival);
2135  if (chosen_type.is_fp()) {
2136  const auto dval = *reinterpret_cast<const double*>(may_alias_ptr(&ival));
2137  if (chosen_type.get_type() == kFLOAT) {
2138  return ScalarTargetValue(static_cast<float>(dval));
2139  } else {
2140  return ScalarTargetValue(dval);
2141  }
2142  }
2143  }
2144  }
2145  if (target_info.agg_kind == kMODE) {
2146  if (!isNullIval(chosen_type, translate_strings, ival)) {
2147  auto const* const* const agg_mode = reinterpret_cast<AggMode const* const*>(ptr);
2148  if (std::optional<int64_t> const mode = (*agg_mode)->mode()) {
2149  return convertToScalarTargetValue(chosen_type, translate_strings, *mode);
2150  }
2151  }
2152  return nullScalarTargetValue(chosen_type, translate_strings);
2153  }
2154  if (chosen_type.is_fp()) {
2155  if (target_info.agg_kind == kAPPROX_QUANTILE) {
2156  return *reinterpret_cast<double const*>(ptr) == NULL_DOUBLE
2157  ? NULL_DOUBLE // sql_validate / just_validate
2158  : calculateQuantile(*reinterpret_cast<quantile::TDigest* const*>(ptr));
2159  }
2160  switch (actual_compact_sz) {
2161  case 8: {
2162  const auto dval = *reinterpret_cast<const double*>(ptr);
2163  return chosen_type.get_type() == kFLOAT
2164  ? ScalarTargetValue(static_cast<const float>(dval))
2165  : ScalarTargetValue(dval);
2166  }
2167  case 4: {
2168  CHECK_EQ(kFLOAT, chosen_type.get_type());
2169  return *reinterpret_cast<const float*>(ptr);
2170  }
2171  default:
2172  CHECK(false);
2173  }
2174  }
2175  if (chosen_type.is_integer() || chosen_type.is_boolean() || chosen_type.is_time() ||
2176  chosen_type.is_timeinterval()) {
2177  if (is_distinct_target(target_info)) {
2179  ival, query_mem_desc_.getCountDistinctDescriptor(target_logical_idx)));
2180  }
2181  // TODO(alex): remove int_resize_cast, make read_int_from_buff return the
2182  // right type instead
2183  if (inline_int_null_val(chosen_type) ==
2184  int_resize_cast(ival, chosen_type.get_logical_size())) {
2185  return inline_int_null_val(type_info);
2186  }
2187  return ival;
2188  }
2189  if (chosen_type.is_string() && chosen_type.get_compression() == kENCODING_DICT) {
2190  return makeStringTargetValue(chosen_type, translate_strings, ival);
2191  }
2192  if (chosen_type.is_decimal()) {
2193  if (decimal_to_double) {
2194  if (target_info.is_agg &&
2195  (target_info.agg_kind == kAVG || target_info.agg_kind == kSUM ||
2196  target_info.agg_kind == kSUM_IF || target_info.agg_kind == kMIN ||
2197  target_info.agg_kind == kMAX) &&
2198  ival == inline_int_null_val(SQLTypeInfo(kBIGINT, false))) {
2199  return NULL_DOUBLE;
2200  }
2201  if (!chosen_type.get_notnull() &&
2202  ival ==
2203  inline_int_null_val(SQLTypeInfo(decimal_to_int_type(chosen_type), false))) {
2204  return NULL_DOUBLE;
2205  }
2206  return static_cast<double>(ival) / exp_to_scale(chosen_type.get_scale());
2207  }
2208  return ival;
2209  }
2210  CHECK(false);
2211  return TargetValue(int64_t(0));
2212 }
#define CHECK_EQ(x, y)
Definition: Logger.h:301
std::pair< size_t, size_t > getStorageIndex(const size_t entry_idx) const
Definition: ResultSet.cpp:926
#define NULL_DOUBLE
double decimal_to_double(const SQLTypeInfo &otype, int64_t oval)
static ScalarTargetValue nullScalarTargetValue(SQLTypeInfo const &, bool const translate_strings)
bool isLogicalSizedColumnsAllowed() const
SQLTypeInfo sql_type
Definition: TargetInfo.h:52
QueryMemoryDescriptor query_mem_desc_
Definition: ResultSet.h:947
#define CHECK_GE(x, y)
Definition: Logger.h:306
int64_t read_int_from_buff(const int8_t *ptr, const int8_t compact_sz)
static bool isNullIval(SQLTypeInfo const &, bool const translate_strings, int64_t const ival)
ScalarTargetValue makeStringTargetValue(SQLTypeInfo const &chosen_type, bool const translate_strings, int64_t const ival) const
Definition: sqldefs.h:78
const SQLTypeInfo get_compact_type(const TargetInfo &target)
bool is_agg
Definition: TargetInfo.h:50
int64_t count_distinct_set_size(const int64_t set_handle, const CountDistinctDescriptor &count_distinct_desc)
Definition: CountDistinct.h:75
int64_t lazy_decode(const ColumnLazyFetchInfo &col_lazy_fetch, const int8_t *byte_stream, const int64_t pos)
ScalarTargetValue convertToScalarTargetValue(SQLTypeInfo const &, bool const translate_strings, int64_t const val) const
Definition: sqldefs.h:80
bool is_distinct_target(const TargetInfo &target_info)
Definition: TargetInfo.h:102
static double calculateQuantile(quantile::TDigest *const t_digest)
Definition: ResultSet.cpp:1047
SQLAgg agg_kind
Definition: TargetInfo.h:51
SQLTypes decimal_to_int_type(const SQLTypeInfo &ti)
Definition: Datum.cpp:561
const CountDistinctDescriptor & getCountDistinctDescriptor(const size_t idx) const
#define CHECK_LT(x, y)
Definition: Logger.h:303
bool is_date_in_days() const
Definition: sqltypes.h:1018
int64_t int_resize_cast(const int64_t ival, const size_t sz)
std::vector< std::vector< std::vector< const int8_t * > > > col_buffers_
Definition: ResultSet.h:967
#define CHECK(condition)
Definition: Logger.h:291
std::vector< ColumnLazyFetchInfo > lazy_fetch_info_
Definition: ResultSet.h:966
uint64_t exp_to_scale(const unsigned exp)
int64_t inline_int_null_val(const SQL_TYPE_INFO &ti)
boost::variant< ScalarTargetValue, ArrayTargetValue, GeoTargetValue, GeoTargetValuePtr > TargetValue
Definition: TargetValue.h:195
Definition: sqldefs.h:79
Definition: sqldefs.h:77
Definition: sqldefs.h:86
const std::vector< const int8_t * > & getColumnFrag(const size_t storge_idx, const size_t col_logical_idx, int64_t &global_idx) const
boost::variant< int64_t, double, float, NullableString > ScalarTargetValue
Definition: TargetValue.h:180

+ Here is the call graph for this function:

+ Here is the caller graph for this function:

TargetValue ResultSet::makeVarlenTargetValue ( const int8_t *  ptr1,
const int8_t  compact_sz1,
const int8_t *  ptr2,
const int8_t  compact_sz2,
const TargetInfo target_info,
const size_t  target_logical_idx,
const bool  translate_strings,
const size_t  entry_buff_idx 
) const
private

Definition at line 1358 of file ResultSetIteration.cpp.

References anonymous_namespace{ResultSetIteration.cpp}::build_array_target_value(), CHECK, CHECK_EQ, CHECK_GE, CHECK_GT, CHECK_LT, ChunkIter_get_nth(), col_buffers_, device_id_, device_type_, SQLTypeInfo::get_array_context_logical_size(), SQLTypeInfo::get_compression(), SQLTypeInfo::get_elem_type(), SQLTypeInfo::get_type(), getColumnFrag(), QueryMemoryDescriptor::getExecutor(), getQueryEngineCudaStreamForDevice(), getStorageIndex(), GPU, TargetInfo::is_agg, SQLTypeInfo::is_array(), VarlenDatum::is_null, is_null(), SQLTypeInfo::is_string(), FlatBufferManager::isFlatBuffer(), kARRAY, kENCODING_NONE, lazy_fetch_info_, VarlenDatum::length, run_benchmark_import::optional, VarlenDatum::pointer, query_mem_desc_, read_int_from_buff(), row_set_mem_owner_, separate_varlen_storage_valid_, serialized_varlen_buffer_, TargetInfo::sql_type, and VarlenArray_get_nth().

Referenced by getTargetValueFromBufferColwise(), and getTargetValueFromBufferRowwise().

1365  {
1366  auto varlen_ptr = read_int_from_buff(ptr1, compact_sz1);
1367  if (separate_varlen_storage_valid_ && !target_info.is_agg) {
1368  if (varlen_ptr < 0) {
1369  CHECK_EQ(-1, varlen_ptr);
1370  if (target_info.sql_type.get_type() == kARRAY) {
1371  return ArrayTargetValue(boost::optional<std::vector<ScalarTargetValue>>{});
1372  }
1373  return TargetValue(nullptr);
1374  }
1375  const auto storage_idx = getStorageIndex(entry_buff_idx);
1376  if (target_info.sql_type.is_string()) {
1377  CHECK(target_info.sql_type.get_compression() == kENCODING_NONE);
1378  CHECK_LT(storage_idx.first, serialized_varlen_buffer_.size());
1379  const auto& varlen_buffer_for_storage =
1380  serialized_varlen_buffer_[storage_idx.first];
1381  CHECK_LT(static_cast<size_t>(varlen_ptr), varlen_buffer_for_storage.size());
1382  return varlen_buffer_for_storage[varlen_ptr];
1383  } else if (target_info.sql_type.get_type() == kARRAY) {
1384  CHECK_LT(storage_idx.first, serialized_varlen_buffer_.size());
1385  const auto& varlen_buffer = serialized_varlen_buffer_[storage_idx.first];
1386  CHECK_LT(static_cast<size_t>(varlen_ptr), varlen_buffer.size());
1387 
1388  return build_array_target_value(
1389  target_info.sql_type,
1390  reinterpret_cast<const int8_t*>(varlen_buffer[varlen_ptr].data()),
1391  varlen_buffer[varlen_ptr].size(),
1392  translate_strings,
1394  } else {
1395  CHECK(false);
1396  }
1397  }
1398  if (!lazy_fetch_info_.empty()) {
1399  CHECK_LT(target_logical_idx, lazy_fetch_info_.size());
1400  const auto& col_lazy_fetch = lazy_fetch_info_[target_logical_idx];
1401  if (col_lazy_fetch.is_lazily_fetched) {
1402  const auto storage_idx = getStorageIndex(entry_buff_idx);
1403  CHECK_LT(storage_idx.first, col_buffers_.size());
1404  auto& frag_col_buffers =
1405  getColumnFrag(storage_idx.first, target_logical_idx, varlen_ptr);
1406  bool is_end{false};
1407  auto col_buf = const_cast<int8_t*>(frag_col_buffers[col_lazy_fetch.local_col_id]);
1408  if (target_info.sql_type.is_string()) {
1409  if (FlatBufferManager::isFlatBuffer(col_buf)) {
1410  FlatBufferManager m{col_buf};
1411  std::string fetched_str;
1412  bool is_null{};
1413  auto status = m.getItem(varlen_ptr, fetched_str, is_null);
1414  if (is_null) {
1415  return TargetValue(nullptr);
1416  }
1417  CHECK_EQ(status, FlatBufferManager::Status::Success);
1418  return fetched_str;
1419  }
1420  VarlenDatum vd;
1422  reinterpret_cast<ChunkIter*>(col_buf), varlen_ptr, false, &vd, &is_end);
1423  CHECK(!is_end);
1424  if (vd.is_null) {
1425  return TargetValue(nullptr);
1426  }
1427  CHECK(vd.pointer);
1428  CHECK_GT(vd.length, 0u);
1429  std::string fetched_str(reinterpret_cast<char*>(vd.pointer), vd.length);
1430  return fetched_str;
1431  } else {
1432  CHECK(target_info.sql_type.is_array());
1433  ArrayDatum ad;
1434  if (FlatBufferManager::isFlatBuffer(col_buf)) {
1435  VarlenArray_get_nth(col_buf, varlen_ptr, &ad, &is_end);
1436  } else {
1438  reinterpret_cast<ChunkIter*>(col_buf), varlen_ptr, &ad, &is_end);
1439  }
1440  if (ad.is_null) {
1441  return ArrayTargetValue(boost::optional<std::vector<ScalarTargetValue>>{});
1442  }
1443  CHECK_GE(ad.length, 0u);
1444  if (ad.length > 0) {
1445  CHECK(ad.pointer);
1446  }
1447  return build_array_target_value(target_info.sql_type,
1448  ad.pointer,
1449  ad.length,
1450  translate_strings,
1452  }
1453  }
1454  }
1455  if (!varlen_ptr) {
1456  if (target_info.sql_type.is_array()) {
1457  return ArrayTargetValue(boost::optional<std::vector<ScalarTargetValue>>{});
1458  }
1459  return TargetValue(nullptr);
1460  }
1461  auto length = read_int_from_buff(ptr2, compact_sz2);
1462  if (target_info.sql_type.is_array()) {
1463  const auto& elem_ti = target_info.sql_type.get_elem_type();
1464  length *= elem_ti.get_array_context_logical_size();
1465  }
1466  std::vector<int8_t> cpu_buffer;
1467  if (varlen_ptr && device_type_ == ExecutorDeviceType::GPU) {
1468  cpu_buffer.resize(length);
1469  const auto executor = query_mem_desc_.getExecutor();
1470  CHECK(executor);
1471  auto data_mgr = executor->getDataMgr();
1472  auto allocator = std::make_unique<CudaAllocator>(
1473  data_mgr, device_id_, getQueryEngineCudaStreamForDevice(device_id_));
1474 
1475  allocator->copyFromDevice(
1476  &cpu_buffer[0], reinterpret_cast<int8_t*>(varlen_ptr), length);
1477  varlen_ptr = reinterpret_cast<int64_t>(&cpu_buffer[0]);
1478  }
1479  if (target_info.sql_type.is_array()) {
1480  return build_array_target_value(target_info.sql_type,
1481  reinterpret_cast<const int8_t*>(varlen_ptr),
1482  length,
1483  translate_strings,
1485  }
1486  return std::string(reinterpret_cast<char*>(varlen_ptr), length);
1487 }
#define CHECK_EQ(x, y)
Definition: Logger.h:301
std::pair< size_t, size_t > getStorageIndex(const size_t entry_idx) const
Definition: ResultSet.cpp:926
bool is_null
Definition: Datum.h:59
SQLTypeInfo sql_type
Definition: TargetInfo.h:52
QueryMemoryDescriptor query_mem_desc_
Definition: ResultSet.h:947
#define CHECK_GE(x, y)
Definition: Logger.h:306
int64_t read_int_from_buff(const int8_t *ptr, const int8_t compact_sz)
std::vector< SerializedVarlenBufferStorage > serialized_varlen_buffer_
Definition: ResultSet.h:979
HOST DEVICE SQLTypes get_type() const
Definition: sqltypes.h:391
#define CHECK_GT(x, y)
Definition: Logger.h:305
DEVICE void ChunkIter_get_nth(ChunkIter *it, int n, bool uncompress, VarlenDatum *result, bool *is_end)
Definition: ChunkIter.cpp:182
TargetValue build_array_target_value(const int8_t *buff, const size_t buff_sz, std::shared_ptr< RowSetMemoryOwner > row_set_mem_owner)
int8_t * pointer
Definition: Datum.h:58
std::conditional_t< is_cuda_compiler(), DeviceArrayDatum, HostArrayDatum > ArrayDatum
Definition: sqltypes.h:229
std::shared_ptr< RowSetMemoryOwner > row_set_mem_owner_
Definition: ResultSet.h:954
bool is_agg
Definition: TargetInfo.h:50
CONSTEXPR DEVICE bool is_null(const T &value)
boost::optional< std::vector< ScalarTargetValue >> ArrayTargetValue
Definition: TargetValue.h:181
#define CHECK_LT(x, y)
Definition: Logger.h:303
HOST DEVICE EncodingType get_compression() const
Definition: sqltypes.h:399
int get_array_context_logical_size() const
Definition: sqltypes.h:691
std::vector< std::vector< std::vector< const int8_t * > > > col_buffers_
Definition: ResultSet.h:967
CUstream getQueryEngineCudaStreamForDevice(int device_num)
Definition: QueryEngine.cpp:7
const ExecutorDeviceType device_type_
Definition: ResultSet.h:944
#define CHECK(condition)
Definition: Logger.h:291
std::vector< ColumnLazyFetchInfo > lazy_fetch_info_
Definition: ResultSet.h:966
bool separate_varlen_storage_valid_
Definition: ResultSet.h:980
boost::variant< ScalarTargetValue, ArrayTargetValue, GeoTargetValue, GeoTargetValuePtr > TargetValue
Definition: TargetValue.h:195
bool is_string() const
Definition: sqltypes.h:561
HOST static DEVICE bool isFlatBuffer(const void *buffer)
Definition: FlatBuffer.h:528
SQLTypeInfo get_elem_type() const
Definition: sqltypes.h:977
bool is_array() const
Definition: sqltypes.h:585
const std::vector< const int8_t * > & getColumnFrag(const size_t storge_idx, const size_t col_logical_idx, int64_t &global_idx) const
const Executor * getExecutor() const
DEVICE void VarlenArray_get_nth(int8_t *buf, int n, ArrayDatum *result, bool *is_end)
Definition: sqltypes.h:1716
size_t length
Definition: Datum.h:57
const int device_id_
Definition: ResultSet.h:945

+ Here is the call graph for this function:

+ Here is the caller graph for this function:

void ResultSet::moveToBegin ( ) const

Definition at line 737 of file ResultSet.cpp.

737  {
738  crt_row_buff_idx_ = 0;
739  fetched_so_far_ = 0;
740 }
size_t fetched_so_far_
Definition: ResultSet.h:951
size_t crt_row_buff_idx_
Definition: ResultSet.h:950
ScalarTargetValue ResultSet::nullScalarTargetValue ( SQLTypeInfo const &  ti,
bool const  translate_strings 
)
static

Definition at line 1101 of file ResultSetIteration.cpp.

References inline_int_null_val(), SQLTypeInfo::is_any(), SQLTypeInfo::is_string(), kDOUBLE, kFLOAT, NULL_DOUBLE, NULL_FLOAT, and NULL_INT.

Referenced by makeTargetValue().

1102  {
1103  return ti.is_any<kDOUBLE>() ? ScalarTargetValue(NULL_DOUBLE)
1104  : ti.is_any<kFLOAT>() ? ScalarTargetValue(NULL_FLOAT)
1105  : ti.is_string() ? translate_strings
1106  ? ScalarTargetValue(NullableString(nullptr))
1107  : ScalarTargetValue(static_cast<int64_t>(NULL_INT))
1108  : ScalarTargetValue(inline_int_null_val(ti));
1109 }
#define NULL_DOUBLE
#define NULL_FLOAT
bool is_any(T &&value)
Definition: misc.h:267
#define NULL_INT
boost::variant< std::string, void * > NullableString
Definition: TargetValue.h:179
int64_t inline_int_null_val(const SQL_TYPE_INFO &ti)
boost::variant< int64_t, double, float, NullableString > ScalarTargetValue
Definition: TargetValue.h:180

+ Here is the call graph for this function:

+ Here is the caller graph for this function:

size_t ResultSet::parallelRowCount ( ) const
private

Definition at line 635 of file ResultSet.cpp.

References anonymous_namespace{ResultSet.cpp}::get_truncated_row_count(), threading_serial::parallel_reduce(), and logger::thread_local_ids().

635  {
636  using namespace threading;
637  auto execute_parallel_row_count =
638  [this, parent_thread_local_ids = logger::thread_local_ids()](
639  const blocked_range<size_t>& r, size_t row_count) {
640  logger::LocalIdsScopeGuard lisg = parent_thread_local_ids.setNewThreadId();
641  for (size_t i = r.begin(); i < r.end(); ++i) {
642  if (!isRowAtEmpty(i)) {
643  ++row_count;
644  }
645  }
646  return row_count;
647  };
648  const auto row_count = parallel_reduce(blocked_range<size_t>(0, entryCount()),
649  size_t(0),
650  execute_parallel_row_count,
651  std::plus<int>());
652  return get_truncated_row_count(row_count, getLimit(), drop_first_);
653 }
size_t getLimit() const
Definition: ResultSet.cpp:1409
size_t get_truncated_row_count(size_t total_row_count, size_t limit, size_t offset)
Definition: ResultSet.cpp:545
size_t drop_first_
Definition: ResultSet.h:952
Value parallel_reduce(const blocked_range< Int > &range, const Value &identity, const RealBody &real_body, const Reduction &reduction, const Partitioner &p=Partitioner())
Parallel iteration with reduction.
bool isRowAtEmpty(const size_t index) const
size_t entryCount() const
Returns the number of entries the result set is allocated to hold.
ThreadLocalIds thread_local_ids()
Definition: Logger.cpp:882

+ Here is the call graph for this function:

void ResultSet::parallelTop ( const std::list< Analyzer::OrderEntry > &  order_entries,
const size_t  top_n,
const Executor executor 
)
private

Definition at line 878 of file ResultSet.cpp.

References gpu_enabled::copy(), cpu_threads(), DEBUG_TIMER, threading_std::task_group::run(), logger::thread_local_ids(), and threading_std::task_group::wait().

880  {
881  auto timer = DEBUG_TIMER(__func__);
882  const size_t nthreads = cpu_threads();
883 
884  // Split permutation_ into nthreads subranges and top-sort in-place.
885  permutation_.resize(query_mem_desc_.getEntryCount());
886  std::vector<PermutationView> permutation_views(nthreads);
887  threading::task_group top_sort_threads;
888  for (auto interval : makeIntervals<PermutationIdx>(0, permutation_.size(), nthreads)) {
889  top_sort_threads.run([this,
890  &order_entries,
891  &permutation_views,
892  top_n,
893  executor,
894  parent_thread_local_ids = logger::thread_local_ids(),
895  interval] {
896  logger::LocalIdsScopeGuard lisg = parent_thread_local_ids.setNewThreadId();
897  PermutationView pv(permutation_.data() + interval.begin, 0, interval.size());
898  pv = initPermutationBuffer(pv, interval.begin, interval.end);
899  const auto compare = createComparator(order_entries, pv, executor, true);
900  permutation_views[interval.index] = topPermutation(pv, top_n, compare);
901  });
902  }
903  top_sort_threads.wait();
904 
905  // In case you are considering implementing a parallel reduction, note that the
906  // ResultSetComparator constructor is O(N) in order to materialize some of the aggregate
907  // columns as necessary to perform a comparison. This cost is why reduction is chosen to
908  // be serial instead; only one more Comparator is needed below.
909 
910  // Left-copy disjoint top-sorted subranges into one contiguous range.
911  // ++++....+++.....+++++... -> ++++++++++++............
912  auto end = permutation_.begin() + permutation_views.front().size();
913  for (size_t i = 1; i < nthreads; ++i) {
914  std::copy(permutation_views[i].begin(), permutation_views[i].end(), end);
915  end += permutation_views[i].size();
916  }
917 
918  // Top sort final range.
919  PermutationView pv(permutation_.data(), end - permutation_.begin());
920  const auto compare = createComparator(order_entries, pv, executor, false);
921  pv = topPermutation(pv, top_n, compare);
922  permutation_.resize(pv.size());
923  permutation_.shrink_to_fit();
924 }
Permutation permutation_
Definition: ResultSet.h:955
PermutationView initPermutationBuffer(PermutationView permutation, PermutationIdx const begin, PermutationIdx const end) const
Definition: ResultSet.cpp:858
QueryMemoryDescriptor query_mem_desc_
Definition: ResultSet.h:947
DEVICE auto copy(ARGS &&...args)
Definition: gpu_enabled.h:51
Comparator createComparator(const std::list< Analyzer::OrderEntry > &order_entries, const PermutationView permutation, const Executor *executor, const bool single_threaded)
Definition: ResultSet.h:877
static PermutationView topPermutation(PermutationView, const size_t n, const Comparator &)
Definition: ResultSet.cpp:1315
#define DEBUG_TIMER(name)
Definition: Logger.h:412
int cpu_threads()
Definition: thread_count.h:25
ThreadLocalIds thread_local_ids()
Definition: Logger.cpp:882

+ Here is the call graph for this function:

void ResultSet::radixSortOnCpu ( const std::list< Analyzer::OrderEntry > &  order_entries) const
private

Definition at line 1369 of file ResultSet.cpp.

References apply_permutation_cpu(), CHECK, CHECK_EQ, DEBUG_TIMER, and sort_groups_cpu().

1370  {
1371  auto timer = DEBUG_TIMER(__func__);
1372  CHECK(!query_mem_desc_.hasKeylessHash());
1373  std::vector<int64_t> tmp_buff(query_mem_desc_.getEntryCount());
1374  std::vector<int32_t> idx_buff(query_mem_desc_.getEntryCount());
1375  CHECK_EQ(size_t(1), order_entries.size());
1376  auto buffer_ptr = storage_->getUnderlyingBuffer();
1377  for (const auto& order_entry : order_entries) {
1378  const auto target_idx = order_entry.tle_no - 1;
1379  const auto sortkey_val_buff = reinterpret_cast<int64_t*>(
1380  buffer_ptr + query_mem_desc_.getColOffInBytes(target_idx));
1381  const auto chosen_bytes = query_mem_desc_.getPaddedSlotWidthBytes(target_idx);
1382  sort_groups_cpu(sortkey_val_buff,
1383  &idx_buff[0],
1384  query_mem_desc_.getEntryCount(),
1385  order_entry.is_desc,
1386  chosen_bytes);
1387  apply_permutation_cpu(reinterpret_cast<int64_t*>(buffer_ptr),
1388  &idx_buff[0],
1389  query_mem_desc_.getEntryCount(),
1390  &tmp_buff[0],
1391  sizeof(int64_t));
1392  for (size_t target_idx = 0; target_idx < query_mem_desc_.getSlotCount();
1393  ++target_idx) {
1394  if (static_cast<int>(target_idx) == order_entry.tle_no - 1) {
1395  continue;
1396  }
1397  const auto chosen_bytes = query_mem_desc_.getPaddedSlotWidthBytes(target_idx);
1398  const auto satellite_val_buff = reinterpret_cast<int64_t*>(
1399  buffer_ptr + query_mem_desc_.getColOffInBytes(target_idx));
1400  apply_permutation_cpu(satellite_val_buff,
1401  &idx_buff[0],
1402  query_mem_desc_.getEntryCount(),
1403  &tmp_buff[0],
1404  chosen_bytes);
1405  }
1406  }
1407 }
#define CHECK_EQ(x, y)
Definition: Logger.h:301
void sort_groups_cpu(int64_t *val_buff, int32_t *idx_buff, const uint64_t entry_count, const bool desc, const uint32_t chosen_bytes)
Definition: InPlaceSort.cpp:27
QueryMemoryDescriptor query_mem_desc_
Definition: ResultSet.h:947
std::unique_ptr< ResultSetStorage > storage_
Definition: ResultSet.h:948
void apply_permutation_cpu(int64_t *val_buff, int32_t *idx_buff, const uint64_t entry_count, int64_t *tmp_buff, const uint32_t chosen_bytes)
Definition: InPlaceSort.cpp:46
const int8_t getPaddedSlotWidthBytes(const size_t slot_idx) const
#define CHECK(condition)
Definition: Logger.h:291
#define DEBUG_TIMER(name)
Definition: Logger.h:412
size_t getColOffInBytes(const size_t col_idx) const

+ Here is the call graph for this function:

void ResultSet::radixSortOnGpu ( const std::list< Analyzer::OrderEntry > &  order_entries) const
private

Definition at line 1329 of file ResultSet.cpp.

References CHECK_GT, copy_group_by_buffers_from_gpu(), create_dev_group_by_buffers(), DEBUG_TIMER, Catalog_Namespace::SysCatalog::getDataMgr(), getQueryEngineCudaStreamForDevice(), GPU, inplace_sort_gpu(), Catalog_Namespace::SysCatalog::instance(), and KernelPerFragment.

1330  {
1331  auto timer = DEBUG_TIMER(__func__);
1332  auto data_mgr = &Catalog_Namespace::SysCatalog::instance().getDataMgr();
1333  const int device_id{0};
1334  auto allocator = std::make_unique<CudaAllocator>(
1335  data_mgr, device_id, getQueryEngineCudaStreamForDevice(device_id));
1336  CHECK_GT(block_size_, 0);
1337  CHECK_GT(grid_size_, 0);
1338  std::vector<int64_t*> group_by_buffers(block_size_);
1339  group_by_buffers[0] = reinterpret_cast<int64_t*>(storage_->getUnderlyingBuffer());
1340  auto dev_group_by_buffers =
1341  create_dev_group_by_buffers(allocator.get(),
1342  group_by_buffers,
1343  query_mem_desc_,
1344  block_size_,
1345  grid_size_,
1346  device_id,
1347  ExecutorDispatchMode::KernelPerFragment,
1348  /*num_input_rows=*/-1,
1349  /*prepend_index_buffer=*/true,
1350  /*always_init_group_by_on_host=*/true,
1351  /*use_bump_allocator=*/false,
1352  /*has_varlen_output=*/false,
1353  /*insitu_allocator*=*/nullptr);
1354  inplace_sort_gpu(
1355  order_entries, query_mem_desc_, dev_group_by_buffers, data_mgr, device_id);
1356  copy_group_by_buffers_from_gpu(
1357  *allocator,
1358  group_by_buffers,
1359  query_mem_desc_.getBufferSizeBytes(ExecutorDeviceType::GPU),
1360  dev_group_by_buffers.data,
1361  query_mem_desc_,
1362  block_size_,
1363  grid_size_,
1364  device_id,
1365  /*use_bump_allocator=*/false,
1366  /*has_varlen_output=*/false);
1367 }
GpuGroupByBuffers create_dev_group_by_buffers(DeviceAllocator *device_allocator, const std::vector< int64_t * > &group_by_buffers, const QueryMemoryDescriptor &query_mem_desc, const unsigned block_size_x, const unsigned grid_size_x, const int device_id, const ExecutorDispatchMode dispatch_mode, const int64_t num_input_rows, const bool prepend_index_buffer, const bool always_init_group_by_on_host, const bool use_bump_allocator, const bool has_varlen_output, Allocator *insitu_allocator)
Definition: GpuMemUtils.cpp:70
QueryMemoryDescriptor query_mem_desc_
Definition: ResultSet.h:947
std::unique_ptr< ResultSetStorage > storage_
Definition: ResultSet.h:948
void inplace_sort_gpu(const std::list< Analyzer::OrderEntry > &order_entries, const QueryMemoryDescriptor &query_mem_desc, const GpuGroupByBuffers &group_by_buffers, Data_Namespace::DataMgr *data_mgr, const int device_id)
#define CHECK_GT(x, y)
Definition: Logger.h:305
unsigned block_size_
Definition: ResultSet.h:957
Data_Namespace::DataMgr & getDataMgr() const
Definition: SysCatalog.h:234
static SysCatalog & instance()
Definition: SysCatalog.h:343
unsigned grid_size_
Definition: ResultSet.h:958