OmniSciDB  085a039ca4
 All Classes Namespaces Files Functions Variables Typedefs Enumerations Enumerator Friends Macros Groups Pages
ResultSet Class Reference

#include <ResultSet.h>

+ Collaboration diagram for ResultSet:

Classes

struct  ColumnWiseTargetAccessor
 
struct  QueryExecutionTimings
 
struct  ResultSetComparator
 
struct  RowIterationState
 
struct  RowWiseTargetAccessor
 
struct  StorageLookupResult
 
struct  TargetOffsets
 
struct  VarlenTargetPtrPair
 

Public Types

enum  GeoReturnType { GeoReturnType::GeoTargetValue, GeoReturnType::WktString, GeoReturnType::GeoTargetValuePtr, GeoReturnType::GeoTargetValueGpuPtr }
 

Public Member Functions

 ResultSet (const std::vector< TargetInfo > &targets, const ExecutorDeviceType device_type, const QueryMemoryDescriptor &query_mem_desc, const std::shared_ptr< RowSetMemoryOwner > row_set_mem_owner, const Catalog_Namespace::Catalog *catalog, const unsigned block_size, const unsigned grid_size)
 
 ResultSet (const std::vector< TargetInfo > &targets, const std::vector< ColumnLazyFetchInfo > &lazy_fetch_info, const std::vector< std::vector< const int8_t * >> &col_buffers, const std::vector< std::vector< int64_t >> &frag_offsets, const std::vector< int64_t > &consistent_frag_sizes, const ExecutorDeviceType device_type, const int device_id, const QueryMemoryDescriptor &query_mem_desc, const std::shared_ptr< RowSetMemoryOwner > row_set_mem_owner, const Catalog_Namespace::Catalog *catalog, const unsigned block_size, const unsigned grid_size)
 
 ResultSet (const std::shared_ptr< const Analyzer::Estimator >, const ExecutorDeviceType device_type, const int device_id, Data_Namespace::DataMgr *data_mgr)
 
 ResultSet (const std::string &explanation)
 
 ResultSet (int64_t queue_time_ms, int64_t render_time_ms, const std::shared_ptr< RowSetMemoryOwner > row_set_mem_owner)
 
 ~ResultSet ()
 
std::string toString () const
 
std::string summaryToString () const
 
ResultSetRowIterator rowIterator (size_t from_logical_index, bool translate_strings, bool decimal_to_double) const
 
ResultSetRowIterator rowIterator (bool translate_strings, bool decimal_to_double) const
 
ExecutorDeviceType getDeviceType () const
 
const ResultSetStorage * allocateStorage () const
 
const ResultSetStorage * allocateStorage (int8_t *, const std::vector< int64_t > &, std::shared_ptr< VarlenOutputInfo > = nullptr) const
 
const ResultSetStorage * allocateStorage (const std::vector< int64_t > &) const
 
void updateStorageEntryCount (const size_t new_entry_count)
 
std::vector< TargetValue > getNextRow (const bool translate_strings, const bool decimal_to_double) const
 
size_t getCurrentRowBufferIndex () const
 
std::vector< TargetValue > getRowAt (const size_t index) const
 
TargetValue getRowAt (const size_t row_idx, const size_t col_idx, const bool translate_strings, const bool decimal_to_double=true) const
 
OneIntegerColumnRow getOneColRow (const size_t index) const
 
std::vector< TargetValue > getRowAtNoTranslations (const size_t index, const std::vector< bool > &targets_to_skip={}) const
 
bool isRowAtEmpty (const size_t index) const
 
void sort (const std::list< Analyzer::OrderEntry > &order_entries, size_t top_n, const Executor *executor)
 
void keepFirstN (const size_t n)
 
void dropFirstN (const size_t n)
 
void append (ResultSet &that)
 
const ResultSetStorage * getStorage () const
 
size_t colCount () const
 
SQLTypeInfo getColType (const size_t col_idx) const
 
size_t rowCount (const bool force_parallel=false) const
 Returns the number of valid entries in the result set (i.e that will be returned from the SQL query or inputted into the next query step) More...
 
void invalidateCachedRowCount () const
 
void setCachedRowCount (const size_t row_count) const
 
bool isEmpty () const
 Returns a boolean signifying whether there are valid entries in the result set. More...
 
size_t entryCount () const
 Returns the number of entries the result set is allocated to hold. More...
 
size_t getBufferSizeBytes (const ExecutorDeviceType device_type) const
 
bool definitelyHasNoRows () const
 
const QueryMemoryDescriptor & getQueryMemDesc () const
 
const std::vector< TargetInfo > & getTargetInfos () const
 
const std::vector< int64_t > & getTargetInitVals () const
 
int8_t * getDeviceEstimatorBuffer () const
 
int8_t * getHostEstimatorBuffer () const
 
void syncEstimatorBuffer () const
 
size_t getNDVEstimator () const
 
void setQueueTime (const int64_t queue_time)
 
void setKernelQueueTime (const int64_t kernel_queue_time)
 
void addCompilationQueueTime (const int64_t compilation_queue_time)
 
int64_t getQueueTime () const
 
int64_t getRenderTime () const
 
void moveToBegin () const
 
bool isTruncated () const
 
bool isExplain () const
 
void setValidationOnlyRes ()
 
bool isValidationOnlyRes () const
 
std::string getExplanation () const
 
bool isGeoColOnGpu (const size_t col_idx) const
 
int getDeviceId () const
 
void fillOneEntry (const std::vector< int64_t > &entry)
 
void initializeStorage () const
 
void holdChunks (const std::list< std::shared_ptr< Chunk_NS::Chunk >> &chunks)
 
void holdChunkIterators (const std::shared_ptr< std::list< ChunkIter >> chunk_iters)
 
void holdLiterals (std::vector< int8_t > &literal_buff)
 
std::shared_ptr< RowSetMemoryOwner > getRowSetMemOwner () const
 
const Permutation & getPermutationBuffer () const
 
const bool isPermutationBufferEmpty () const
 
void serialize (TSerializedRows &serialized_rows) const
 
size_t getLimit () const
 
ResultSetPtr copy ()
 
void clearPermutation ()
 
void initStatus ()
 
void invalidateResultSetChunks ()
 
const bool isEstimator () const
 
void setCached (bool val)
 
const bool isCached () const
 
void setExecTime (const long exec_time)
 
const long getExecTime () const
 
void setQueryPlanHash (const QueryPlanHash query_plan)
 
const QueryPlanHash getQueryPlanHash ()
 
std::unordered_set< size_t > getInputTableKeys () const
 
void setInputTableKeys (std::unordered_set< size_t > &&intput_table_keys)
 
void setTargetMetaInfo (const std::vector< TargetMetaInfo > &target_meta_info)
 
std::vector< TargetMetaInfo > getTargetMetaInfo ()
 
std::optional< bool > canUseSpeculativeTopNSort () const
 
void setUseSpeculativeTopNSort (bool value)
 
const bool hasValidBuffer () const
 
GeoReturnType getGeoReturnType () const
 
void setGeoReturnType (const GeoReturnType val)
 
void copyColumnIntoBuffer (const size_t column_idx, int8_t *output_buffer, const size_t output_buffer_size) const
 
bool isDirectColumnarConversionPossible () const
 
bool didOutputColumnar () const
 
bool isZeroCopyColumnarConversionPossible (size_t column_idx) const
 
const int8_t * getColumnarBuffer (size_t column_idx) const
 
QueryDescriptionType getQueryDescriptionType () const
 
const int8_t getPaddedSlotWidthBytes (const size_t slot_idx) const
 
std::tuple< std::vector< bool >
, size_t > 
getSingleSlotTargetBitmap () const
 
std::tuple< std::vector< bool >
, size_t > 
getSupportedSingleSlotTargetBitmap () const
 
std::vector< size_t > getSlotIndicesForTargetIndices () const
 
const std::vector
< ColumnLazyFetchInfo > & 
getLazyFetchInfo () const
 
bool areAnyColumnsLazyFetched () const
 
size_t getNumColumnsLazyFetched () const
 
void setSeparateVarlenStorageValid (const bool val)
 
const std::vector< std::string > getStringDictionaryPayloadCopy (const int dict_id) const
 
const std::pair< std::vector
< int32_t >, std::vector
< std::string > > 
getUniqueStringsForDictEncodedTargetCol (const size_t col_idx) const
 
StringDictionaryProxy * getStringDictionaryProxy (int const dict_id) const
 
template<typename ENTRY_TYPE , QueryDescriptionType QUERY_TYPE, bool COLUMNAR_FORMAT>
ENTRY_TYPE getEntryAt (const size_t row_idx, const size_t target_idx, const size_t slot_idx) const
 
ChunkStats getTableFunctionChunkStats (const size_t target_idx) const
 
void translateDictEncodedColumns (std::vector< TargetInfo > const &, size_t const start_idx)
 
void eachCellInColumn (RowIterationState &, CellCallback const &)
 
const Executor * getExecutor () const
 
template<typename ENTRY_TYPE , QueryDescriptionType QUERY_TYPE, bool COLUMNAR_FORMAT>
ENTRY_TYPE getEntryAt (const size_t row_idx, const size_t target_idx, const size_t slot_idx) const
 
template<typename ENTRY_TYPE >
ENTRY_TYPE getColumnarPerfectHashEntryAt (const size_t row_idx, const size_t target_idx, const size_t slot_idx) const
 
template<typename ENTRY_TYPE >
ENTRY_TYPE getRowWisePerfectHashEntryAt (const size_t row_idx, const size_t target_idx, const size_t slot_idx) const
 
template<typename ENTRY_TYPE >
ENTRY_TYPE getRowWiseBaselineEntryAt (const size_t row_idx, const size_t target_idx, const size_t slot_idx) const
 
template<typename ENTRY_TYPE >
ENTRY_TYPE getColumnarBaselineEntryAt (const size_t row_idx, const size_t target_idx, const size_t slot_idx) const
 

Static Public Member Functions

static QueryMemoryDescriptor fixupQueryMemoryDescriptor (const QueryMemoryDescriptor &)
 
static std::unique_ptr< ResultSet > unserialize (const TSerializedRows &serialized_rows, const Executor *)
 
static double calculateQuantile (quantile::TDigest *const t_digest)
 

Public Attributes

friend ResultSetBuilder
 

Private Types

using ApproxQuantileBuffers = std::vector< std::vector< double >>
 
using SerializedVarlenBufferStorage = std::vector< std::string >
 

Private Member Functions

void advanceCursorToNextEntry (ResultSetRowIterator &iter) const
 
std::vector< TargetValue > getNextRowImpl (const bool translate_strings, const bool decimal_to_double) const
 
std::vector< TargetValue > getNextRowUnlocked (const bool translate_strings, const bool decimal_to_double) const
 
std::vector< TargetValue > getRowAt (const size_t index, const bool translate_strings, const bool decimal_to_double, const bool fixup_count_distinct_pointers, const std::vector< bool > &targets_to_skip={}) const
 
template<typename ENTRY_TYPE >
ENTRY_TYPE getColumnarPerfectHashEntryAt (const size_t row_idx, const size_t target_idx, const size_t slot_idx) const
 
template<typename ENTRY_TYPE >
ENTRY_TYPE getRowWisePerfectHashEntryAt (const size_t row_idx, const size_t target_idx, const size_t slot_idx) const
 
template<typename ENTRY_TYPE >
ENTRY_TYPE getRowWiseBaselineEntryAt (const size_t row_idx, const size_t target_idx, const size_t slot_idx) const
 
template<typename ENTRY_TYPE >
ENTRY_TYPE getColumnarBaselineEntryAt (const size_t row_idx, const size_t target_idx, const size_t slot_idx) const
 
size_t binSearchRowCount () const
 
size_t parallelRowCount () const
 
size_t advanceCursorToNextEntry () const
 
void radixSortOnGpu (const std::list< Analyzer::OrderEntry > &order_entries) const
 
void radixSortOnCpu (const std::list< Analyzer::OrderEntry > &order_entries) const
 
TargetValue getTargetValueFromBufferRowwise (int8_t *rowwise_target_ptr, int8_t *keys_ptr, const size_t entry_buff_idx, const TargetInfo &target_info, const size_t target_logical_idx, const size_t slot_idx, const bool translate_strings, const bool decimal_to_double, const bool fixup_count_distinct_pointers) const
 
TargetValue getTargetValueFromBufferColwise (const int8_t *col_ptr, const int8_t *keys_ptr, const QueryMemoryDescriptor &query_mem_desc, const size_t local_entry_idx, const size_t global_entry_idx, const TargetInfo &target_info, const size_t target_logical_idx, const size_t slot_idx, const bool translate_strings, const bool decimal_to_double) const
 
TargetValue makeTargetValue (const int8_t *ptr, const int8_t compact_sz, const TargetInfo &target_info, const size_t target_logical_idx, const bool translate_strings, const bool decimal_to_double, const size_t entry_buff_idx) const
 
TargetValue makeVarlenTargetValue (const int8_t *ptr1, const int8_t compact_sz1, const int8_t *ptr2, const int8_t compact_sz2, const TargetInfo &target_info, const size_t target_logical_idx, const bool translate_strings, const size_t entry_buff_idx) const
 
TargetValue makeGeoTargetValue (const int8_t *geo_target_ptr, const size_t slot_idx, const TargetInfo &target_info, const size_t target_logical_idx, const size_t entry_buff_idx) const
 
InternalTargetValue getVarlenOrderEntry (const int64_t str_ptr, const size_t str_len) const
 
int64_t lazyReadInt (const int64_t ival, const size_t target_logical_idx, const StorageLookupResult &storage_lookup_result) const
 
std::pair< size_t, size_t > getStorageIndex (const size_t entry_idx) const
 
const std::vector< const
int8_t * > & 
getColumnFrag (const size_t storge_idx, const size_t col_logical_idx, int64_t &global_idx) const
 
const VarlenOutputInfo * getVarlenOutputInfo (const size_t entry_idx) const
 
StorageLookupResult findStorage (const size_t entry_idx) const
 
Comparator createComparator (const std::list< Analyzer::OrderEntry > &order_entries, const PermutationView permutation, const Executor *executor, const bool single_threaded)
 
PermutationView initPermutationBuffer (PermutationView permutation, PermutationIdx const begin, PermutationIdx const end) const
 
void parallelTop (const std::list< Analyzer::OrderEntry > &order_entries, const size_t top_n, const Executor *executor)
 
void baselineSort (const std::list< Analyzer::OrderEntry > &order_entries, const size_t top_n, const Executor *executor)
 
void doBaselineSort (const ExecutorDeviceType device_type, const std::list< Analyzer::OrderEntry > &order_entries, const size_t top_n, const Executor *executor)
 
bool canUseFastBaselineSort (const std::list< Analyzer::OrderEntry > &order_entries, const size_t top_n)
 
size_t rowCountImpl (const bool force_parallel) const
 
Data_Namespace::DataMgr * getDataManager () const
 
int getGpuCount () const
 
void serializeProjection (TSerializedRows &serialized_rows) const
 
void serializeVarlenAggColumn (int8_t *buf, std::vector< std::string > &varlen_bufer) const
 
void serializeCountDistinctColumns (TSerializedRows &) const
 
void unserializeCountDistinctColumns (const TSerializedRows &)
 
void fixupCountDistinctPointers ()
 
void create_active_buffer_set (CountDistinctSet &count_distinct_active_buffer_set) const
 
int64_t getDistinctBufferRefFromBufferRowwise (int8_t *rowwise_target_ptr, const TargetInfo &target_info) const
 

Static Private Member Functions

static bool isNull (const SQLTypeInfo &ti, const InternalTargetValue &val, const bool float_argument_input)
 
static PermutationView topPermutation (PermutationView, const size_t n, const Comparator &)
 

Private Attributes

const std::vector< TargetInfo > targets_
 
const ExecutorDeviceType device_type_
 
const int device_id_
 
QueryMemoryDescriptor query_mem_desc_
 
std::unique_ptr< ResultSetStorage > storage_
 
AppendedStorage appended_storage_
 
size_t crt_row_buff_idx_
 
size_t fetched_so_far_
 
size_t drop_first_
 
size_t keep_first_
 
std::shared_ptr< RowSetMemoryOwner > row_set_mem_owner_
 
Permutation permutation_
 
const Catalog_Namespace::Catalog * catalog_
 
unsigned block_size_ {0}
 
unsigned grid_size_ {0}
 
QueryExecutionTimings timings_
 
std::list< std::shared_ptr
< Chunk_NS::Chunk > > 
chunks_
 
std::vector< std::shared_ptr
< std::list< ChunkIter > > > 
chunk_iters_
 
std::vector< std::vector
< int8_t > > 
literal_buffers_
 
std::vector< ColumnLazyFetchInfo > lazy_fetch_info_
 
std::vector< std::vector
< std::vector< const int8_t * > > > 
col_buffers_
 
std::vector< std::vector
< std::vector< int64_t > > > 
frag_offsets_
 
std::vector< std::vector
< int64_t > > 
consistent_frag_sizes_
 
const std::shared_ptr< const Analyzer::Estimator > estimator_
 
Data_Namespace::AbstractBuffer * device_estimator_buffer_ {nullptr}
 
int8_t * host_estimator_buffer_ {nullptr}
 
Data_Namespace::DataMgr * data_mgr_
 
std::vector< SerializedVarlenBufferStorage > serialized_varlen_buffer_
 
bool separate_varlen_storage_valid_
 
std::string explanation_
 
const bool just_explain_
 
bool for_validation_only_
 
std::atomic< int64_t > cached_row_count_
 
std::mutex row_iteration_mutex_
 
GeoReturnType geo_return_type_
 
bool cached_
 
size_t query_exec_time_
 
QueryPlanHash query_plan_
 
std::unordered_set< size_t > input_table_keys_
 
std::vector< TargetMetaInfo > target_meta_info_
 
std::optional< bool > can_use_speculative_top_n_sort
 

Friends

class ResultSetManager
 
class ResultSetRowIterator
 
class ColumnarResults
 

Detailed Description

Definition at line 159 of file ResultSet.h.

Member Typedef Documentation

using ResultSet::ApproxQuantileBuffers = std::vector<std::vector<double>>
private

Definition at line 795 of file ResultSet.h.

using ResultSet::SerializedVarlenBufferStorage = std::vector<std::string>
private

Definition at line 935 of file ResultSet.h.

Member Enumeration Documentation

Geo return type options when accessing geo columns from a result set.

Enumerator
GeoTargetValue 

Copies the geo data into a struct of vectors - coords are uncompressed

WktString 

Returns the geo data as a WKT string

GeoTargetValuePtr 

Returns only the pointers of the underlying buffers for the geo data.

GeoTargetValueGpuPtr 

If geo data is currently on a device, keep the data on the device and return the device ptrs

Definition at line 519 of file ResultSet.h.

519  {
522  WktString,
525  GeoTargetValueGpuPtr
527  };
boost::optional< boost::variant< GeoPointTargetValue, GeoLineStringTargetValue, GeoPolyTargetValue, GeoMultiPolyTargetValue >> GeoTargetValue
Definition: TargetValue.h:161
boost::variant< GeoPointTargetValuePtr, GeoLineStringTargetValuePtr, GeoPolyTargetValuePtr, GeoMultiPolyTargetValuePtr > GeoTargetValuePtr
Definition: TargetValue.h:165

Constructor & Destructor Documentation

ResultSet::ResultSet ( const std::vector< TargetInfo > &  targets,
const ExecutorDeviceType  device_type,
const QueryMemoryDescriptor query_mem_desc,
const std::shared_ptr< RowSetMemoryOwner row_set_mem_owner,
const Catalog_Namespace::Catalog catalog,
const unsigned  block_size,
const unsigned  grid_size 
)

Definition at line 62 of file ResultSet.cpp.

69  : targets_(targets)
70  , device_type_(device_type)
71  , device_id_(-1)
72  , query_mem_desc_(query_mem_desc)
74  , fetched_so_far_(0)
75  , drop_first_(0)
76  , keep_first_(0)
77  , row_set_mem_owner_(row_set_mem_owner)
78  , catalog_(catalog)
79  , block_size_(block_size)
80  , grid_size_(grid_size)
81  , data_mgr_(nullptr)
83  , just_explain_(false)
84  , for_validation_only_(false)
87  , cached_(false)
88  , query_exec_time_(0)
90  , can_use_speculative_top_n_sort(std::nullopt) {}
bool for_validation_only_
Definition: ResultSet.h:941
GeoReturnType geo_return_type_
Definition: ResultSet.h:946
constexpr QueryPlanHash EMPTY_HASHED_PLAN_DAG_KEY
std::optional< bool > can_use_speculative_top_n_sort
Definition: ResultSet.h:959
const Catalog_Namespace::Catalog * catalog_
Definition: ResultSet.h:914
QueryMemoryDescriptor query_mem_desc_
Definition: ResultSet.h:904
size_t query_exec_time_
Definition: ResultSet.h:951
size_t keep_first_
Definition: ResultSet.h:910
const bool just_explain_
Definition: ResultSet.h:940
unsigned block_size_
Definition: ResultSet.h:915
std::atomic< int64_t > cached_row_count_
Definition: ResultSet.h:942
const std::vector< TargetInfo > targets_
Definition: ResultSet.h:901
std::shared_ptr< RowSetMemoryOwner > row_set_mem_owner_
Definition: ResultSet.h:911
size_t drop_first_
Definition: ResultSet.h:909
bool cached_
Definition: ResultSet.h:949
unsigned grid_size_
Definition: ResultSet.h:916
Data_Namespace::DataMgr * data_mgr_
Definition: ResultSet.h:932
const ExecutorDeviceType device_type_
Definition: ResultSet.h:902
size_t fetched_so_far_
Definition: ResultSet.h:908
size_t crt_row_buff_idx_
Definition: ResultSet.h:907
QueryPlanHash query_plan_
Definition: ResultSet.h:952
bool separate_varlen_storage_valid_
Definition: ResultSet.h:938
constexpr int64_t uninitialized_cached_row_count
Definition: ResultSet.cpp:50
const int device_id_
Definition: ResultSet.h:903
ResultSet::ResultSet ( const std::vector< TargetInfo > &  targets,
const std::vector< ColumnLazyFetchInfo > &  lazy_fetch_info,
const std::vector< std::vector< const int8_t * >> &  col_buffers,
const std::vector< std::vector< int64_t >> &  frag_offsets,
const std::vector< int64_t > &  consistent_frag_sizes,
const ExecutorDeviceType  device_type,
const int  device_id,
const QueryMemoryDescriptor query_mem_desc,
const std::shared_ptr< RowSetMemoryOwner row_set_mem_owner,
const Catalog_Namespace::Catalog catalog,
const unsigned  block_size,
const unsigned  grid_size 
)

Definition at line 92 of file ResultSet.cpp.

104  : targets_(targets)
105  , device_type_(device_type)
106  , device_id_(device_id)
107  , query_mem_desc_(query_mem_desc)
108  , crt_row_buff_idx_(0)
109  , fetched_so_far_(0)
110  , drop_first_(0)
111  , keep_first_(0)
112  , row_set_mem_owner_(row_set_mem_owner)
113  , catalog_(catalog)
114  , block_size_(block_size)
115  , grid_size_(grid_size)
116  , lazy_fetch_info_(lazy_fetch_info)
117  , col_buffers_{col_buffers}
118  , frag_offsets_{frag_offsets}
119  , consistent_frag_sizes_{consistent_frag_sizes}
120  , data_mgr_(nullptr)
122  , just_explain_(false)
123  , for_validation_only_(false)
126  , cached_(false)
127  , query_exec_time_(0)
129  , can_use_speculative_top_n_sort(std::nullopt) {}
bool for_validation_only_
Definition: ResultSet.h:941
GeoReturnType geo_return_type_
Definition: ResultSet.h:946
constexpr QueryPlanHash EMPTY_HASHED_PLAN_DAG_KEY
std::optional< bool > can_use_speculative_top_n_sort
Definition: ResultSet.h:959
const Catalog_Namespace::Catalog * catalog_
Definition: ResultSet.h:914
QueryMemoryDescriptor query_mem_desc_
Definition: ResultSet.h:904
size_t query_exec_time_
Definition: ResultSet.h:951
size_t keep_first_
Definition: ResultSet.h:910
const bool just_explain_
Definition: ResultSet.h:940
unsigned block_size_
Definition: ResultSet.h:915
std::atomic< int64_t > cached_row_count_
Definition: ResultSet.h:942
const std::vector< TargetInfo > targets_
Definition: ResultSet.h:901
std::shared_ptr< RowSetMemoryOwner > row_set_mem_owner_
Definition: ResultSet.h:911
size_t drop_first_
Definition: ResultSet.h:909
bool cached_
Definition: ResultSet.h:949
unsigned grid_size_
Definition: ResultSet.h:916
Data_Namespace::DataMgr * data_mgr_
Definition: ResultSet.h:932
std::vector< std::vector< std::vector< const int8_t * > > > col_buffers_
Definition: ResultSet.h:925
std::vector< std::vector< int64_t > > consistent_frag_sizes_
Definition: ResultSet.h:927
const ExecutorDeviceType device_type_
Definition: ResultSet.h:902
size_t fetched_so_far_
Definition: ResultSet.h:908
std::vector< ColumnLazyFetchInfo > lazy_fetch_info_
Definition: ResultSet.h:924
size_t crt_row_buff_idx_
Definition: ResultSet.h:907
QueryPlanHash query_plan_
Definition: ResultSet.h:952
std::vector< std::vector< std::vector< int64_t > > > frag_offsets_
Definition: ResultSet.h:926
bool separate_varlen_storage_valid_
Definition: ResultSet.h:938
constexpr int64_t uninitialized_cached_row_count
Definition: ResultSet.cpp:50
const int device_id_
Definition: ResultSet.h:903
ResultSet::ResultSet ( const std::shared_ptr< const Analyzer::Estimator > ,
const ExecutorDeviceType  device_type,
const int  device_id,
Data_Namespace::DataMgr data_mgr 
)
ResultSet::ResultSet ( const std::string &  explanation)

Definition at line 163 of file ResultSet.cpp.

References CPU.

165  , device_id_(-1)
166  , fetched_so_far_(0)
168  , explanation_(explanation)
169  , just_explain_(true)
170  , for_validation_only_(false)
173  , cached_(false)
174  , query_exec_time_(0)
176  , can_use_speculative_top_n_sort(std::nullopt) {}
bool for_validation_only_
Definition: ResultSet.h:941
GeoReturnType geo_return_type_
Definition: ResultSet.h:946
constexpr QueryPlanHash EMPTY_HASHED_PLAN_DAG_KEY
std::optional< bool > can_use_speculative_top_n_sort
Definition: ResultSet.h:959
size_t query_exec_time_
Definition: ResultSet.h:951
const bool just_explain_
Definition: ResultSet.h:940
std::atomic< int64_t > cached_row_count_
Definition: ResultSet.h:942
bool cached_
Definition: ResultSet.h:949
std::string explanation_
Definition: ResultSet.h:939
const ExecutorDeviceType device_type_
Definition: ResultSet.h:902
size_t fetched_so_far_
Definition: ResultSet.h:908
QueryPlanHash query_plan_
Definition: ResultSet.h:952
bool separate_varlen_storage_valid_
Definition: ResultSet.h:938
constexpr int64_t uninitialized_cached_row_count
Definition: ResultSet.cpp:50
const int device_id_
Definition: ResultSet.h:903
ResultSet::ResultSet ( int64_t  queue_time_ms,
int64_t  render_time_ms,
const std::shared_ptr< RowSetMemoryOwner row_set_mem_owner 
)

Definition at line 178 of file ResultSet.cpp.

References CPU.

182  , device_id_(-1)
183  , fetched_so_far_(0)
184  , row_set_mem_owner_(row_set_mem_owner)
185  , timings_(QueryExecutionTimings{queue_time_ms, render_time_ms, 0, 0})
187  , just_explain_(true)
188  , for_validation_only_(false)
191  , cached_(false)
192  , query_exec_time_(0)
194  , can_use_speculative_top_n_sort(std::nullopt) {}
bool for_validation_only_
Definition: ResultSet.h:941
GeoReturnType geo_return_type_
Definition: ResultSet.h:946
constexpr QueryPlanHash EMPTY_HASHED_PLAN_DAG_KEY
std::optional< bool > can_use_speculative_top_n_sort
Definition: ResultSet.h:959
size_t query_exec_time_
Definition: ResultSet.h:951
const bool just_explain_
Definition: ResultSet.h:940
std::atomic< int64_t > cached_row_count_
Definition: ResultSet.h:942
std::shared_ptr< RowSetMemoryOwner > row_set_mem_owner_
Definition: ResultSet.h:911
bool cached_
Definition: ResultSet.h:949
QueryExecutionTimings timings_
Definition: ResultSet.h:917
const ExecutorDeviceType device_type_
Definition: ResultSet.h:902
size_t fetched_so_far_
Definition: ResultSet.h:908
QueryPlanHash query_plan_
Definition: ResultSet.h:952
bool separate_varlen_storage_valid_
Definition: ResultSet.h:938
constexpr int64_t uninitialized_cached_row_count
Definition: ResultSet.cpp:50
const int device_id_
Definition: ResultSet.h:903
ResultSet::~ResultSet ( )

Definition at line 196 of file ResultSet.cpp.

References CHECK, CPU, and data_mgr_().

196  {
197  if (storage_) {
198  if (!storage_->buff_is_provided_) {
199  CHECK(storage_->getUnderlyingBuffer());
200  free(storage_->getUnderlyingBuffer());
201  }
202  }
203  for (auto& storage : appended_storage_) {
204  if (storage && !storage->buff_is_provided_) {
205  free(storage->getUnderlyingBuffer());
206  }
207  }
211  }
213  CHECK(data_mgr_);
215  }
216 }
AppendedStorage appended_storage_
Definition: ResultSet.h:906
std::unique_ptr< ResultSetStorage > storage_
Definition: ResultSet.h:905
Data_Namespace::DataMgr * data_mgr_
Definition: ResultSet.h:932
int8_t * host_estimator_buffer_
Definition: ResultSet.h:931
const ExecutorDeviceType device_type_
Definition: ResultSet.h:902
#define CHECK(condition)
Definition: Logger.h:223
void free(AbstractBuffer *buffer)
Definition: DataMgr.cpp:528
Data_Namespace::AbstractBuffer * device_estimator_buffer_
Definition: ResultSet.h:930

+ Here is the call graph for this function:

Member Function Documentation

void ResultSet::addCompilationQueueTime ( const int64_t  compilation_queue_time)

Definition at line 718 of file ResultSet.cpp.

718  {
719  timings_.compilation_queue_time += compilation_queue_time;
720 }
QueryExecutionTimings timings_
Definition: ResultSet.h:917
void ResultSet::advanceCursorToNextEntry ( ResultSetRowIterator iter) const
private
size_t ResultSet::advanceCursorToNextEntry ( ) const
private
const ResultSetStorage* ResultSet::allocateStorage ( ) const
const ResultSetStorage* ResultSet::allocateStorage ( int8_t *  ,
const std::vector< int64_t > &  ,
std::shared_ptr< VarlenOutputInfo = nullptr 
) const
const ResultSetStorage* ResultSet::allocateStorage ( const std::vector< int64_t > &  ) const
void ResultSet::append ( ResultSet that)

Definition at line 299 of file ResultSet.cpp.

References CHECK.

299  {
301  if (!that.storage_) {
302  return;
303  }
304  appended_storage_.push_back(std::move(that.storage_));
307  appended_storage_.back()->query_mem_desc_.getEntryCount());
308  chunks_.insert(chunks_.end(), that.chunks_.begin(), that.chunks_.end());
309  col_buffers_.insert(
310  col_buffers_.end(), that.col_buffers_.begin(), that.col_buffers_.end());
311  frag_offsets_.insert(
312  frag_offsets_.end(), that.frag_offsets_.begin(), that.frag_offsets_.end());
314  that.consistent_frag_sizes_.begin(),
315  that.consistent_frag_sizes_.end());
316  chunk_iters_.insert(
317  chunk_iters_.end(), that.chunk_iters_.begin(), that.chunk_iters_.end());
319  CHECK(that.separate_varlen_storage_valid_);
321  that.serialized_varlen_buffer_.begin(),
322  that.serialized_varlen_buffer_.end());
323  }
324  for (auto& buff : that.literal_buffers_) {
325  literal_buffers_.push_back(std::move(buff));
326  }
327 }
void setEntryCount(const size_t val)
AppendedStorage appended_storage_
Definition: ResultSet.h:906
QueryMemoryDescriptor query_mem_desc_
Definition: ResultSet.h:904
std::vector< std::shared_ptr< std::list< ChunkIter > > > chunk_iters_
Definition: ResultSet.h:920
std::vector< SerializedVarlenBufferStorage > serialized_varlen_buffer_
Definition: ResultSet.h:937
void invalidateCachedRowCount() const
Definition: ResultSet.cpp:605
std::list< std::shared_ptr< Chunk_NS::Chunk > > chunks_
Definition: ResultSet.h:919
std::vector< std::vector< int8_t > > literal_buffers_
Definition: ResultSet.h:923
std::vector< std::vector< std::vector< const int8_t * > > > col_buffers_
Definition: ResultSet.h:925
std::vector< std::vector< int64_t > > consistent_frag_sizes_
Definition: ResultSet.h:927
#define CHECK(condition)
Definition: Logger.h:223
std::vector< std::vector< std::vector< int64_t > > > frag_offsets_
Definition: ResultSet.h:926
bool separate_varlen_storage_valid_
Definition: ResultSet.h:938
bool ResultSet::areAnyColumnsLazyFetched ( ) const
inline

Definition at line 561 of file ResultSet.h.

References lazy_fetch_info_.

561  {
562  auto is_lazy = [](auto const& info) { return info.is_lazily_fetched; };
563  return std::any_of(lazy_fetch_info_.begin(), lazy_fetch_info_.end(), is_lazy);
564  }
std::vector< ColumnLazyFetchInfo > lazy_fetch_info_
Definition: ResultSet.h:924
void ResultSet::baselineSort ( const std::list< Analyzer::OrderEntry > &  order_entries,
const size_t  top_n,
const Executor executor 
)
private
size_t ResultSet::binSearchRowCount ( ) const
private

Definition at line 616 of file ResultSet.cpp.

References anonymous_namespace{ResultSet.cpp}::get_truncated_row_count().

616  {
617  if (!storage_) {
618  return 0;
619  }
620 
621  size_t row_count = storage_->binSearchRowCount();
622  for (auto& s : appended_storage_) {
623  row_count += s->binSearchRowCount();
624  }
625 
626  return get_truncated_row_count(row_count, getLimit(), drop_first_);
627 }
AppendedStorage appended_storage_
Definition: ResultSet.h:906
std::unique_ptr< ResultSetStorage > storage_
Definition: ResultSet.h:905
size_t getLimit() const
Definition: ResultSet.cpp:1302
size_t get_truncated_row_count(size_t total_row_count, size_t limit, size_t offset)
Definition: ResultSet.cpp:539
size_t drop_first_
Definition: ResultSet.h:909

+ Here is the call graph for this function:

double ResultSet::calculateQuantile ( quantile::TDigest *const  t_digest)
static

Definition at line 1008 of file ResultSet.cpp.

References CHECK, quantile::detail::TDigest< RealType, IndexType >::mergeBufferFinal(), NULL_DOUBLE, and quantile::detail::TDigest< RealType, IndexType >::quantile().

Referenced by makeTargetValue().

1008  {
1009  static_assert(sizeof(int64_t) == sizeof(quantile::TDigest*));
1010  CHECK(t_digest);
1011  t_digest->mergeBufferFinal();
1012  double const quantile = t_digest->quantile();
1013  return boost::math::isnan(quantile) ? NULL_DOUBLE : quantile;
1014 }
#define NULL_DOUBLE
DEVICE RealType quantile(VectorView< IndexType const > const partial_sum, RealType const q) const
Definition: quantile.h:828
DEVICE void mergeBufferFinal()
Definition: quantile.h:652
#define CHECK(condition)
Definition: Logger.h:223

+ Here is the call graph for this function:

+ Here is the caller graph for this function:

bool ResultSet::canUseFastBaselineSort ( const std::list< Analyzer::OrderEntry > &  order_entries,
const size_t  top_n 
)
private
std::optional<bool> ResultSet::canUseSpeculativeTopNSort ( ) const
inline

Definition at line 503 of file ResultSet.h.

References can_use_speculative_top_n_sort.

503  {
505  }
std::optional< bool > can_use_speculative_top_n_sort
Definition: ResultSet.h:959
void ResultSet::clearPermutation ( )
inline

Definition at line 448 of file ResultSet.h.

References permutation_.

Referenced by initStatus().

448  {
449  if (!permutation_.empty()) {
450  permutation_.clear();
451  }
452  }
Permutation permutation_
Definition: ResultSet.h:912

+ Here is the caller graph for this function:

size_t ResultSet::colCount ( ) const

Definition at line 413 of file ResultSet.cpp.

413  {
414  return just_explain_ ? 1 : targets_.size();
415 }
const bool just_explain_
Definition: ResultSet.h:940
const std::vector< TargetInfo > targets_
Definition: ResultSet.h:901
ResultSetPtr ResultSet::copy ( )

Definition at line 329 of file ResultSet.cpp.

References CHECK, gpu_enabled::copy(), and DEBUG_TIMER.

329  {
330  auto timer = DEBUG_TIMER(__func__);
331  if (!storage_) {
332  return nullptr;
333  }
334 
335  auto executor = getExecutor();
336  CHECK(executor);
337  ResultSetPtr copied_rs = std::make_shared<ResultSet>(targets_,
338  device_type_,
341  executor->getCatalog(),
342  executor->blockSize(),
343  executor->gridSize());
344 
345  auto allocate_and_copy_storage =
346  [&](const ResultSetStorage* prev_storage) -> std::unique_ptr<ResultSetStorage> {
347  const auto& prev_qmd = prev_storage->query_mem_desc_;
348  const auto storage_size = prev_qmd.getBufferSizeBytes(device_type_);
349  auto buff = row_set_mem_owner_->allocate(storage_size, /*thread_idx=*/0);
350  std::unique_ptr<ResultSetStorage> new_storage;
351  new_storage.reset(new ResultSetStorage(
352  prev_storage->targets_, prev_qmd, buff, /*buff_is_provided=*/true));
353  new_storage->target_init_vals_ = prev_storage->target_init_vals_;
354  if (prev_storage->varlen_output_info_) {
355  new_storage->varlen_output_info_ = prev_storage->varlen_output_info_;
356  }
357  memcpy(new_storage->buff_, prev_storage->buff_, storage_size);
358  new_storage->query_mem_desc_ = prev_qmd;
359  return new_storage;
360  };
361 
362  copied_rs->storage_ = allocate_and_copy_storage(storage_.get());
363  if (!appended_storage_.empty()) {
364  for (const auto& storage : appended_storage_) {
365  copied_rs->appended_storage_.push_back(allocate_and_copy_storage(storage.get()));
366  }
367  }
368  std::copy(chunks_.begin(), chunks_.end(), std::back_inserter(copied_rs->chunks_));
369  std::copy(chunk_iters_.begin(),
370  chunk_iters_.end(),
371  std::back_inserter(copied_rs->chunk_iters_));
372  std::copy(col_buffers_.begin(),
373  col_buffers_.end(),
374  std::back_inserter(copied_rs->col_buffers_));
375  std::copy(frag_offsets_.begin(),
376  frag_offsets_.end(),
377  std::back_inserter(copied_rs->frag_offsets_));
380  std::back_inserter(copied_rs->consistent_frag_sizes_));
384  std::back_inserter(copied_rs->serialized_varlen_buffer_));
385  }
386  std::copy(literal_buffers_.begin(),
387  literal_buffers_.end(),
388  std::back_inserter(copied_rs->literal_buffers_));
389  std::copy(lazy_fetch_info_.begin(),
390  lazy_fetch_info_.end(),
391  std::back_inserter(copied_rs->lazy_fetch_info_));
392 
393  copied_rs->permutation_ = permutation_;
394  copied_rs->drop_first_ = drop_first_;
395  copied_rs->keep_first_ = keep_first_;
396  copied_rs->separate_varlen_storage_valid_ = separate_varlen_storage_valid_;
397  copied_rs->query_exec_time_ = query_exec_time_;
398  copied_rs->input_table_keys_ = input_table_keys_;
399  copied_rs->target_meta_info_ = target_meta_info_;
400  copied_rs->geo_return_type_ = geo_return_type_;
401  copied_rs->query_plan_ = query_plan_;
403  copied_rs->can_use_speculative_top_n_sort = can_use_speculative_top_n_sort;
404  }
405 
406  return copied_rs;
407 }
Permutation permutation_
Definition: ResultSet.h:912
AppendedStorage appended_storage_
Definition: ResultSet.h:906
GeoReturnType geo_return_type_
Definition: ResultSet.h:946
std::optional< bool > can_use_speculative_top_n_sort
Definition: ResultSet.h:959
QueryMemoryDescriptor query_mem_desc_
Definition: ResultSet.h:904
std::unique_ptr< ResultSetStorage > storage_
Definition: ResultSet.h:905
size_t query_exec_time_
Definition: ResultSet.h:951
std::shared_ptr< ResultSet > ResultSetPtr
size_t keep_first_
Definition: ResultSet.h:910
std::vector< std::shared_ptr< std::list< ChunkIter > > > chunk_iters_
Definition: ResultSet.h:920
std::vector< SerializedVarlenBufferStorage > serialized_varlen_buffer_
Definition: ResultSet.h:937
const std::vector< TargetInfo > targets_
Definition: ResultSet.h:901
std::shared_ptr< RowSetMemoryOwner > row_set_mem_owner_
Definition: ResultSet.h:911
const Executor * getExecutor() const
Definition: ResultSet.h:605
size_t drop_first_
Definition: ResultSet.h:909
std::list< std::shared_ptr< Chunk_NS::Chunk > > chunks_
Definition: ResultSet.h:919
DEVICE auto copy(ARGS &&...args)
Definition: gpu_enabled.h:51
std::vector< std::vector< int8_t > > literal_buffers_
Definition: ResultSet.h:923
std::unordered_set< size_t > input_table_keys_
Definition: ResultSet.h:953
std::vector< TargetMetaInfo > target_meta_info_
Definition: ResultSet.h:954
std::vector< std::vector< std::vector< const int8_t * > > > col_buffers_
Definition: ResultSet.h:925
std::vector< std::vector< int64_t > > consistent_frag_sizes_
Definition: ResultSet.h:927
const ExecutorDeviceType device_type_
Definition: ResultSet.h:902
#define CHECK(condition)
Definition: Logger.h:223
#define DEBUG_TIMER(name)
Definition: Logger.h:370
std::vector< ColumnLazyFetchInfo > lazy_fetch_info_
Definition: ResultSet.h:924
QueryPlanHash query_plan_
Definition: ResultSet.h:952
std::vector< std::vector< std::vector< int64_t > > > frag_offsets_
Definition: ResultSet.h:926
bool separate_varlen_storage_valid_
Definition: ResultSet.h:938

+ Here is the call graph for this function:

void ResultSet::copyColumnIntoBuffer ( const size_t  column_idx,
int8_t *  output_buffer,
const size_t  output_buffer_size 
) const

For the specified column, this function goes through all available storages and copies the column's content into a contiguous output_buffer

Definition at line 1123 of file ResultSetIteration.cpp.

References appended_storage_, CHECK, CHECK_LT, QueryMemoryDescriptor::getPaddedSlotWidthBytes(), QueryMemoryDescriptor::getSlotCount(), isDirectColumnarConversionPossible(), query_mem_desc_, and storage_.

1125  {
1127  CHECK_LT(column_idx, query_mem_desc_.getSlotCount());
1128  CHECK(output_buffer_size > 0);
1129  CHECK(output_buffer);
1130  const auto column_width_size = query_mem_desc_.getPaddedSlotWidthBytes(column_idx);
1131  size_t out_buff_offset = 0;
1132 
1133  // the main storage:
1134  const size_t crt_storage_row_count = storage_->query_mem_desc_.getEntryCount();
1135  const size_t crt_buffer_size = crt_storage_row_count * column_width_size;
1136  const size_t column_offset = storage_->query_mem_desc_.getColOffInBytes(column_idx);
1137  const int8_t* storage_buffer = storage_->getUnderlyingBuffer() + column_offset;
1138  CHECK(crt_buffer_size <= output_buffer_size);
1139  std::memcpy(output_buffer, storage_buffer, crt_buffer_size);
1140 
1141  out_buff_offset += crt_buffer_size;
1142 
1143  // the appended storages:
1144  for (size_t i = 0; i < appended_storage_.size(); i++) {
1145  const size_t crt_storage_row_count =
1146  appended_storage_[i]->query_mem_desc_.getEntryCount();
1147  if (crt_storage_row_count == 0) {
1148  // skip an empty appended storage
1149  continue;
1150  }
1151  CHECK_LT(out_buff_offset, output_buffer_size);
1152  const size_t crt_buffer_size = crt_storage_row_count * column_width_size;
1153  const size_t column_offset =
1154  appended_storage_[i]->query_mem_desc_.getColOffInBytes(column_idx);
1155  const int8_t* storage_buffer =
1156  appended_storage_[i]->getUnderlyingBuffer() + column_offset;
1157  CHECK(out_buff_offset + crt_buffer_size <= output_buffer_size);
1158  std::memcpy(output_buffer + out_buff_offset, storage_buffer, crt_buffer_size);
1159 
1160  out_buff_offset += crt_buffer_size;
1161  }
1162 }
AppendedStorage appended_storage_
Definition: ResultSet.h:906
QueryMemoryDescriptor query_mem_desc_
Definition: ResultSet.h:904
std::unique_ptr< ResultSetStorage > storage_
Definition: ResultSet.h:905
const int8_t getPaddedSlotWidthBytes(const size_t slot_idx) const
#define CHECK_LT(x, y)
Definition: Logger.h:233
#define CHECK(condition)
Definition: Logger.h:223
bool isDirectColumnarConversionPossible() const
Definition: ResultSet.cpp:1357

+ Here is the call graph for this function:

void ResultSet::create_active_buffer_set ( CountDistinctSet count_distinct_active_buffer_set) const
private
Comparator ResultSet::createComparator ( const std::list< Analyzer::OrderEntry > &  order_entries,
const PermutationView  permutation,
const Executor executor,
const bool  single_threaded 
)
inlineprivate

Definition at line 836 of file ResultSet.h.

References DEBUG_TIMER, QueryMemoryDescriptor::didOutputColumnar(), and query_mem_desc_.

839  {
840  auto timer = DEBUG_TIMER(__func__);
842  return [rsc = ResultSetComparator<ColumnWiseTargetAccessor>(
843  order_entries, this, permutation, executor, single_threaded)](
844  const PermutationIdx lhs, const PermutationIdx rhs) {
845  return rsc(lhs, rhs);
846  };
847  } else {
848  return [rsc = ResultSetComparator<RowWiseTargetAccessor>(
849  order_entries, this, permutation, executor, single_threaded)](
850  const PermutationIdx lhs, const PermutationIdx rhs) {
851  return rsc(lhs, rhs);
852  };
853  }
854  }
QueryMemoryDescriptor query_mem_desc_
Definition: ResultSet.h:904
uint32_t PermutationIdx
Definition: ResultSet.h:154
#define DEBUG_TIMER(name)
Definition: Logger.h:370

+ Here is the call graph for this function:

bool ResultSet::definitelyHasNoRows ( ) const

Definition at line 668 of file ResultSet.cpp.

668  {
669  return (!storage_ && !estimator_ && !just_explain_) || cached_row_count_ == 0;
670 }
std::unique_ptr< ResultSetStorage > storage_
Definition: ResultSet.h:905
const bool just_explain_
Definition: ResultSet.h:940
std::atomic< int64_t > cached_row_count_
Definition: ResultSet.h:942
const std::shared_ptr< const Analyzer::Estimator > estimator_
Definition: ResultSet.h:929
bool ResultSet::didOutputColumnar ( ) const
inline

Definition at line 537 of file ResultSet.h.

References QueryMemoryDescriptor::didOutputColumnar(), and query_mem_desc_.

537 { return this->query_mem_desc_.didOutputColumnar(); }
QueryMemoryDescriptor query_mem_desc_
Definition: ResultSet.h:904

+ Here is the call graph for this function:

void ResultSet::doBaselineSort ( const ExecutorDeviceType  device_type,
const std::list< Analyzer::OrderEntry > &  order_entries,
const size_t  top_n,
const Executor executor 
)
private
void ResultSet::dropFirstN ( const size_t  n)

Definition at line 57 of file ResultSet.cpp.

References anonymous_namespace{Utm.h}::n.

57  {
59  drop_first_ = n;
60 }
void invalidateCachedRowCount() const
Definition: ResultSet.cpp:605
size_t drop_first_
Definition: ResultSet.h:909
constexpr double n
Definition: Utm.h:38
void ResultSet::eachCellInColumn ( RowIterationState state,
CellCallback const &  func 
)

Definition at line 485 of file ResultSet.cpp.

References advance_slot(), advance_to_next_columnar_target_buff(), ResultSet::RowIterationState::agg_idx_, align_to_int64(), ResultSet::RowIterationState::buf_ptr_, CHECK, CHECK_GE, CHECK_LT, ResultSet::RowIterationState::compact_sz1_, ResultSet::RowIterationState::cur_target_idx_, QueryMemoryDescriptor::didOutputColumnar(), get_cols_ptr(), get_key_bytes_rowwise(), QueryMemoryDescriptor::getPaddedSlotWidthBytes(), ResultSet::RowIterationState::prev_target_idx_, read_int_from_buff(), and row_ptr_rowwise().

485  {
486  size_t const target_idx = state.cur_target_idx_;
487  QueryMemoryDescriptor& storage_qmd = storage_->query_mem_desc_;
488  CHECK_LT(target_idx, lazy_fetch_info_.size());
489  auto& col_lazy_fetch = lazy_fetch_info_[target_idx];
490  CHECK(col_lazy_fetch.is_lazily_fetched);
491  int const target_size = storage_->targets_[target_idx].sql_type.get_size();
492  CHECK_LT(0, target_size) << storage_->targets_[target_idx].toString();
493  size_t const nrows = storage_->binSearchRowCount();
494  if (storage_qmd.didOutputColumnar()) {
495  // Logic based on ResultSet::ColumnWiseTargetAccessor::initializeOffsetsForStorage()
496  if (state.buf_ptr_ == nullptr) {
497  state.buf_ptr_ = get_cols_ptr(storage_->buff_, storage_qmd);
498  state.compact_sz1_ = storage_qmd.getPaddedSlotWidthBytes(state.agg_idx_)
499  ? storage_qmd.getPaddedSlotWidthBytes(state.agg_idx_)
501  }
502  for (size_t j = state.prev_target_idx_; j < state.cur_target_idx_; ++j) {
503  size_t const next_target_idx = j + 1; // Set state to reflect next target_idx j+1
504  state.buf_ptr_ = advance_to_next_columnar_target_buff(
505  state.buf_ptr_, storage_qmd, state.agg_idx_);
506  auto const& next_agg_info = storage_->targets_[next_target_idx];
507  state.agg_idx_ =
508  advance_slot(state.agg_idx_, next_agg_info, separate_varlen_storage_valid_);
509  state.compact_sz1_ = storage_qmd.getPaddedSlotWidthBytes(state.agg_idx_)
510  ? storage_qmd.getPaddedSlotWidthBytes(state.agg_idx_)
512  }
513  for (size_t i = 0; i < nrows; ++i) {
514  int8_t const* const pos_ptr = state.buf_ptr_ + i * state.compact_sz1_;
515  int64_t pos = read_int_from_buff(pos_ptr, target_size);
516  CHECK_GE(pos, 0);
517  auto& frag_col_buffers = getColumnFrag(0, target_idx, pos);
518  CHECK_LT(size_t(col_lazy_fetch.local_col_id), frag_col_buffers.size());
519  int8_t const* const col_frag = frag_col_buffers[col_lazy_fetch.local_col_id];
520  func(col_frag + pos * target_size);
521  }
522  } else {
523  size_t const key_bytes_with_padding =
525  for (size_t i = 0; i < nrows; ++i) {
526  int8_t const* const keys_ptr = row_ptr_rowwise(storage_->buff_, storage_qmd, i);
527  int8_t const* const rowwise_target_ptr = keys_ptr + key_bytes_with_padding;
528  int64_t pos = *reinterpret_cast<int64_t const*>(rowwise_target_ptr);
529  auto& frag_col_buffers = getColumnFrag(0, target_idx, pos);
530  CHECK_LT(size_t(col_lazy_fetch.local_col_id), frag_col_buffers.size());
531  int8_t const* const col_frag = frag_col_buffers[col_lazy_fetch.local_col_id];
532  func(col_frag + pos * target_size);
533  }
534  }
535 }
T advance_to_next_columnar_target_buff(T target_ptr, const QueryMemoryDescriptor &query_mem_desc, const size_t target_slot_idx)
QueryMemoryDescriptor query_mem_desc_
Definition: ResultSet.h:904
#define CHECK_GE(x, y)
Definition: Logger.h:236
std::unique_ptr< ResultSetStorage > storage_
Definition: ResultSet.h:905
size_t getEffectiveKeyWidth() const
int64_t read_int_from_buff(const int8_t *ptr, const int8_t compact_sz)
size_t advance_slot(const size_t j, const TargetInfo &target_info, const bool separate_varlen_storage)
const int8_t getPaddedSlotWidthBytes(const size_t slot_idx) const
T row_ptr_rowwise(T buff, const QueryMemoryDescriptor &query_mem_desc, const size_t entry_idx)
#define CHECK_LT(x, y)
Definition: Logger.h:233
#define CHECK(condition)
Definition: Logger.h:223
std::vector< ColumnLazyFetchInfo > lazy_fetch_info_
Definition: ResultSet.h:924
bool separate_varlen_storage_valid_
Definition: ResultSet.h:938
T get_cols_ptr(T buff, const QueryMemoryDescriptor &query_mem_desc)
size_t get_key_bytes_rowwise(const QueryMemoryDescriptor &query_mem_desc)
FORCE_INLINE HOST DEVICE T align_to_int64(T addr)
const std::vector< const int8_t * > & getColumnFrag(const size_t storge_idx, const size_t col_logical_idx, int64_t &global_idx) const

+ Here is the call graph for this function:

size_t ResultSet::entryCount ( ) const

Returns the number of entries the result set is allocated to hold.

Note that this can be greater than or equal to the actual number of valid rows in the result set, whether due to a SQL LIMIT/OFFSET applied or because the result set representation is inherently sparse (i.e. baseline hash group by)

For getting the number of valid rows in the result set (inclusive of any applied LIMIT and/or OFFSET), use ResultSet::rowCount(). To just test whether there are any valid rows, also prefer ResultSet::rowCount(), as a return value from entryCount() greater than 0 does not necessarily mean the result set is non-empty.

Definition at line 752 of file ResultSetIteration.cpp.

References QueryMemoryDescriptor::getEntryCount(), permutation_, and query_mem_desc_.

752  {
753  return permutation_.empty() ? query_mem_desc_.getEntryCount() : permutation_.size();
754 }
Permutation permutation_
Definition: ResultSet.h:912
QueryMemoryDescriptor query_mem_desc_
Definition: ResultSet.h:904

+ Here is the call graph for this function:

void ResultSet::fillOneEntry ( const std::vector< int64_t > &  entry)
inline

Definition at line 410 of file ResultSet.h.

References CHECK, and storage_.

410  {
411  CHECK(storage_);
412  if (storage_->query_mem_desc_.didOutputColumnar()) {
413  storage_->fillOneEntryColWise(entry);
414  } else {
415  storage_->fillOneEntryRowWise(entry);
416  }
417  }
std::unique_ptr< ResultSetStorage > storage_
Definition: ResultSet.h:905
#define CHECK(condition)
Definition: Logger.h:223
ResultSet::StorageLookupResult ResultSet::findStorage ( const size_t  entry_idx) const
private

Definition at line 939 of file ResultSet.cpp.

Referenced by getVarlenOutputInfo(), and makeGeoTargetValue().

939  {
940  auto [stg_idx, fixedup_entry_idx] = getStorageIndex(entry_idx);
941  return {stg_idx ? appended_storage_[stg_idx - 1].get() : storage_.get(),
942  fixedup_entry_idx,
943  stg_idx};
944 }
std::pair< size_t, size_t > getStorageIndex(const size_t entry_idx) const
Definition: ResultSet.cpp:914
AppendedStorage appended_storage_
Definition: ResultSet.h:906
std::unique_ptr< ResultSetStorage > storage_
Definition: ResultSet.h:905

+ Here is the caller graph for this function:

void ResultSet::fixupCountDistinctPointers ( )
private
QueryMemoryDescriptor ResultSet::fixupQueryMemoryDescriptor ( const QueryMemoryDescriptor query_mem_desc)
static

Definition at line 756 of file ResultSet.cpp.

References QueryMemoryDescriptor::didOutputColumnar(), and query_mem_desc.

Referenced by GpuSharedMemCodeBuilder::codegenInitialization(), GpuSharedMemCodeBuilder::codegenReduction(), Executor::executeTableFunction(), QueryExecutionContext::groupBufferToDeinterleavedResults(), QueryMemoryInitializer::initRowGroups(), QueryMemoryInitializer::QueryMemoryInitializer(), and Executor::reduceMultiDeviceResults().

757  {
758  auto query_mem_desc_copy = query_mem_desc;
759  query_mem_desc_copy.resetGroupColWidths(
760  std::vector<int8_t>(query_mem_desc_copy.getGroupbyColCount(), 8));
761  if (query_mem_desc.didOutputColumnar()) {
762  return query_mem_desc_copy;
763  }
764  query_mem_desc_copy.alignPaddedSlots();
765  return query_mem_desc_copy;
766 }

+ Here is the call graph for this function:

+ Here is the caller graph for this function:

size_t ResultSet::getBufferSizeBytes ( const ExecutorDeviceType  device_type) const

Definition at line 756 of file ResultSetIteration.cpp.

References CHECK, and storage_.

756  {
757  CHECK(storage_);
758  return storage_->query_mem_desc_.getBufferSizeBytes(device_type);
759 }
std::unique_ptr< ResultSetStorage > storage_
Definition: ResultSet.h:905
#define CHECK(condition)
Definition: Logger.h:223
SQLTypeInfo ResultSet::getColType ( const size_t  col_idx) const

Definition at line 417 of file ResultSet.cpp.

References CHECK_LT, kAVG, kDOUBLE, and kTEXT.

417  {
418  if (just_explain_) {
419  return SQLTypeInfo(kTEXT, false);
420  }
421  CHECK_LT(col_idx, targets_.size());
422  return targets_[col_idx].agg_kind == kAVG ? SQLTypeInfo(kDOUBLE, false)
423  : targets_[col_idx].sql_type;
424 }
const bool just_explain_
Definition: ResultSet.h:940
const std::vector< TargetInfo > targets_
Definition: ResultSet.h:901
#define CHECK_LT(x, y)
Definition: Logger.h:233
Definition: sqltypes.h:52
Definition: sqldefs.h:74
template<typename ENTRY_TYPE >
ENTRY_TYPE ResultSet::getColumnarBaselineEntryAt ( const size_t  row_idx,
const size_t  target_idx,
const size_t  slot_idx 
) const
private
template<typename ENTRY_TYPE >
ENTRY_TYPE ResultSet::getColumnarBaselineEntryAt ( const size_t  row_idx,
const size_t  target_idx,
const size_t  slot_idx 
) const

Directly accesses the result set's storage buffer for a particular data type (columnar output, baseline hash group by)

NOTE: Currently, only used in direct columnarization

Definition at line 1295 of file ResultSetIteration.cpp.

References CHECK_NE, and storage_.

1297  {
1298  CHECK_NE(storage_->query_mem_desc_.targetGroupbyIndicesSize(), size_t(0));
1299  const auto key_width = storage_->query_mem_desc_.getEffectiveKeyWidth();
1300  const auto column_offset =
1301  (storage_->query_mem_desc_.getTargetGroupbyIndex(target_idx) < 0)
1302  ? storage_->query_mem_desc_.getColOffInBytes(slot_idx)
1303  : storage_->query_mem_desc_.getTargetGroupbyIndex(target_idx) * key_width *
1304  storage_->query_mem_desc_.getEntryCount();
1305  const auto column_buffer = storage_->getUnderlyingBuffer() + column_offset;
1306  return reinterpret_cast<const ENTRY_TYPE*>(column_buffer)[row_idx];
1307 }
std::unique_ptr< ResultSetStorage > storage_
Definition: ResultSet.h:905
#define CHECK_NE(x, y)
Definition: Logger.h:232
const int8_t * ResultSet::getColumnarBuffer ( size_t  column_idx) const

Definition at line 1388 of file ResultSet.cpp.

References CHECK.

1388  {
1390  return storage_->getUnderlyingBuffer() + query_mem_desc_.getColOffInBytes(column_idx);
1391 }
QueryMemoryDescriptor query_mem_desc_
Definition: ResultSet.h:904
std::unique_ptr< ResultSetStorage > storage_
Definition: ResultSet.h:905
bool isZeroCopyColumnarConversionPossible(size_t column_idx) const
Definition: ResultSet.cpp:1379
#define CHECK(condition)
Definition: Logger.h:223
size_t getColOffInBytes(const size_t col_idx) const
template<typename ENTRY_TYPE >
ENTRY_TYPE ResultSet::getColumnarPerfectHashEntryAt ( const size_t  row_idx,
const size_t  target_idx,
const size_t  slot_idx 
) const
private
template<typename ENTRY_TYPE >
ENTRY_TYPE ResultSet::getColumnarPerfectHashEntryAt ( const size_t  row_idx,
const size_t  target_idx,
const size_t  slot_idx 
) const

Directly accesses the result set's storage buffer for a particular data type (columnar output, perfect hash group by)

NOTE: Currently, only used in direct columnarization

Definition at line 1241 of file ResultSetIteration.cpp.

References storage_.

1243  {
1244  const size_t column_offset = storage_->query_mem_desc_.getColOffInBytes(slot_idx);
1245  const int8_t* storage_buffer = storage_->getUnderlyingBuffer() + column_offset;
1246  return reinterpret_cast<const ENTRY_TYPE*>(storage_buffer)[row_idx];
1247 }
std::unique_ptr< ResultSetStorage > storage_
Definition: ResultSet.h:905
const std::vector< const int8_t * > & ResultSet::getColumnFrag ( const size_t  storge_idx,
const size_t  col_logical_idx,
int64_t &  global_idx 
) const
private

Definition at line 1088 of file ResultSetIteration.cpp.

References CHECK_EQ, CHECK_GE, CHECK_LE, CHECK_LT, col_buffers_, consistent_frag_sizes_, frag_offsets_, and anonymous_namespace{ResultSetIteration.cpp}::get_frag_id_and_local_idx().

Referenced by lazyReadInt(), makeGeoTargetValue(), makeTargetValue(), and makeVarlenTargetValue().

1090  {
1091  CHECK_LT(static_cast<size_t>(storage_idx), col_buffers_.size());
1092  if (col_buffers_[storage_idx].size() > 1) {
1093  int64_t frag_id = 0;
1094  int64_t local_idx = global_idx;
1095  if (consistent_frag_sizes_[storage_idx][col_logical_idx] != -1) {
1096  frag_id = global_idx / consistent_frag_sizes_[storage_idx][col_logical_idx];
1097  local_idx = global_idx % consistent_frag_sizes_[storage_idx][col_logical_idx];
1098  } else {
1099  std::tie(frag_id, local_idx) = get_frag_id_and_local_idx(
1100  frag_offsets_[storage_idx], col_logical_idx, global_idx);
1101  CHECK_LE(local_idx, global_idx);
1102  }
1103  CHECK_GE(frag_id, int64_t(0));
1104  CHECK_LT(static_cast<size_t>(frag_id), col_buffers_[storage_idx].size());
1105  global_idx = local_idx;
1106  return col_buffers_[storage_idx][frag_id];
1107  } else {
1108  CHECK_EQ(size_t(1), col_buffers_[storage_idx].size());
1109  return col_buffers_[storage_idx][0];
1110  }
1111 }
#define CHECK_EQ(x, y)
Definition: Logger.h:231
#define CHECK_GE(x, y)
Definition: Logger.h:236
#define CHECK_LT(x, y)
Definition: Logger.h:233
#define CHECK_LE(x, y)
Definition: Logger.h:234
std::vector< std::vector< std::vector< const int8_t * > > > col_buffers_
Definition: ResultSet.h:925
std::vector< std::vector< int64_t > > consistent_frag_sizes_
Definition: ResultSet.h:927
std::vector< std::vector< std::vector< int64_t > > > frag_offsets_
Definition: ResultSet.h:926
std::pair< int64_t, int64_t > get_frag_id_and_local_idx(const std::vector< std::vector< T >> &frag_offsets, const size_t tab_or_col_idx, const int64_t global_idx)

+ Here is the call graph for this function:

+ Here is the caller graph for this function:

size_t ResultSet::getCurrentRowBufferIndex ( ) const

Definition at line 291 of file ResultSet.cpp.

291  {
292  if (crt_row_buff_idx_ == 0) {
293  throw std::runtime_error("current row buffer iteration index is undefined");
294  }
295  return crt_row_buff_idx_ - 1;
296 }
size_t crt_row_buff_idx_
Definition: ResultSet.h:907
Data_Namespace::DataMgr* ResultSet::getDataManager ( ) const
private
int8_t * ResultSet::getDeviceEstimatorBuffer ( ) const

Definition at line 686 of file ResultSet.cpp.

References CHECK, and GPU.

686  {
690 }
virtual int8_t * getMemoryPtr()=0
const ExecutorDeviceType device_type_
Definition: ResultSet.h:902
#define CHECK(condition)
Definition: Logger.h:223
Data_Namespace::AbstractBuffer * device_estimator_buffer_
Definition: ResultSet.h:930
int ResultSet::getDeviceId ( ) const

Definition at line 752 of file ResultSet.cpp.

752  {
753  return device_id_;
754 }
const int device_id_
Definition: ResultSet.h:903
ExecutorDeviceType ResultSet::getDeviceType ( ) const

Definition at line 250 of file ResultSet.cpp.

250  {
251  return device_type_;
252 }
const ExecutorDeviceType device_type_
Definition: ResultSet.h:902
int64_t ResultSet::getDistinctBufferRefFromBufferRowwise ( int8_t *  rowwise_target_ptr,
const TargetInfo target_info 
) const
private
template<typename ENTRY_TYPE , QueryDescriptionType QUERY_TYPE, bool COLUMNAR_FORMAT>
ENTRY_TYPE ResultSet::getEntryAt ( const size_t  row_idx,
const size_t  target_idx,
const size_t  slot_idx 
) const
template<typename ENTRY_TYPE , QueryDescriptionType QUERY_TYPE, bool COLUMNAR_FORMAT>
ENTRY_TYPE ResultSet::getEntryAt ( const size_t  row_idx,
const size_t  target_idx,
const size_t  slot_idx 
) const

Definition at line 1165 of file ResultSetIteration.cpp.

References GroupByBaselineHash, GroupByPerfectHash, and UNREACHABLE.

1167  {
1168  if constexpr (QUERY_TYPE == QueryDescriptionType::GroupByPerfectHash) { // NOLINT
1169  if constexpr (COLUMNAR_FORMAT) { // NOLINT
1170  return getColumnarPerfectHashEntryAt<ENTRY_TYPE>(row_idx, target_idx, slot_idx);
1171  } else {
1172  return getRowWisePerfectHashEntryAt<ENTRY_TYPE>(row_idx, target_idx, slot_idx);
1173  }
1174  } else if constexpr (QUERY_TYPE == QueryDescriptionType::GroupByBaselineHash) {
1175  if constexpr (COLUMNAR_FORMAT) { // NOLINT
1176  return getColumnarBaselineEntryAt<ENTRY_TYPE>(row_idx, target_idx, slot_idx);
1177  } else {
1178  return getRowWiseBaselineEntryAt<ENTRY_TYPE>(row_idx, target_idx, slot_idx);
1179  }
1180  } else {
1181  UNREACHABLE() << "Invalid query type is used";
1182  return 0;
1183  }
1184 }
#define UNREACHABLE()
Definition: Logger.h:267
const long ResultSet::getExecTime ( ) const
inline

Definition at line 483 of file ResultSet.h.

References query_exec_time_.

483 { return query_exec_time_; }
size_t query_exec_time_
Definition: ResultSet.h:951
const Executor* ResultSet::getExecutor ( ) const
inline

Definition at line 605 of file ResultSet.h.

References QueryMemoryDescriptor::getExecutor(), and query_mem_desc_.

605 { return query_mem_desc_.getExecutor(); }
QueryMemoryDescriptor query_mem_desc_
Definition: ResultSet.h:904
const Executor * getExecutor() const

+ Here is the call graph for this function:

std::string ResultSet::getExplanation ( ) const
inline

Definition at line 395 of file ResultSet.h.

References explanation_, and just_explain_.

395  {
396  if (just_explain_) {
397  return explanation_;
398  }
399  return {};
400  }
const bool just_explain_
Definition: ResultSet.h:940
std::string explanation_
Definition: ResultSet.h:939
GeoReturnType ResultSet::getGeoReturnType ( ) const
inline

Definition at line 528 of file ResultSet.h.

References geo_return_type_.

528 { return geo_return_type_; }
GeoReturnType geo_return_type_
Definition: ResultSet.h:946
int ResultSet::getGpuCount ( ) const
private
int8_t * ResultSet::getHostEstimatorBuffer ( ) const

Definition at line 692 of file ResultSet.cpp.

692  {
693  return host_estimator_buffer_;
694 }
int8_t * host_estimator_buffer_
Definition: ResultSet.h:931
std::unordered_set<size_t> ResultSet::getInputTableKeys ( ) const
inline

Definition at line 489 of file ResultSet.h.

References input_table_keys_.

489 { return input_table_keys_; }
std::unordered_set< size_t > input_table_keys_
Definition: ResultSet.h:953
const std::vector<ColumnLazyFetchInfo>& ResultSet::getLazyFetchInfo ( ) const
inline

Definition at line 557 of file ResultSet.h.

References lazy_fetch_info_.

557  {
558  return lazy_fetch_info_;
559  }
std::vector< ColumnLazyFetchInfo > lazy_fetch_info_
Definition: ResultSet.h:924
size_t ResultSet::getLimit ( ) const

Definition at line 1302 of file ResultSet.cpp.

1302  {
1303  return keep_first_;
1304 }
size_t keep_first_
Definition: ResultSet.h:910
size_t ResultSet::getNDVEstimator ( ) const

Definition at line 33 of file CardinalityEstimator.cpp.

References bitmap_set_size(), CHECK, CHECK_LE, LOG, and logger::WARNING.

33  {
34  CHECK(dynamic_cast<const Analyzer::NDVEstimator*>(estimator_.get()));
36  auto bits_set = bitmap_set_size(host_estimator_buffer_, estimator_->getBufferSize());
37  if (bits_set == 0) {
38  // empty result set, return 1 for a groups buffer size of 1
39  return 1;
40  }
41  const auto total_bits = estimator_->getBufferSize() * 8;
42  CHECK_LE(bits_set, total_bits);
43  const auto unset_bits = total_bits - bits_set;
44  const auto ratio = static_cast<double>(unset_bits) / total_bits;
45  if (ratio == 0.) {
46  LOG(WARNING)
47  << "Failed to get a high quality cardinality estimation, falling back to "
48  "approximate group by buffer size guess.";
49  return 0;
50  }
51  return -static_cast<double>(total_bits) * log(ratio);
52 }
#define LOG(tag)
Definition: Logger.h:217
const std::shared_ptr< const Analyzer::Estimator > estimator_
Definition: ResultSet.h:929
#define CHECK_LE(x, y)
Definition: Logger.h:234
int8_t * host_estimator_buffer_
Definition: ResultSet.h:931
#define CHECK(condition)
Definition: Logger.h:223
size_t bitmap_set_size(const int8_t *bitmap, const size_t bitmap_byte_sz)
Definition: CountDistinct.h:39

+ Here is the call graph for this function:

std::vector< TargetValue > ResultSet::getNextRow ( const bool  translate_strings,
const bool  decimal_to_double 
) const

Definition at line 298 of file ResultSetIteration.cpp.

299  {
300  std::lock_guard<std::mutex> lock(row_iteration_mutex_);
301  if (!storage_ && !just_explain_) {
302  return {};
303  }
304  return getNextRowUnlocked(translate_strings, decimal_to_double);
305 }
std::mutex row_iteration_mutex_
Definition: ResultSet.h:943
double decimal_to_double(const SQLTypeInfo &otype, int64_t oval)
std::unique_ptr< ResultSetStorage > storage_
Definition: ResultSet.h:905
const bool just_explain_
Definition: ResultSet.h:940
std::vector< TargetValue > getNextRowUnlocked(const bool translate_strings, const bool decimal_to_double) const
std::vector< TargetValue > ResultSet::getNextRowImpl ( const bool  translate_strings,
const bool  decimal_to_double 
) const
private

Definition at line 320 of file ResultSetIteration.cpp.

References CHECK, and CHECK_EQ.

321  {
322  size_t entry_buff_idx = 0;
323  do {
325  return {};
326  }
327 
328  entry_buff_idx = advanceCursorToNextEntry();
329 
330  if (crt_row_buff_idx_ >= entryCount()) {
332  return {};
333  }
335  ++fetched_so_far_;
336 
337  } while (drop_first_ && fetched_so_far_ <= drop_first_);
338 
339  auto row = getRowAt(entry_buff_idx, translate_strings, decimal_to_double, false);
340  CHECK(!row.empty());
341 
342  return row;
343 }
#define CHECK_EQ(x, y)
Definition: Logger.h:231
double decimal_to_double(const SQLTypeInfo &otype, int64_t oval)
size_t keep_first_
Definition: ResultSet.h:910
size_t drop_first_
Definition: ResultSet.h:909
std::vector< TargetValue > getRowAt(const size_t index) const
size_t entryCount() const
Returns the number of entries the result set is allocated to hold.
#define CHECK(condition)
Definition: Logger.h:223
size_t fetched_so_far_
Definition: ResultSet.h:908
size_t crt_row_buff_idx_
Definition: ResultSet.h:907
size_t advanceCursorToNextEntry() const
std::vector< TargetValue > ResultSet::getNextRowUnlocked ( const bool  translate_strings,
const bool  decimal_to_double 
) const
private

Definition at line 307 of file ResultSetIteration.cpp.

309  {
310  if (just_explain_) {
311  if (fetched_so_far_) {
312  return {};
313  }
314  fetched_so_far_ = 1;
315  return {explanation_};
316  }
317  return getNextRowImpl(translate_strings, decimal_to_double);
318 }
double decimal_to_double(const SQLTypeInfo &otype, int64_t oval)
const bool just_explain_
Definition: ResultSet.h:940
std::string explanation_
Definition: ResultSet.h:939
std::vector< TargetValue > getNextRowImpl(const bool translate_strings, const bool decimal_to_double) const
size_t fetched_so_far_
Definition: ResultSet.h:908
size_t ResultSet::getNumColumnsLazyFetched ( ) const
inline

Definition at line 566 of file ResultSet.h.

References lazy_fetch_info_.

566  {
567  auto is_lazy = [](auto const& info) { return info.is_lazily_fetched; };
568  return std::count_if(lazy_fetch_info_.begin(), lazy_fetch_info_.end(), is_lazy);
569  }
std::vector< ColumnLazyFetchInfo > lazy_fetch_info_
Definition: ResultSet.h:924
OneIntegerColumnRow ResultSet::getOneColRow ( const size_t  index) const

Definition at line 236 of file ResultSetIteration.cpp.

References align_to_int64(), CHECK, get_key_bytes_rowwise(), and row_ptr_rowwise().

236  {
237  const auto storage_lookup_result = findStorage(global_entry_idx);
238  const auto storage = storage_lookup_result.storage_ptr;
239  const auto local_entry_idx = storage_lookup_result.fixedup_entry_idx;
240  if (storage->isEmptyEntry(local_entry_idx)) {
241  return {0, false};
242  }
243  const auto buff = storage->buff_;
244  CHECK(buff);
246  const auto keys_ptr = row_ptr_rowwise(buff, query_mem_desc_, local_entry_idx);
247  const auto key_bytes_with_padding =
249  const auto rowwise_target_ptr = keys_ptr + key_bytes_with_padding;
250  const auto tv = getTargetValueFromBufferRowwise(rowwise_target_ptr,
251  keys_ptr,
252  global_entry_idx,
253  targets_.front(),
254  0,
255  0,
256  false,
257  false,
258  false);
259  const auto scalar_tv = boost::get<ScalarTargetValue>(&tv);
260  CHECK(scalar_tv);
261  const auto ival_ptr = boost::get<int64_t>(scalar_tv);
262  CHECK(ival_ptr);
263  return {*ival_ptr, true};
264 }
QueryMemoryDescriptor query_mem_desc_
Definition: ResultSet.h:904
TargetValue getTargetValueFromBufferRowwise(int8_t *rowwise_target_ptr, int8_t *keys_ptr, const size_t entry_buff_idx, const TargetInfo &target_info, const size_t target_logical_idx, const size_t slot_idx, const bool translate_strings, const bool decimal_to_double, const bool fixup_count_distinct_pointers) const
const std::vector< TargetInfo > targets_
Definition: ResultSet.h:901
StorageLookupResult findStorage(const size_t entry_idx) const
Definition: ResultSet.cpp:939
T row_ptr_rowwise(T buff, const QueryMemoryDescriptor &query_mem_desc, const size_t entry_idx)
#define CHECK(condition)
Definition: Logger.h:223
size_t get_key_bytes_rowwise(const QueryMemoryDescriptor &query_mem_desc)
FORCE_INLINE HOST DEVICE T align_to_int64(T addr)

+ Here is the call graph for this function:

const int8_t ResultSet::getPaddedSlotWidthBytes ( const size_t  slot_idx) const
inline

Definition at line 546 of file ResultSet.h.

References QueryMemoryDescriptor::getPaddedSlotWidthBytes(), and query_mem_desc_.

546  {
547  return query_mem_desc_.getPaddedSlotWidthBytes(slot_idx);
548  }
QueryMemoryDescriptor query_mem_desc_
Definition: ResultSet.h:904
const int8_t getPaddedSlotWidthBytes(const size_t slot_idx) const

+ Here is the call graph for this function:

const Permutation & ResultSet::getPermutationBuffer ( ) const

Definition at line 862 of file ResultSet.cpp.

862  {
863  return permutation_;
864 }
Permutation permutation_
Definition: ResultSet.h:912
QueryDescriptionType ResultSet::getQueryDescriptionType ( ) const
inline

Definition at line 542 of file ResultSet.h.

References QueryMemoryDescriptor::getQueryDescriptionType(), and query_mem_desc_.

542  {
544  }
QueryMemoryDescriptor query_mem_desc_
Definition: ResultSet.h:904
QueryDescriptionType getQueryDescriptionType() const

+ Here is the call graph for this function:

const QueryMemoryDescriptor & ResultSet::getQueryMemDesc ( ) const

Definition at line 672 of file ResultSet.cpp.

References CHECK.

672  {
673  CHECK(storage_);
674  return storage_->query_mem_desc_;
675 }
std::unique_ptr< ResultSetStorage > storage_
Definition: ResultSet.h:905
#define CHECK(condition)
Definition: Logger.h:223
const QueryPlanHash ResultSet::getQueryPlanHash ( )
inline

Definition at line 487 of file ResultSet.h.

References query_plan_.

487 { return query_plan_; }
QueryPlanHash query_plan_
Definition: ResultSet.h:952
int64_t ResultSet::getQueueTime ( ) const

Definition at line 722 of file ResultSet.cpp.

int64_t ResultSet::getRenderTime ( ) const

Definition at line 727 of file ResultSet.cpp.

727  {
728  return timings_.render_time;
729 }
QueryExecutionTimings timings_
Definition: ResultSet.h:917
std::vector<TargetValue> ResultSet::getRowAt ( const size_t  index) const
TargetValue ResultSet::getRowAt ( const size_t  row_idx,
const size_t  col_idx,
const bool  translate_strings,
const bool  decimal_to_double = true 
) const
std::vector<TargetValue> ResultSet::getRowAt ( const size_t  index,
const bool  translate_strings,
const bool  decimal_to_double,
const bool  fixup_count_distinct_pointers,
const std::vector< bool > &  targets_to_skip = {} 
) const
private
std::vector< TargetValue > ResultSet::getRowAtNoTranslations ( const size_t  index,
const std::vector< bool > &  targets_to_skip = {} 
) const

Definition at line 275 of file ResultSetIteration.cpp.

277  {
278  if (logical_index >= entryCount()) {
279  return {};
280  }
281  const auto entry_idx =
282  permutation_.empty() ? logical_index : permutation_[logical_index];
283  return getRowAt(entry_idx, false, false, false, targets_to_skip);
284 }
Permutation permutation_
Definition: ResultSet.h:912
std::vector< TargetValue > getRowAt(const size_t index) const
size_t entryCount() const
Returns the number of entries the result set is allocated to hold.
std::shared_ptr<RowSetMemoryOwner> ResultSet::getRowSetMemOwner ( ) const
inline

Definition at line 431 of file ResultSet.h.

References row_set_mem_owner_.

431  {
432  return row_set_mem_owner_;
433  }
std::shared_ptr< RowSetMemoryOwner > row_set_mem_owner_
Definition: ResultSet.h:911
template<typename ENTRY_TYPE >
ENTRY_TYPE ResultSet::getRowWiseBaselineEntryAt ( const size_t  row_idx,
const size_t  target_idx,
const size_t  slot_idx 
) const
private
template<typename ENTRY_TYPE >
ENTRY_TYPE ResultSet::getRowWiseBaselineEntryAt ( const size_t  row_idx,
const size_t  target_idx,
const size_t  slot_idx 
) const

Directly accesses the result set's storage buffer for a particular data type (row-wise output, baseline hash group by)

NOTE: Currently, only used in direct columnarization

Definition at line 1273 of file ResultSetIteration.cpp.

References CHECK_NE, row_ptr_rowwise(), and storage_.

1275  {
1276  CHECK_NE(storage_->query_mem_desc_.targetGroupbyIndicesSize(), size_t(0));
1277  const auto key_width = storage_->query_mem_desc_.getEffectiveKeyWidth();
1278  auto keys_ptr = row_ptr_rowwise(
1279  storage_->getUnderlyingBuffer(), storage_->query_mem_desc_, row_idx);
1280  const auto column_offset =
1281  (storage_->query_mem_desc_.getTargetGroupbyIndex(target_idx) < 0)
1282  ? storage_->query_mem_desc_.getColOffInBytes(slot_idx)
1283  : storage_->query_mem_desc_.getTargetGroupbyIndex(target_idx) * key_width;
1284  const auto storage_buffer = keys_ptr + column_offset;
1285  return *reinterpret_cast<const ENTRY_TYPE*>(storage_buffer);
1286 }
std::unique_ptr< ResultSetStorage > storage_
Definition: ResultSet.h:905
#define CHECK_NE(x, y)
Definition: Logger.h:232
T row_ptr_rowwise(T buff, const QueryMemoryDescriptor &query_mem_desc, const size_t entry_idx)

+ Here is the call graph for this function:

template<typename ENTRY_TYPE >
ENTRY_TYPE ResultSet::getRowWisePerfectHashEntryAt ( const size_t  row_idx,
const size_t  target_idx,
const size_t  slot_idx 
) const
private
template<typename ENTRY_TYPE >
ENTRY_TYPE ResultSet::getRowWisePerfectHashEntryAt ( const size_t  row_idx,
const size_t  target_idx,
const size_t  slot_idx 
) const

Directly accesses the result set's storage buffer for a particular data type (row-wise output, perfect hash group by)

NOTE: Currently, only used in direct columnarization

Definition at line 1256 of file ResultSetIteration.cpp.

References storage_.

1258  {
1259  const size_t row_offset = storage_->query_mem_desc_.getRowSize() * row_idx;
1260  const size_t column_offset = storage_->query_mem_desc_.getColOffInBytes(slot_idx);
1261  const int8_t* storage_buffer =
1262  storage_->getUnderlyingBuffer() + row_offset + column_offset;
1263  return *reinterpret_cast<const ENTRY_TYPE*>(storage_buffer);
1264 }
std::unique_ptr< ResultSetStorage > storage_
Definition: ResultSet.h:905
std::tuple< std::vector< bool >, size_t > ResultSet::getSingleSlotTargetBitmap ( ) const

Definition at line 1394 of file ResultSet.cpp.

References anonymous_namespace{RelAlgExecutor.cpp}::is_agg(), and kAVG.

1394  {
1395  std::vector<bool> target_bitmap(targets_.size(), true);
1396  size_t num_single_slot_targets = 0;
1397  for (size_t target_idx = 0; target_idx < targets_.size(); target_idx++) {
1398  const auto& sql_type = targets_[target_idx].sql_type;
1399  if (targets_[target_idx].is_agg && targets_[target_idx].agg_kind == kAVG) {
1400  target_bitmap[target_idx] = false;
1401  } else if (sql_type.is_varlen()) {
1402  target_bitmap[target_idx] = false;
1403  } else {
1404  num_single_slot_targets++;
1405  }
1406  }
1407  return std::make_tuple(std::move(target_bitmap), num_single_slot_targets);
1408 }
bool is_agg(const Analyzer::Expr *expr)
const std::vector< TargetInfo > targets_
Definition: ResultSet.h:901
Definition: sqldefs.h:74

+ Here is the call graph for this function:

std::vector< size_t > ResultSet::getSlotIndicesForTargetIndices ( ) const

Definition at line 1437 of file ResultSet.cpp.

References advance_slot().

1437  {
1438  std::vector<size_t> slot_indices(targets_.size(), 0);
1439  size_t slot_index = 0;
1440  for (size_t target_idx = 0; target_idx < targets_.size(); target_idx++) {
1441  slot_indices[target_idx] = slot_index;
1442  slot_index = advance_slot(slot_index, targets_[target_idx], false);
1443  }
1444  return slot_indices;
1445 }
const std::vector< TargetInfo > targets_
Definition: ResultSet.h:901
size_t advance_slot(const size_t j, const TargetInfo &target_info, const bool separate_varlen_storage)

+ Here is the call graph for this function:

const ResultSetStorage * ResultSet::getStorage ( ) const

Definition at line 409 of file ResultSet.cpp.

409  {
410  return storage_.get();
411 }
std::unique_ptr< ResultSetStorage > storage_
Definition: ResultSet.h:905
std::pair< size_t, size_t > ResultSet::getStorageIndex ( const size_t  entry_idx) const
private

Returns a (storageIdx, entryIdx) pair. storageIdx identifies the storage object: 0 refers to storage_, while a value i > 0 refers to appended_storage_[i - 1]. entryIdx is the local index of the entry within that storage object.

Definition at line 914 of file ResultSet.cpp.

References CHECK_NE, and UNREACHABLE.

Referenced by makeGeoTargetValue(), makeTargetValue(), and makeVarlenTargetValue().

914  {
915  size_t fixedup_entry_idx = entry_idx;
916  auto entry_count = storage_->query_mem_desc_.getEntryCount();
917  const bool is_rowwise_layout = !storage_->query_mem_desc_.didOutputColumnar();
918  if (fixedup_entry_idx < entry_count) {
919  return {0, fixedup_entry_idx};
920  }
921  fixedup_entry_idx -= entry_count;
922  for (size_t i = 0; i < appended_storage_.size(); ++i) {
923  const auto& desc = appended_storage_[i]->query_mem_desc_;
924  CHECK_NE(is_rowwise_layout, desc.didOutputColumnar());
925  entry_count = desc.getEntryCount();
926  if (fixedup_entry_idx < entry_count) {
927  return {i + 1, fixedup_entry_idx};
928  }
929  fixedup_entry_idx -= entry_count;
930  }
931  UNREACHABLE() << "entry_idx = " << entry_idx << ", query_mem_desc_.getEntryCount() = "
933  return {};
934 }
AppendedStorage appended_storage_
Definition: ResultSet.h:906
QueryMemoryDescriptor query_mem_desc_
Definition: ResultSet.h:904
#define UNREACHABLE()
Definition: Logger.h:267
std::unique_ptr< ResultSetStorage > storage_
Definition: ResultSet.h:905
#define CHECK_NE(x, y)
Definition: Logger.h:232

+ Here is the caller graph for this function:

const std::vector< std::string > ResultSet::getStringDictionaryPayloadCopy ( const int  dict_id) const

Definition at line 1306 of file ResultSet.cpp.

References catalog_(), and CHECK.

1307  {
1308  const auto sdp = row_set_mem_owner_->getOrAddStringDictProxy(
1309  dict_id, /*with_generation=*/true, catalog_);
1310  CHECK(sdp);
1311  return sdp->getDictionary()->copyStrings();
1312 }
const Catalog_Namespace::Catalog * catalog_
Definition: ResultSet.h:914
std::shared_ptr< RowSetMemoryOwner > row_set_mem_owner_
Definition: ResultSet.h:911
#define CHECK(condition)
Definition: Logger.h:223

+ Here is the call graph for this function:

StringDictionaryProxy * ResultSet::getStringDictionaryProxy ( int const  dict_id) const

Definition at line 426 of file ResultSet.cpp.

References catalog_().

426  {
427  constexpr bool with_generation = true;
428  return catalog_ ? row_set_mem_owner_->getOrAddStringDictProxy(
429  dict_id, with_generation, catalog_)
430  : row_set_mem_owner_->getStringDictProxy(dict_id);
431 }
const Catalog_Namespace::Catalog * catalog_
Definition: ResultSet.h:914
std::shared_ptr< RowSetMemoryOwner > row_set_mem_owner_
Definition: ResultSet.h:911

+ Here is the call graph for this function:

std::tuple< std::vector< bool >, size_t > ResultSet::getSupportedSingleSlotTargetBitmap ( ) const

This function returns a bitmap denoting all single-slot targets that are supported for direct columnarization, together with the bitmap's population count.

The final goal is to remove the need for such selection, but at the moment for any target that doesn't qualify for direct columnarization, we use the traditional result set's iteration to handle it (e.g., count distinct, approximate count distinct)

Definition at line 1418 of file ResultSet.cpp.

References CHECK, CHECK_GE, is_distinct_target(), kAPPROX_QUANTILE, kFLOAT, and kSAMPLE.

1419  {
1421  auto [single_slot_targets, num_single_slot_targets] = getSingleSlotTargetBitmap();
1422 
1423  for (size_t target_idx = 0; target_idx < single_slot_targets.size(); target_idx++) {
1424  const auto& target = targets_[target_idx];
1425  if (single_slot_targets[target_idx] &&
1426  (is_distinct_target(target) || target.agg_kind == kAPPROX_QUANTILE ||
1427  (target.is_agg && target.agg_kind == kSAMPLE && target.sql_type == kFLOAT))) {
1428  single_slot_targets[target_idx] = false;
1429  num_single_slot_targets--;
1430  }
1431  }
1432  CHECK_GE(num_single_slot_targets, size_t(0));
1433  return std::make_tuple(std::move(single_slot_targets), num_single_slot_targets);
1434 }
#define CHECK_GE(x, y)
Definition: Logger.h:236
const std::vector< TargetInfo > targets_
Definition: ResultSet.h:901
std::tuple< std::vector< bool >, size_t > getSingleSlotTargetBitmap() const
Definition: ResultSet.cpp:1394
bool is_distinct_target(const TargetInfo &target_info)
Definition: TargetInfo.h:153
#define CHECK(condition)
Definition: Logger.h:223
bool isDirectColumnarConversionPossible() const
Definition: ResultSet.cpp:1357

+ Here is the call graph for this function:

ChunkStats ResultSet::getTableFunctionChunkStats ( const size_t  target_idx) const
const std::vector< TargetInfo > & ResultSet::getTargetInfos ( ) const

Definition at line 677 of file ResultSet.cpp.

677  {
678  return targets_;
679 }
const std::vector< TargetInfo > targets_
Definition: ResultSet.h:901
const std::vector< int64_t > & ResultSet::getTargetInitVals ( ) const

Definition at line 681 of file ResultSet.cpp.

References CHECK.

681  {
682  CHECK(storage_);
683  return storage_->target_init_vals_;
684 }
std::unique_ptr< ResultSetStorage > storage_
Definition: ResultSet.h:905
#define CHECK(condition)
Definition: Logger.h:223
std::vector<TargetMetaInfo> ResultSet::getTargetMetaInfo ( )
inline

Definition at line 501 of file ResultSet.h.

References target_meta_info_.

501 { return target_meta_info_; }
std::vector< TargetMetaInfo > target_meta_info_
Definition: ResultSet.h:954
TargetValue ResultSet::getTargetValueFromBufferColwise ( const int8_t *  col_ptr,
const int8_t *  keys_ptr,
const QueryMemoryDescriptor query_mem_desc,
const size_t  local_entry_idx,
const size_t  global_entry_idx,
const TargetInfo target_info,
const size_t  target_logical_idx,
const size_t  slot_idx,
const bool  translate_strings,
const bool  decimal_to_double 
) const
private

Definition at line 1887 of file ResultSetIteration.cpp.

References advance_to_next_columnar_target_buff(), TargetInfo::agg_kind, CHECK, CHECK_GE, anonymous_namespace{ResultSetIteration.cpp}::columnar_elem_ptr(), QueryMemoryDescriptor::didOutputColumnar(), QueryMemoryDescriptor::getEffectiveKeyWidth(), QueryMemoryDescriptor::getEntryCount(), QueryMemoryDescriptor::getPaddedSlotWidthBytes(), QueryMemoryDescriptor::getTargetGroupbyIndex(), TargetInfo::is_agg, SQLTypeInfo::is_geometry(), is_real_str_or_array(), kAVG, anonymous_namespace{ResultSetIteration.cpp}::make_avg_target_value(), makeGeoTargetValue(), makeTargetValue(), makeVarlenTargetValue(), query_mem_desc_, TargetInfo::sql_type, and QueryMemoryDescriptor::targetGroupbyIndicesSize().

1897  {
1899  const auto col1_ptr = col_ptr;
1900  const auto compact_sz1 = query_mem_desc.getPaddedSlotWidthBytes(slot_idx);
1901  const auto next_col_ptr =
1902  advance_to_next_columnar_target_buff(col1_ptr, query_mem_desc, slot_idx);
1903  const auto col2_ptr = ((target_info.is_agg && target_info.agg_kind == kAVG) ||
1904  is_real_str_or_array(target_info))
1905  ? next_col_ptr
1906  : nullptr;
1907  const auto compact_sz2 = ((target_info.is_agg && target_info.agg_kind == kAVG) ||
1908  is_real_str_or_array(target_info))
1909  ? query_mem_desc.getPaddedSlotWidthBytes(slot_idx + 1)
1910  : 0;
1911 
1912  // TODO(Saman): add required logics for count distinct
1913  // geospatial target values:
1914  if (target_info.sql_type.is_geometry()) {
1915  return makeGeoTargetValue(
1916  col1_ptr, slot_idx, target_info, target_logical_idx, global_entry_idx);
1917  }
1918 
1919  const auto ptr1 = columnar_elem_ptr(local_entry_idx, col1_ptr, compact_sz1);
1920  if (target_info.agg_kind == kAVG || is_real_str_or_array(target_info)) {
1921  CHECK(col2_ptr);
1922  CHECK(compact_sz2);
1923  const auto ptr2 = columnar_elem_ptr(local_entry_idx, col2_ptr, compact_sz2);
1924  return target_info.agg_kind == kAVG
1925  ? make_avg_target_value(ptr1, compact_sz1, ptr2, compact_sz2, target_info)
1926  : makeVarlenTargetValue(ptr1,
1927  compact_sz1,
1928  ptr2,
1929  compact_sz2,
1930  target_info,
1931  target_logical_idx,
1932  translate_strings,
1933  global_entry_idx);
1934  }
1936  query_mem_desc_.getTargetGroupbyIndex(target_logical_idx) < 0) {
1937  return makeTargetValue(ptr1,
1938  compact_sz1,
1939  target_info,
1940  target_logical_idx,
1941  translate_strings,
1943  global_entry_idx);
1944  }
1945  const auto key_width = query_mem_desc_.getEffectiveKeyWidth();
1946  const auto key_idx = query_mem_desc_.getTargetGroupbyIndex(target_logical_idx);
1947  CHECK_GE(key_idx, 0);
1948  auto key_col_ptr = keys_ptr + key_idx * query_mem_desc_.getEntryCount() * key_width;
1949  return makeTargetValue(columnar_elem_ptr(local_entry_idx, key_col_ptr, key_width),
1950  key_width,
1951  target_info,
1952  target_logical_idx,
1953  translate_strings,
1955  global_entry_idx);
1956 }
double decimal_to_double(const SQLTypeInfo &otype, int64_t oval)
int64_t getTargetGroupbyIndex(const size_t target_idx) const
T advance_to_next_columnar_target_buff(T target_ptr, const QueryMemoryDescriptor &query_mem_desc, const size_t target_slot_idx)
SQLTypeInfo sql_type
Definition: TargetInfo.h:51
QueryMemoryDescriptor query_mem_desc_
Definition: ResultSet.h:904
#define CHECK_GE(x, y)
Definition: Logger.h:236
size_t getEffectiveKeyWidth() const
TargetValue makeGeoTargetValue(const int8_t *geo_target_ptr, const size_t slot_idx, const TargetInfo &target_info, const size_t target_logical_idx, const size_t entry_buff_idx) const
TargetValue make_avg_target_value(const int8_t *ptr1, const int8_t compact_sz1, const int8_t *ptr2, const int8_t compact_sz2, const TargetInfo &target_info)
bool is_agg
Definition: TargetInfo.h:49
size_t targetGroupbyIndicesSize() const
const int8_t getPaddedSlotWidthBytes(const size_t slot_idx) const
SQLAgg agg_kind
Definition: TargetInfo.h:50
bool is_real_str_or_array(const TargetInfo &target_info)
TargetValue makeTargetValue(const int8_t *ptr, const int8_t compact_sz, const TargetInfo &target_info, const size_t target_logical_idx, const bool translate_strings, const bool decimal_to_double, const size_t entry_buff_idx) const
#define CHECK(condition)
Definition: Logger.h:223
bool is_geometry() const
Definition: sqltypes.h:522
Definition: sqldefs.h:74
const int8_t * columnar_elem_ptr(const size_t entry_idx, const int8_t *col1_ptr, const int8_t compact_sz1)
TargetValue makeVarlenTargetValue(const int8_t *ptr1, const int8_t compact_sz1, const int8_t *ptr2, const int8_t compact_sz2, const TargetInfo &target_info, const size_t target_logical_idx, const bool translate_strings, const size_t entry_buff_idx) const

+ Here is the call graph for this function:

TargetValue ResultSet::getTargetValueFromBufferRowwise ( int8_t *  rowwise_target_ptr,
int8_t *  keys_ptr,
const size_t  entry_buff_idx,
const TargetInfo target_info,
const size_t  target_logical_idx,
const size_t  slot_idx,
const bool  translate_strings,
const bool  decimal_to_double,
const bool  fixup_count_distinct_pointers 
) const
private

Definition at line 1960 of file ResultSetIteration.cpp.

References TargetInfo::agg_kind, CHECK, QueryMemoryDescriptor::count_distinct_descriptors_, SQLTypeInfo::get_compression(), QueryMemoryDescriptor::getEffectiveKeyWidth(), QueryMemoryDescriptor::getLogicalSlotWidthBytes(), QueryMemoryDescriptor::getPaddedSlotWidthBytes(), QueryMemoryDescriptor::getTargetGroupbyIndex(), QueryMemoryDescriptor::hasKeylessHash(), TargetInfo::is_agg, SQLTypeInfo::is_array(), is_distinct_target(), SQLTypeInfo::is_geometry(), is_real_str_or_array(), SQLTypeInfo::is_string(), QueryMemoryDescriptor::isSingleColumnGroupByWithPerfectHash(), kAVG, kENCODING_NONE, anonymous_namespace{ResultSetIteration.cpp}::make_avg_target_value(), makeGeoTargetValue(), makeTargetValue(), makeVarlenTargetValue(), query_mem_desc_, row_set_mem_owner_, separate_varlen_storage_valid_, TargetInfo::sql_type, storage_, QueryMemoryDescriptor::targetGroupbyIndicesSize(), and UNLIKELY.

1969  {
1970  if (UNLIKELY(fixup_count_distinct_pointers)) {
1971  if (is_distinct_target(target_info)) {
1972  auto count_distinct_ptr_ptr = reinterpret_cast<int64_t*>(rowwise_target_ptr);
1973  const auto remote_ptr = *count_distinct_ptr_ptr;
1974  if (remote_ptr) {
1975  const auto ptr = storage_->mappedPtr(remote_ptr);
1976  if (ptr) {
1977  *count_distinct_ptr_ptr = ptr;
1978  } else {
1979  // need to create a zero filled buffer for this remote_ptr
1980  const auto& count_distinct_desc =
1981  query_mem_desc_.count_distinct_descriptors_[target_logical_idx];
1982  const auto bitmap_byte_sz = count_distinct_desc.sub_bitmap_count == 1
1983  ? count_distinct_desc.bitmapSizeBytes()
1984  : count_distinct_desc.bitmapPaddedSizeBytes();
1985  auto count_distinct_buffer = row_set_mem_owner_->allocateCountDistinctBuffer(
1986  bitmap_byte_sz, /*thread_idx=*/0);
1987  *count_distinct_ptr_ptr = reinterpret_cast<int64_t>(count_distinct_buffer);
1988  }
1989  }
1990  }
1991  return int64_t(0);
1992  }
1993  if (target_info.sql_type.is_geometry()) {
1994  return makeGeoTargetValue(
1995  rowwise_target_ptr, slot_idx, target_info, target_logical_idx, entry_buff_idx);
1996  }
1997 
1998  auto ptr1 = rowwise_target_ptr;
1999  int8_t compact_sz1 = query_mem_desc_.getPaddedSlotWidthBytes(slot_idx);
2001  !query_mem_desc_.hasKeylessHash() && !target_info.is_agg) {
2002  // Single column perfect hash group by can utilize one slot for both the key and the
2003  // target value if both values fit in 8 bytes. Use the target value actual size for
2004  // this case. If they don't, the target value should be 8 bytes, so we can still use
2005  // the actual size rather than the compact size.
2006  compact_sz1 = query_mem_desc_.getLogicalSlotWidthBytes(slot_idx);
2007  }
2008 
2009  // logic for deciding width of column
2010  if (target_info.agg_kind == kAVG || is_real_str_or_array(target_info)) {
2011  const auto ptr2 =
2012  rowwise_target_ptr + query_mem_desc_.getPaddedSlotWidthBytes(slot_idx);
2013  int8_t compact_sz2 = 0;
2014  // Skip reading the second slot if we have a none encoded string and are using
2015  // the none encoded strings buffer attached to ResultSetStorage
2017  (target_info.sql_type.is_array() ||
2018  (target_info.sql_type.is_string() &&
2019  target_info.sql_type.get_compression() == kENCODING_NONE)))) {
2020  compact_sz2 = query_mem_desc_.getPaddedSlotWidthBytes(slot_idx + 1);
2021  }
2022  if (separate_varlen_storage_valid_ && target_info.is_agg) {
2023  compact_sz2 = 8; // TODO(adb): is there a better way to do this?
2024  }
2025  CHECK(ptr2);
2026  return target_info.agg_kind == kAVG
2027  ? make_avg_target_value(ptr1, compact_sz1, ptr2, compact_sz2, target_info)
2028  : makeVarlenTargetValue(ptr1,
2029  compact_sz1,
2030  ptr2,
2031  compact_sz2,
2032  target_info,
2033  target_logical_idx,
2034  translate_strings,
2035  entry_buff_idx);
2036  }
2038  query_mem_desc_.getTargetGroupbyIndex(target_logical_idx) < 0) {
2039  return makeTargetValue(ptr1,
2040  compact_sz1,
2041  target_info,
2042  target_logical_idx,
2043  translate_strings,
2045  entry_buff_idx);
2046  }
2047  const auto key_width = query_mem_desc_.getEffectiveKeyWidth();
2048  ptr1 = keys_ptr + query_mem_desc_.getTargetGroupbyIndex(target_logical_idx) * key_width;
2049  return makeTargetValue(ptr1,
2050  key_width,
2051  target_info,
2052  target_logical_idx,
2053  translate_strings,
2055  entry_buff_idx);
2056 }
double decimal_to_double(const SQLTypeInfo &otype, int64_t oval)
int64_t getTargetGroupbyIndex(const size_t target_idx) const
SQLTypeInfo sql_type
Definition: TargetInfo.h:51
QueryMemoryDescriptor query_mem_desc_
Definition: ResultSet.h:904
std::unique_ptr< ResultSetStorage > storage_
Definition: ResultSet.h:905
size_t getEffectiveKeyWidth() const
TargetValue makeGeoTargetValue(const int8_t *geo_target_ptr, const size_t slot_idx, const TargetInfo &target_info, const size_t target_logical_idx, const size_t entry_buff_idx) const
TargetValue make_avg_target_value(const int8_t *ptr1, const int8_t compact_sz1, const int8_t *ptr2, const int8_t compact_sz2, const TargetInfo &target_info)
std::shared_ptr< RowSetMemoryOwner > row_set_mem_owner_
Definition: ResultSet.h:911
bool is_agg
Definition: TargetInfo.h:49
size_t targetGroupbyIndicesSize() const
CountDistinctDescriptors count_distinct_descriptors_
bool is_distinct_target(const TargetInfo &target_info)
Definition: TargetInfo.h:153
const int8_t getPaddedSlotWidthBytes(const size_t slot_idx) const
SQLAgg agg_kind
Definition: TargetInfo.h:50
#define UNLIKELY(x)
Definition: likely.h:25
bool is_real_str_or_array(const TargetInfo &target_info)
bool isSingleColumnGroupByWithPerfectHash() const
HOST DEVICE EncodingType get_compression() const
Definition: sqltypes.h:337
TargetValue makeTargetValue(const int8_t *ptr, const int8_t compact_sz, const TargetInfo &target_info, const size_t target_logical_idx, const bool translate_strings, const bool decimal_to_double, const size_t entry_buff_idx) const
#define CHECK(condition)
Definition: Logger.h:223
bool is_geometry() const
Definition: sqltypes.h:522
bool separate_varlen_storage_valid_
Definition: ResultSet.h:938
bool is_string() const
Definition: sqltypes.h:510
const int8_t getLogicalSlotWidthBytes(const size_t slot_idx) const
Definition: sqldefs.h:74
bool is_array() const
Definition: sqltypes.h:518
TargetValue makeVarlenTargetValue(const int8_t *ptr1, const int8_t compact_sz1, const int8_t *ptr2, const int8_t compact_sz2, const TargetInfo &target_info, const size_t target_logical_idx, const bool translate_strings, const size_t entry_buff_idx) const

+ Here is the call graph for this function:

const std::pair< std::vector< int32_t >, std::vector< std::string > > ResultSet::getUniqueStringsForDictEncodedTargetCol ( const size_t  col_idx) const

Definition at line 1315 of file ResultSet.cpp.

References catalog_(), CHECK, and inline_fixed_encoding_null_val().

1315  {
1316  const auto col_type_info = getColType(col_idx);
1317  CHECK(col_type_info.is_dict_encoded_string());
1318  std::unordered_set<int32_t> unique_string_ids_set;
1319  const size_t num_entries = entryCount();
1320  std::vector<bool> targets_to_skip(colCount(), true);
1321  targets_to_skip[col_idx] = false;
1322  const auto null_val = inline_fixed_encoding_null_val(col_type_info);
1323 
1324  for (size_t row_idx = 0; row_idx < num_entries; ++row_idx) {
1325  const auto result_row = getRowAtNoTranslations(row_idx, targets_to_skip);
1326  if (!result_row.empty()) {
1327  const auto scalar_col_val = boost::get<ScalarTargetValue>(result_row[col_idx]);
1328  const int32_t string_id = static_cast<int32_t>(boost::get<int64_t>(scalar_col_val));
1329  if (string_id != null_val) {
1330  unique_string_ids_set.emplace(string_id);
1331  }
1332  }
1333  }
1334 
1335  const size_t num_unique_strings = unique_string_ids_set.size();
1336  std::vector<int32_t> unique_string_ids(num_unique_strings);
1337  size_t string_idx{0};
1338  for (const auto unique_string_id : unique_string_ids_set) {
1339  unique_string_ids[string_idx++] = unique_string_id;
1340  }
1341 
1342  const int32_t dict_id = col_type_info.get_comp_param();
1343  const auto sdp = row_set_mem_owner_->getOrAddStringDictProxy(
1344  dict_id, /*with_generation=*/true, catalog_);
1345  CHECK(sdp);
1346 
1347  return std::make_pair(unique_string_ids, sdp->getStrings(unique_string_ids));
1348 }
const Catalog_Namespace::Catalog * catalog_
Definition: ResultSet.h:914
size_t colCount() const
Definition: ResultSet.cpp:413
std::vector< TargetValue > getRowAtNoTranslations(const size_t index, const std::vector< bool > &targets_to_skip={}) const
std::shared_ptr< RowSetMemoryOwner > row_set_mem_owner_
Definition: ResultSet.h:911
SQLTypeInfo getColType(const size_t col_idx) const
Definition: ResultSet.cpp:417
size_t entryCount() const
Returns the number of entries the result set is allocated to hold.
#define CHECK(condition)
Definition: Logger.h:223
int64_t inline_fixed_encoding_null_val(const SQL_TYPE_INFO &ti)

+ Here is the call graph for this function:

InternalTargetValue ResultSet::getVarlenOrderEntry ( const int64_t  str_ptr,
const size_t  str_len 
) const
private

Definition at line 627 of file ResultSetIteration.cpp.

References CHECK, CPU, device_id_, device_type_, QueryMemoryDescriptor::getExecutor(), getQueryEngineCudaStreamForDevice(), GPU, query_mem_desc_, and row_set_mem_owner_.

628  {
629  char* host_str_ptr{nullptr};
630  std::vector<int8_t> cpu_buffer;
632  cpu_buffer.resize(str_len);
633  const auto executor = query_mem_desc_.getExecutor();
634  CHECK(executor);
635  auto data_mgr = executor->getDataMgr();
636  auto allocator = std::make_unique<CudaAllocator>(
637  data_mgr, device_id_, getQueryEngineCudaStreamForDevice(device_id_));
638  allocator->copyFromDevice(
639  &cpu_buffer[0], reinterpret_cast<int8_t*>(str_ptr), str_len);
640  host_str_ptr = reinterpret_cast<char*>(&cpu_buffer[0]);
641  } else {
643  host_str_ptr = reinterpret_cast<char*>(str_ptr);
644  }
645  std::string str(host_str_ptr, str_len);
646  return InternalTargetValue(row_set_mem_owner_->addString(str));
647 }
QueryMemoryDescriptor query_mem_desc_
Definition: ResultSet.h:904
std::shared_ptr< RowSetMemoryOwner > row_set_mem_owner_
Definition: ResultSet.h:911
CUstream getQueryEngineCudaStreamForDevice(int device_num)
Definition: QueryEngine.cpp:7
const ExecutorDeviceType device_type_
Definition: ResultSet.h:902
#define CHECK(condition)
Definition: Logger.h:223
const Executor * getExecutor() const
const int device_id_
Definition: ResultSet.h:903

+ Here is the call graph for this function:

const VarlenOutputInfo * ResultSet::getVarlenOutputInfo ( const size_t  entry_idx) const
private

Definition at line 1113 of file ResultSetIteration.cpp.

References CHECK, and findStorage().

Referenced by makeGeoTargetValue().

1113  {
// Resolve which underlying storage buffer holds entry_idx, then forward to that
// storage's varlen-output metadata. findStorage() must yield a valid storage_ptr.
1114  auto storage_lookup_result = findStorage(entry_idx);
1115  CHECK(storage_lookup_result.storage_ptr);
1116  return storage_lookup_result.storage_ptr->getVarlenOutputInfo();
1117 }
StorageLookupResult findStorage(const size_t entry_idx) const
Definition: ResultSet.cpp:939
#define CHECK(condition)
Definition: Logger.h:223

+ Here is the call graph for this function:

+ Here is the caller graph for this function:

const bool ResultSet::hasValidBuffer ( ) const
inline

Definition at line 509 of file ResultSet.h.

References storage_.

509  {
// A result set has a valid buffer exactly when it owns a storage_ object.
510  if (storage_) {
511  return true;
512  }
513  return false;
514  }
std::unique_ptr< ResultSetStorage > storage_
Definition: ResultSet.h:905
void ResultSet::holdChunkIterators ( const std::shared_ptr< std::list< ChunkIter >>  chunk_iters)
inline

Definition at line 424 of file ResultSet.h.

References chunk_iters_.

424  {
// Retain a shared_ptr to the chunk-iterator list so the iterators stay alive
// for at least the lifetime of this result set.
425  chunk_iters_.push_back(chunk_iters);
426  }
std::vector< std::shared_ptr< std::list< ChunkIter > > > chunk_iters_
Definition: ResultSet.h:920
void ResultSet::holdChunks ( const std::list< std::shared_ptr< Chunk_NS::Chunk >> &  chunks)
inline

Definition at line 421 of file ResultSet.h.

References chunks_.

421  {
// Copy the list of shared_ptrs, extending the chunks' lifetimes to that of this
// result set (e.g. for lazily fetched columns). Replaces any previously held list.
422  chunks_ = chunks;
423  }
std::list< std::shared_ptr< Chunk_NS::Chunk > > chunks_
Definition: ResultSet.h:919
void ResultSet::holdLiterals ( std::vector< int8_t > &  literal_buff)
inline

Definition at line 427 of file ResultSet.h.

References literal_buffers_.

427  {
// Take ownership of the literal buffer by moving it in; the caller's vector is
// left in a valid-but-unspecified (typically empty) state.
428  literal_buffers_.push_back(std::move(literal_buff));
429  }
std::vector< std::vector< int8_t > > literal_buffers_
Definition: ResultSet.h:923
void ResultSet::initializeStorage ( ) const

Definition at line 1045 of file ResultSetReduction.cpp.

1045  {
1047  storage_->initializeColWise();
1048  } else {
1049  storage_->initializeRowWise();
1050  }
1051 }
QueryMemoryDescriptor query_mem_desc_
Definition: ResultSet.h:904
std::unique_ptr< ResultSetStorage > storage_
Definition: ResultSet.h:905
PermutationView ResultSet::initPermutationBuffer ( PermutationView  permutation,
PermutationIdx const  begin,
PermutationIdx const  end 
) const
private

Definition at line 846 of file ResultSet.cpp.

References CHECK, DEBUG_TIMER, and VectorView< T >::push_back().

848  {
// Append to `permutation` the indices in [begin, end) whose storage entries are
// non-empty, and return the updated view. Empty (unused) entries — e.g. sparse
// baseline hash slots — are skipped.
849  auto timer = DEBUG_TIMER(__func__);
850  for (PermutationIdx i = begin; i < end; ++i) {
// Map the global index to its owning storage and the fixed-up local offset.
851  const auto storage_lookup_result = findStorage(i);
852  const auto lhs_storage = storage_lookup_result.storage_ptr;
853  const auto off = storage_lookup_result.fixedup_entry_idx;
854  CHECK(lhs_storage);
855  if (!lhs_storage->isEmptyEntry(off)) {
856  permutation.push_back(i);
857  }
858  }
859  return permutation;
860 }
DEVICE void push_back(T const &value)
Definition: VectorView.h:74
StorageLookupResult findStorage(const size_t entry_idx) const
Definition: ResultSet.cpp:939
uint32_t PermutationIdx
Definition: ResultSet.h:154
#define CHECK(condition)
Definition: Logger.h:223
#define DEBUG_TIMER(name)
Definition: Logger.h:370

+ Here is the call graph for this function:

void ResultSet::initStatus ( )
inline

Definition at line 454 of file ResultSet.h.

References clearPermutation(), crt_row_buff_idx_, drop_first_, fetched_so_far_, invalidateCachedRowCount(), keep_first_, setGeoReturnType(), and WktString.

454  {
455  // todo(yoonmin): what else we additionally need to consider
456  // to make completely clear status of the resultset for reuse?
457  crt_row_buff_idx_ = 0;
458  fetched_so_far_ = 0;
462  drop_first_ = 0;
463  keep_first_ = 0;
464  }
void setGeoReturnType(const GeoReturnType val)
Definition: ResultSet.h:529
size_t keep_first_
Definition: ResultSet.h:910
void invalidateCachedRowCount() const
Definition: ResultSet.cpp:605
size_t drop_first_
Definition: ResultSet.h:909
size_t fetched_so_far_
Definition: ResultSet.h:908
size_t crt_row_buff_idx_
Definition: ResultSet.h:907
void clearPermutation()
Definition: ResultSet.h:448

+ Here is the call graph for this function:

void ResultSet::invalidateCachedRowCount ( ) const

Definition at line 605 of file ResultSet.cpp.

References uninitialized_cached_row_count.

Referenced by initStatus().

605  {
607 }
std::atomic< int64_t > cached_row_count_
Definition: ResultSet.h:942
constexpr int64_t uninitialized_cached_row_count
Definition: ResultSet.cpp:50

+ Here is the caller graph for this function:

void ResultSet::invalidateResultSetChunks ( )
inline

Definition at line 466 of file ResultSet.h.

References chunk_iters_, and chunks_.

466  {
// Drop the held chunk and chunk-iterator shared_ptrs, releasing this result
// set's references to them. (The emptiness guards are redundant — clear() on an
// empty container is a no-op — but kept as written.)
467  if (!chunks_.empty()) {
468  chunks_.clear();
469  }
470  if (!chunk_iters_.empty()) {
471  chunk_iters_.clear();
472  }
473  };
std::vector< std::shared_ptr< std::list< ChunkIter > > > chunk_iters_
Definition: ResultSet.h:920
std::list< std::shared_ptr< Chunk_NS::Chunk > > chunks_
Definition: ResultSet.h:919
const bool ResultSet::isCached ( ) const
inline

Definition at line 479 of file ResultSet.h.

References cached_.

// Reflects the cached_ flag; presumably set when this result set participates in
// the result-set cache — confirm semantics at the call sites that set cached_.
479 { return cached_; }
bool cached_
Definition: ResultSet.h:949
bool ResultSet::isDirectColumnarConversionPossible ( ) const

Determines if it is possible to directly form a ColumnarResults class from this result set, bypassing the default columnarization.

NOTE: If there exists a permutation vector (i.e., in some ORDER BY queries), it becomes equivalent to the row-wise columnarization.

Definition at line 1357 of file ResultSet.cpp.

References CHECK, g_enable_direct_columnarization, GroupByBaselineHash, GroupByPerfectHash, Projection, and TableFunction.

Referenced by copyColumnIntoBuffer().

1357  {
1359  return false;
1360  } else if (query_mem_desc_.didOutputColumnar()) {
1361  return permutation_.empty() && (query_mem_desc_.getQueryDescriptionType() ==
1369  } else {
1372  return permutation_.empty() && (query_mem_desc_.getQueryDescriptionType() ==
1376  }
1377 }
Permutation permutation_
Definition: ResultSet.h:912
bool g_enable_direct_columnarization
Definition: Execute.cpp:122
QueryMemoryDescriptor query_mem_desc_
Definition: ResultSet.h:904
QueryDescriptionType getQueryDescriptionType() const
#define CHECK(condition)
Definition: Logger.h:223

+ Here is the caller graph for this function:

bool ResultSet::isEmpty ( ) const

Returns a boolean signifying whether there are valid entries in the result set.

Note a result set can be logically empty even if the value returned by ResultSet::entryCount() is > 0, whether due to a SQL LIMIT/OFFSET applied or because the result set representation is inherently sparse (i.e. baseline hash group by).

Internally this function is just implemented as ResultSet::rowCount() == 0, which caches its value so the row count will only be computed once per finalized result set.

Definition at line 649 of file ResultSet.cpp.

649  {
650  // To simplify this function and de-dup logic with ResultSet::rowCount()
651  // (mismatches between the two were causing bugs), we modified this function
652  // to simply fetch rowCount(). The potential downside of this approach is that
653  // in some cases more work will need to be done, as we can't just stop at the first row.
654  // Mitigating that for most cases is the following:
655  // 1) rowCount() is cached, so the logic for actually computing row counts will run only
656  // once
657  // per result set.
658  // 2) If the cache is empty (cached_row_count_ == -1), rowCount() will use parallel
659  // methods if deemed appropriate, which in many cases could be faster for a sparse
660  // large result set than single-threaded iteration from the beginning
661  // 3) Often where isEmpty() is needed, rowCount() is also needed. Since the first call
662  // to rowCount()
663  // will be cached, there is no extra overhead in these cases
664 
665  return rowCount() == size_t(0);
666 }
size_t rowCount(const bool force_parallel=false) const
Returns the number of valid entries in the result set (i.e that will be returned from the SQL query o...
Definition: ResultSet.cpp:593
const bool ResultSet::isEstimator ( ) const
inline

Definition at line 475 of file ResultSet.h.

References estimator_.

// NOTE(review): returns true when estimator_ is NOT set — the negation looks
// inverted relative to the method name; confirm the intended semantics against
// callers before relying on this predicate.
475 { return !estimator_; }
const std::shared_ptr< const Analyzer::Estimator > estimator_
Definition: ResultSet.h:929
bool ResultSet::isExplain ( ) const

Definition at line 740 of file ResultSet.cpp.

740  {
// True for result sets constructed to carry an EXPLAIN plan string only.
741  return just_explain_;
742 }
const bool just_explain_
Definition: ResultSet.h:940
bool ResultSet::isGeoColOnGpu ( const size_t  col_idx) const

Definition at line 1436 of file ResultSetIteration.cpp.

References CHECK_LT, device_type_, GPU, IS_GEO, lazy_fetch_info_, separate_varlen_storage_valid_, targets_, and to_string().

1436  {
1437  // This should match the logic in makeGeoTargetValue which ultimately calls
1438  // fetch_data_from_gpu when the geo column is on the device.
1439  // TODO(croot): somehow find a way to refactor this and makeGeoTargetValue to use a
1440  // utility function that handles this logic in one place
1441  CHECK_LT(col_idx, targets_.size());
1442  if (!IS_GEO(targets_[col_idx].sql_type.get_type())) {
1443  throw std::runtime_error("Column target at index " + std::to_string(col_idx) +
1444  " is not a geo column. It is of type " +
1445  targets_[col_idx].sql_type.get_type_name() + ".");
1446  }
1447 
1448  const auto& target_info = targets_[col_idx];
1449  if (separate_varlen_storage_valid_ && !target_info.is_agg) {
1450  return false;
1451  }
1452 
1453  if (!lazy_fetch_info_.empty()) {
1454  CHECK_LT(col_idx, lazy_fetch_info_.size());
1455  if (lazy_fetch_info_[col_idx].is_lazily_fetched) {
1456  return false;
1457  }
1458  }
1459 
1461 }
std::string to_string(char const *&&v)
const std::vector< TargetInfo > targets_
Definition: ResultSet.h:901
#define CHECK_LT(x, y)
Definition: Logger.h:233
const ExecutorDeviceType device_type_
Definition: ResultSet.h:902
std::vector< ColumnLazyFetchInfo > lazy_fetch_info_
Definition: ResultSet.h:924
bool separate_varlen_storage_valid_
Definition: ResultSet.h:938
#define IS_GEO(T)
Definition: sqltypes.h:251

+ Here is the call graph for this function:

bool ResultSet::isNull ( const SQLTypeInfo ti,
const InternalTargetValue val,
const bool  float_argument_input 
)
staticprivate

Definition at line 2203 of file ResultSetIteration.cpp.

References CHECK, SQLTypeInfo::get_notnull(), InternalTargetValue::i1, InternalTargetValue::i2, InternalTargetValue::isInt(), InternalTargetValue::isNull(), InternalTargetValue::isPair(), InternalTargetValue::isStr(), and null_val_bit_pattern().

2205  {
// A NOT NULL column can never hold a null value.
2206  if (ti.get_notnull()) {
2207  return false;
2208  }
// Integer-encoded values: compare against the type's sentinel null bit pattern
// (float_argument_input selects the float vs. double pattern where relevant).
2209  if (val.isInt()) {
2210  return val.i1 == null_val_bit_pattern(ti, float_argument_input);
2211  }
// Pair-encoded values (e.g. AVG sum/count): null is signaled by a zero second
// component, as read here from i2.
2212  if (val.isPair()) {
2213  return !val.i2;
2214  }
// String values: a zero pointer/handle in i1 means null.
2215  if (val.isStr()) {
2216  return !val.i1;
2217  }
// Anything else must be the explicit null variant.
2218  CHECK(val.isNull());
2219  return true;
2220 }
bool isPair() const
Definition: TargetValue.h:67
bool isStr() const
Definition: TargetValue.h:71
int64_t null_val_bit_pattern(const SQLTypeInfo &ti, const bool float_argument_input)
bool isNull() const
Definition: TargetValue.h:69
bool isInt() const
Definition: TargetValue.h:65
#define CHECK(condition)
Definition: Logger.h:223
HOST DEVICE bool get_notnull() const
Definition: sqltypes.h:336

+ Here is the call graph for this function:

const bool ResultSet::isPermutationBufferEmpty ( ) const
inline

Definition at line 436 of file ResultSet.h.

References permutation_.

// True when no permutation has been materialized (permutation_ is populated by
// sorting paths such as initPermutationBuffer).
436 { return permutation_.empty(); };
Permutation permutation_
Definition: ResultSet.h:912
bool ResultSet::isRowAtEmpty ( const size_t  index) const

Definition at line 286 of file ResultSetIteration.cpp.

286  {
// Indices past the allocated entry count are treated as empty.
287  if (logical_index >= entryCount()) {
288  return true;
289  }
// Remap through the ORDER BY permutation when one exists.
290  const auto entry_idx =
291  permutation_.empty() ? logical_index : permutation_[logical_index];
292  const auto storage_lookup_result = findStorage(entry_idx);
293  const auto storage = storage_lookup_result.storage_ptr;
294  const auto local_entry_idx = storage_lookup_result.fixedup_entry_idx;
// Assumes findStorage() yields a non-null storage_ptr for in-range indices
// (unlike some siblings, no CHECK here) — TODO confirm.
295  return storage->isEmptyEntry(local_entry_idx);
296 }
Permutation permutation_
Definition: ResultSet.h:912
StorageLookupResult findStorage(const size_t entry_idx) const
Definition: ResultSet.cpp:939
size_t entryCount() const
Returns the number of entries the result set is allocated to hold.
bool ResultSet::isTruncated ( ) const

Definition at line 736 of file ResultSet.cpp.

736  {
// Truncated iff a LIMIT (keep_first_) or OFFSET (drop_first_) was applied;
// relies on the implicit size_t -> bool conversion of the nonzero sum.
737  return keep_first_ + drop_first_;
738 }
size_t keep_first_
Definition: ResultSet.h:910
size_t drop_first_
Definition: ResultSet.h:909
bool ResultSet::isValidationOnlyRes ( ) const

Definition at line 748 of file ResultSet.cpp.

748  {
// True when this result set exists only to validate a query (flag set elsewhere).
749  return for_validation_only_;
750 }
bool for_validation_only_
Definition: ResultSet.h:941
bool ResultSet::isZeroCopyColumnarConversionPossible ( size_t  column_idx) const

Definition at line 1379 of file ResultSet.cpp.

References Projection, and TableFunction.

1379  {
1384  appended_storage_.empty() && storage_ &&
1385  (lazy_fetch_info_.empty() || !lazy_fetch_info_[column_idx].is_lazily_fetched);
1386 }
AppendedStorage appended_storage_
Definition: ResultSet.h:906
QueryMemoryDescriptor query_mem_desc_
Definition: ResultSet.h:904
std::unique_ptr< ResultSetStorage > storage_
Definition: ResultSet.h:905
QueryDescriptionType getQueryDescriptionType() const
std::vector< ColumnLazyFetchInfo > lazy_fetch_info_
Definition: ResultSet.h:924
void ResultSet::keepFirstN ( const size_t  n)

Definition at line 52 of file ResultSet.cpp.

References anonymous_namespace{Utm.h}::n.

52  {
54  keep_first_ = n;
55 }
size_t keep_first_
Definition: ResultSet.h:910
void invalidateCachedRowCount() const
Definition: ResultSet.cpp:605
constexpr double n
Definition: Utm.h:38
int64_t ResultSet::lazyReadInt ( const int64_t  ival,
const size_t  target_logical_idx,
const StorageLookupResult storage_lookup_result 
) const
private

Definition at line 649 of file ResultSetIteration.cpp.

References CHECK, CHECK_LT, ChunkIter_get_nth(), col_buffers_, ResultSet::StorageLookupResult::fixedup_entry_idx, getColumnFrag(), VarlenDatum::is_null, kENCODING_NONE, result_set::lazy_decode(), lazy_fetch_info_, VarlenDatum::length, VarlenDatum::pointer, row_set_mem_owner_, ResultSet::StorageLookupResult::storage_idx, and targets_.

651  {
652  if (!lazy_fetch_info_.empty()) {
653  CHECK_LT(target_logical_idx, lazy_fetch_info_.size());
654  const auto& col_lazy_fetch = lazy_fetch_info_[target_logical_idx];
655  if (col_lazy_fetch.is_lazily_fetched) {
656  CHECK_LT(static_cast<size_t>(storage_lookup_result.storage_idx),
657  col_buffers_.size());
658  int64_t ival_copy = ival;
659  auto& frag_col_buffers =
660  getColumnFrag(static_cast<size_t>(storage_lookup_result.storage_idx),
661  target_logical_idx,
662  ival_copy);
663  auto& frag_col_buffer = frag_col_buffers[col_lazy_fetch.local_col_id];
664  CHECK_LT(target_logical_idx, targets_.size());
665  const TargetInfo& target_info = targets_[target_logical_idx];
666  CHECK(!target_info.is_agg);
667  if (target_info.sql_type.is_string() &&
668  target_info.sql_type.get_compression() == kENCODING_NONE) {
669  VarlenDatum vd;
670  bool is_end{false};
672  reinterpret_cast<ChunkIter*>(const_cast<int8_t*>(frag_col_buffer)),
673  storage_lookup_result.fixedup_entry_idx,
674  false,
675  &vd,
676  &is_end);
677  CHECK(!is_end);
678  if (vd.is_null) {
679  return 0;
680  }
681  std::string fetched_str(reinterpret_cast<char*>(vd.pointer), vd.length);
682  return reinterpret_cast<int64_t>(row_set_mem_owner_->addString(fetched_str));
683  }
684  return result_set::lazy_decode(col_lazy_fetch, frag_col_buffer, ival_copy);
685  }
686  }
687  return ival;
688 }
bool is_null
Definition: sqltypes.h:153
DEVICE void ChunkIter_get_nth(ChunkIter *it, int n, bool uncompress, VarlenDatum *result, bool *is_end)
Definition: ChunkIter.cpp:181
int8_t * pointer
Definition: sqltypes.h:152
const std::vector< TargetInfo > targets_
Definition: ResultSet.h:901
std::shared_ptr< RowSetMemoryOwner > row_set_mem_owner_
Definition: ResultSet.h:911
int64_t lazy_decode(const ColumnLazyFetchInfo &col_lazy_fetch, const int8_t *byte_stream, const int64_t pos)
#define CHECK_LT(x, y)
Definition: Logger.h:233
std::vector< std::vector< std::vector< const int8_t * > > > col_buffers_
Definition: ResultSet.h:925
#define CHECK(condition)
Definition: Logger.h:223
std::vector< ColumnLazyFetchInfo > lazy_fetch_info_
Definition: ResultSet.h:924
const std::vector< const int8_t * > & getColumnFrag(const size_t storge_idx, const size_t col_logical_idx, int64_t &global_idx) const
size_t length
Definition: sqltypes.h:151

+ Here is the call graph for this function:

TargetValue ResultSet::makeGeoTargetValue ( const int8_t *  geo_target_ptr,
const size_t  slot_idx,
const TargetInfo target_info,
const size_t  target_logical_idx,
const size_t  entry_buff_idx 
) const
private

Definition at line 1467 of file ResultSetIteration.cpp.

References advance_to_next_columnar_target_buff(), CHECK, CHECK_EQ, CHECK_LT, col_buffers_, device_id_, device_type_, QueryMemoryDescriptor::didOutputColumnar(), findStorage(), geo_return_type_, SQLTypeInfo::get_compression(), SQLTypeInfo::get_type(), SQLTypeInfo::get_type_name(), getColumnFrag(), QueryMemoryDescriptor::getExecutor(), QueryMemoryDescriptor::getPaddedColWidthForRange(), QueryMemoryDescriptor::getPaddedSlotWidthBytes(), getStorageIndex(), getVarlenOutputInfo(), GPU, TargetInfo::is_agg, SQLTypeInfo::is_geometry(), ColumnLazyFetchInfo::is_lazily_fetched, kENCODING_GEOINT, kLINESTRING, kMULTIPOLYGON, kPOINT, kPOLYGON, lazy_fetch_info_, ColumnLazyFetchInfo::local_col_id, query_mem_desc_, read_int_from_buff(), separate_varlen_storage_valid_, serialized_varlen_buffer_, QueryMemoryDescriptor::slotIsVarlenOutput(), TargetInfo::sql_type, and UNREACHABLE.

Referenced by getTargetValueFromBufferColwise(), and getTargetValueFromBufferRowwise().

1471  {
1472  CHECK(target_info.sql_type.is_geometry());
1473 
1474  auto getNextTargetBufferRowWise = [&](const size_t slot_idx, const size_t range) {
1475  return geo_target_ptr + query_mem_desc_.getPaddedColWidthForRange(slot_idx, range);
1476  };
1477 
1478  auto getNextTargetBufferColWise = [&](const size_t slot_idx, const size_t range) {
1479  const auto storage_info = findStorage(entry_buff_idx);
1480  auto crt_geo_col_ptr = geo_target_ptr;
1481  for (size_t i = slot_idx; i < slot_idx + range; i++) {
1482  crt_geo_col_ptr = advance_to_next_columnar_target_buff(
1483  crt_geo_col_ptr, storage_info.storage_ptr->query_mem_desc_, i);
1484  }
1485  // adjusting the column pointer to represent a pointer to the geo target value
1486  return crt_geo_col_ptr +
1487  storage_info.fixedup_entry_idx *
1488  storage_info.storage_ptr->query_mem_desc_.getPaddedSlotWidthBytes(
1489  slot_idx + range);
1490  };
1491 
1492  auto getNextTargetBuffer = [&](const size_t slot_idx, const size_t range) {
1494  ? getNextTargetBufferColWise(slot_idx, range)
1495  : getNextTargetBufferRowWise(slot_idx, range);
1496  };
1497 
1498  auto getCoordsDataPtr = [&](const int8_t* geo_target_ptr) {
1499  return read_int_from_buff(getNextTargetBuffer(slot_idx, 0),
1501  };
1502 
1503  auto getCoordsLength = [&](const int8_t* geo_target_ptr) {
1504  return read_int_from_buff(getNextTargetBuffer(slot_idx, 1),
1506  };
1507 
1508  auto getRingSizesPtr = [&](const int8_t* geo_target_ptr) {
1509  return read_int_from_buff(getNextTargetBuffer(slot_idx, 2),
1511  };
1512 
1513  auto getRingSizesLength = [&](const int8_t* geo_target_ptr) {
1514  return read_int_from_buff(getNextTargetBuffer(slot_idx, 3),
1516  };
1517 
1518  auto getPolyRingsPtr = [&](const int8_t* geo_target_ptr) {
1519  return read_int_from_buff(getNextTargetBuffer(slot_idx, 4),
1521  };
1522 
1523  auto getPolyRingsLength = [&](const int8_t* geo_target_ptr) {
1524  return read_int_from_buff(getNextTargetBuffer(slot_idx, 5),
1526  };
1527 
1528  auto getFragColBuffers = [&]() -> decltype(auto) {
1529  const auto storage_idx = getStorageIndex(entry_buff_idx);
1530  CHECK_LT(storage_idx.first, col_buffers_.size());
1531  auto global_idx = getCoordsDataPtr(geo_target_ptr);
1532  return getColumnFrag(storage_idx.first, target_logical_idx, global_idx);
1533  };
1534 
1535  const bool is_gpu_fetch = device_type_ == ExecutorDeviceType::GPU;
1536 
1537  auto getDataMgr = [&]() {
1538  auto executor = query_mem_desc_.getExecutor();
1539  CHECK(executor);
1540  return executor->getDataMgr();
1541  };
1542 
1543  auto getSeparateVarlenStorage = [&]() -> decltype(auto) {
1544  const auto storage_idx = getStorageIndex(entry_buff_idx);
1545  CHECK_LT(storage_idx.first, serialized_varlen_buffer_.size());
1546  const auto& varlen_buffer = serialized_varlen_buffer_[storage_idx.first];
1547  return varlen_buffer;
1548  };
1549 
1550  if (separate_varlen_storage_valid_ && getCoordsDataPtr(geo_target_ptr) < 0) {
1551  CHECK_EQ(-1, getCoordsDataPtr(geo_target_ptr));
1552  return TargetValue(nullptr);
1553  }
1554 
1555  const ColumnLazyFetchInfo* col_lazy_fetch = nullptr;
1556  if (!lazy_fetch_info_.empty()) {
1557  CHECK_LT(target_logical_idx, lazy_fetch_info_.size());
1558  col_lazy_fetch = &lazy_fetch_info_[target_logical_idx];
1559  }
1560 
1561  switch (target_info.sql_type.get_type()) {
1562  case kPOINT: {
1563  if (query_mem_desc_.slotIsVarlenOutput(slot_idx)) {
1564  auto varlen_output_info = getVarlenOutputInfo(entry_buff_idx);
1565  CHECK(varlen_output_info);
1566  auto geo_data_ptr = read_int_from_buff(
1567  geo_target_ptr, query_mem_desc_.getPaddedSlotWidthBytes(slot_idx));
1568  auto cpu_data_ptr =
1569  reinterpret_cast<int64_t>(varlen_output_info->computeCpuOffset(geo_data_ptr));
1570  return GeoTargetValueBuilder<kPOINT, GeoQueryOutputFetchHandler>::build(
1571  target_info.sql_type,
1573  /*data_mgr=*/nullptr,
1574  /*is_gpu_fetch=*/false,
1575  device_id_,
1576  cpu_data_ptr,
1577  target_info.sql_type.get_compression() == kENCODING_GEOINT ? 8 : 16);
1578  } else if (separate_varlen_storage_valid_ && !target_info.is_agg) {
1579  const auto& varlen_buffer = getSeparateVarlenStorage();
1580  CHECK_LT(static_cast<size_t>(getCoordsDataPtr(geo_target_ptr)),
1581  varlen_buffer.size());
1582 
1583  return GeoTargetValueBuilder<kPOINT, GeoQueryOutputFetchHandler>::build(
1584  target_info.sql_type,
1586  nullptr,
1587  false,
1588  device_id_,
1589  reinterpret_cast<int64_t>(
1590  varlen_buffer[getCoordsDataPtr(geo_target_ptr)].data()),
1591  static_cast<int64_t>(varlen_buffer[getCoordsDataPtr(geo_target_ptr)].size()));
1592  } else if (col_lazy_fetch && col_lazy_fetch->is_lazily_fetched) {
1593  const auto& frag_col_buffers = getFragColBuffers();
1594  return GeoTargetValueBuilder<kPOINT, GeoLazyFetchHandler>::build(
1595  target_info.sql_type,
1597  frag_col_buffers[col_lazy_fetch->local_col_id],
1598  getCoordsDataPtr(geo_target_ptr));
1599  } else {
1600  return GeoTargetValueBuilder<kPOINT, GeoQueryOutputFetchHandler>::build(
1601  target_info.sql_type,
1603  is_gpu_fetch ? getDataMgr() : nullptr,
1604  is_gpu_fetch,
1605  device_id_,
1606  getCoordsDataPtr(geo_target_ptr),
1607  getCoordsLength(geo_target_ptr));
1608  }
1609  break;
1610  }
1611  case kLINESTRING: {
1612  if (separate_varlen_storage_valid_ && !target_info.is_agg) {
1613  const auto& varlen_buffer = getSeparateVarlenStorage();
1614  CHECK_LT(static_cast<size_t>(getCoordsDataPtr(geo_target_ptr)),
1615  varlen_buffer.size());
1616 
1617  return GeoTargetValueBuilder<kLINESTRING, GeoQueryOutputFetchHandler>::build(
1618  target_info.sql_type,
1620  nullptr,
1621  false,
1622  device_id_,
1623  reinterpret_cast<int64_t>(
1624  varlen_buffer[getCoordsDataPtr(geo_target_ptr)].data()),
1625  static_cast<int64_t>(varlen_buffer[getCoordsDataPtr(geo_target_ptr)].size()));
1626  } else if (col_lazy_fetch && col_lazy_fetch->is_lazily_fetched) {
1627  const auto& frag_col_buffers = getFragColBuffers();
1628  return GeoTargetValueBuilder<kLINESTRING, GeoLazyFetchHandler>::build(
1629  target_info.sql_type,
1631  frag_col_buffers[col_lazy_fetch->local_col_id],
1632  getCoordsDataPtr(geo_target_ptr));
1633  } else {
1634  return GeoTargetValueBuilder<kLINESTRING, GeoQueryOutputFetchHandler>::build(
1635  target_info.sql_type,
1637  is_gpu_fetch ? getDataMgr() : nullptr,
1638  is_gpu_fetch,
1639  device_id_,
1640  getCoordsDataPtr(geo_target_ptr),
1641  getCoordsLength(geo_target_ptr));
1642  }
1643  break;
1644  }
1645  case kPOLYGON: {
1646  if (separate_varlen_storage_valid_ && !target_info.is_agg) {
1647  const auto& varlen_buffer = getSeparateVarlenStorage();
1648  CHECK_LT(static_cast<size_t>(getCoordsDataPtr(geo_target_ptr) + 1),
1649  varlen_buffer.size());
1650 
1651  return GeoTargetValueBuilder<kPOLYGON, GeoQueryOutputFetchHandler>::build(
1652  target_info.sql_type,
1654  nullptr,
1655  false,
1656  device_id_,
1657  reinterpret_cast<int64_t>(
1658  varlen_buffer[getCoordsDataPtr(geo_target_ptr)].data()),
1659  static_cast<int64_t>(varlen_buffer[getCoordsDataPtr(geo_target_ptr)].size()),
1660  reinterpret_cast<int64_t>(
1661  varlen_buffer[getCoordsDataPtr(geo_target_ptr) + 1].data()),
1662  static_cast<int64_t>(
1663  varlen_buffer[getCoordsDataPtr(geo_target_ptr) + 1].size()));
1664  } else if (col_lazy_fetch && col_lazy_fetch->is_lazily_fetched) {
1665  const auto& frag_col_buffers = getFragColBuffers();
1666 
1667  return GeoTargetValueBuilder<kPOLYGON, GeoLazyFetchHandler>::build(
1668  target_info.sql_type,
1670  frag_col_buffers[col_lazy_fetch->local_col_id],
1671  getCoordsDataPtr(geo_target_ptr),
1672  frag_col_buffers[col_lazy_fetch->local_col_id + 1],
1673  getCoordsDataPtr(geo_target_ptr));
1674  } else {
1675  return GeoTargetValueBuilder<kPOLYGON, GeoQueryOutputFetchHandler>::build(
1676  target_info.sql_type,
1678  is_gpu_fetch ? getDataMgr() : nullptr,
1679  is_gpu_fetch,
1680  device_id_,
1681  getCoordsDataPtr(geo_target_ptr),
1682  getCoordsLength(geo_target_ptr),
1683  getRingSizesPtr(geo_target_ptr),
1684  getRingSizesLength(geo_target_ptr) * 4);
1685  }
1686  break;
1687  }
1688  case kMULTIPOLYGON: {
1689  if (separate_varlen_storage_valid_ && !target_info.is_agg) {
1690  const auto& varlen_buffer = getSeparateVarlenStorage();
1691  CHECK_LT(static_cast<size_t>(getCoordsDataPtr(geo_target_ptr) + 2),
1692  varlen_buffer.size());
1693 
1694  return GeoTargetValueBuilder<kMULTIPOLYGON, GeoQueryOutputFetchHandler>::build(
1695  target_info.sql_type,
1697  nullptr,
1698  false,
1699  device_id_,
1700  reinterpret_cast<int64_t>(
1701  varlen_buffer[getCoordsDataPtr(geo_target_ptr)].data()),
1702  static_cast<int64_t>(varlen_buffer[getCoordsDataPtr(geo_target_ptr)].size()),
1703  reinterpret_cast<int64_t>(
1704  varlen_buffer[getCoordsDataPtr(geo_target_ptr) + 1].data()),
1705  static_cast<int64_t>(
1706  varlen_buffer[getCoordsDataPtr(geo_target_ptr) + 1].size()),
1707  reinterpret_cast<int64_t>(
1708  varlen_buffer[getCoordsDataPtr(geo_target_ptr) + 2].data()),
1709  static_cast<int64_t>(
1710  varlen_buffer[getCoordsDataPtr(geo_target_ptr) + 2].size()));
1711  } else if (col_lazy_fetch && col_lazy_fetch->is_lazily_fetched) {
1712  const auto& frag_col_buffers = getFragColBuffers();
1713 
1714  return GeoTargetValueBuilder<kMULTIPOLYGON, GeoLazyFetchHandler>::build(
1715  target_info.sql_type,
1717  frag_col_buffers[col_lazy_fetch->local_col_id],
1718  getCoordsDataPtr(geo_target_ptr),
1719  frag_col_buffers[col_lazy_fetch->local_col_id + 1],
1720  getCoordsDataPtr(geo_target_ptr),
1721  frag_col_buffers[col_lazy_fetch->local_col_id + 2],
1722  getCoordsDataPtr(geo_target_ptr));
1723  } else {
1724  return GeoTargetValueBuilder<kMULTIPOLYGON, GeoQueryOutputFetchHandler>::build(
1725  target_info.sql_type,
1727  is_gpu_fetch ? getDataMgr() : nullptr,
1728  is_gpu_fetch,
1729  device_id_,
1730  getCoordsDataPtr(geo_target_ptr),
1731  getCoordsLength(geo_target_ptr),
1732  getRingSizesPtr(geo_target_ptr),
1733  getRingSizesLength(geo_target_ptr) * 4,
1734  getPolyRingsPtr(geo_target_ptr),
1735  getPolyRingsLength(geo_target_ptr) * 4);
1736  }
1737  break;
1738  }
1739  default:
1740  throw std::runtime_error("Unknown Geometry type encountered: " +
1741  target_info.sql_type.get_type_name());
1742  }
1743  UNREACHABLE();
1744  return TargetValue(nullptr);
1745 }
#define CHECK_EQ(x, y)
Definition: Logger.h:231
bool slotIsVarlenOutput(const size_t slot_idx) const
std::pair< size_t, size_t > getStorageIndex(const size_t entry_idx) const
Definition: ResultSet.cpp:914
GeoReturnType geo_return_type_
Definition: ResultSet.h:946
T advance_to_next_columnar_target_buff(T target_ptr, const QueryMemoryDescriptor &query_mem_desc, const size_t target_slot_idx)
SQLTypeInfo sql_type
Definition: TargetInfo.h:51
QueryMemoryDescriptor query_mem_desc_
Definition: ResultSet.h:904
#define UNREACHABLE()
Definition: Logger.h:267
int64_t read_int_from_buff(const int8_t *ptr, const int8_t compact_sz)
std::vector< SerializedVarlenBufferStorage > serialized_varlen_buffer_
Definition: ResultSet.h:937
HOST DEVICE SQLTypes get_type() const
Definition: sqltypes.h:329
bool is_agg
Definition: TargetInfo.h:49
size_t getPaddedColWidthForRange(const size_t offset, const size_t range) const
StorageLookupResult findStorage(const size_t entry_idx) const
Definition: ResultSet.cpp:939
const int8_t getPaddedSlotWidthBytes(const size_t slot_idx) const
const VarlenOutputInfo * getVarlenOutputInfo(const size_t entry_idx) const
#define CHECK_LT(x, y)
Definition: Logger.h:233
HOST DEVICE EncodingType get_compression() const
Definition: sqltypes.h:337
std::vector< std::vector< std::vector< const int8_t * > > > col_buffers_
Definition: ResultSet.h:925
std::string get_type_name() const
Definition: sqltypes.h:443
const bool is_lazily_fetched
const ExecutorDeviceType device_type_
Definition: ResultSet.h:902
#define CHECK(condition)
Definition: Logger.h:223
bool is_geometry() const
Definition: sqltypes.h:522
std::vector< ColumnLazyFetchInfo > lazy_fetch_info_
Definition: ResultSet.h:924
bool separate_varlen_storage_valid_
Definition: ResultSet.h:938
boost::variant< ScalarTargetValue, ArrayTargetValue, GeoTargetValue, GeoTargetValuePtr > TargetValue
Definition: TargetValue.h:167
const std::vector< const int8_t * > & getColumnFrag(const size_t storge_idx, const size_t col_logical_idx, int64_t &global_idx) const
const Executor * getExecutor() const
const int device_id_
Definition: ResultSet.h:903

+ Here is the call graph for this function:

+ Here is the caller graph for this function:

TargetValue ResultSet::makeTargetValue ( const int8_t *  ptr,
const int8_t  compact_sz,
const TargetInfo target_info,
const size_t  target_logical_idx,
const bool  translate_strings,
const bool  decimal_to_double,
const size_t  entry_buff_idx 
) const
private

Definition at line 1748 of file ResultSetIteration.cpp.

References TargetInfo::agg_kind, calculateQuantile(), catalog_, CHECK, CHECK_EQ, CHECK_GE, CHECK_LT, col_buffers_, count_distinct_set_size(), decimal_to_int_type(), exp_to_scale(), QueryMemoryDescriptor::forceFourByteFloat(), get_compact_type(), getColumnFrag(), QueryMemoryDescriptor::getCountDistinctDescriptor(), getStorageIndex(), inline_int_null_val(), anonymous_namespace{ResultSetIteration.cpp}::int_resize_cast(), TargetInfo::is_agg, SQLTypeInfo::is_date_in_days(), is_distinct_target(), QueryMemoryDescriptor::isLogicalSizedColumnsAllowed(), kAPPROX_QUANTILE, kAVG, kBIGINT, kENCODING_DICT, kFLOAT, kMAX, kMIN, kSINGLE_VALUE, kSUM, result_set::lazy_decode(), lazy_fetch_info_, NULL_DOUBLE, NULL_INT, query_mem_desc_, read_int_from_buff(), row_set_mem_owner_, and TargetInfo::sql_type.

Referenced by getTargetValueFromBufferColwise(), and getTargetValueFromBufferRowwise().

1754  {
1755  auto actual_compact_sz = compact_sz;
1756  const auto& type_info = target_info.sql_type;
1757  if (type_info.get_type() == kFLOAT && !query_mem_desc_.forceFourByteFloat()) {
1759  actual_compact_sz = sizeof(float);
1760  } else {
1761  actual_compact_sz = sizeof(double);
1762  }
1763  if (target_info.is_agg &&
1764  (target_info.agg_kind == kAVG || target_info.agg_kind == kSUM ||
1765  target_info.agg_kind == kMIN || target_info.agg_kind == kMAX ||
1766  target_info.agg_kind == kSINGLE_VALUE)) {
1767  // The above listed aggregates use two floats in a single 8-byte slot. Set the
1768  // padded size to 4 bytes to properly read each value.
1769  actual_compact_sz = sizeof(float);
1770  }
1771  }
1772  if (get_compact_type(target_info).is_date_in_days()) {
1773  // Dates encoded in days are converted to 8 byte values on read.
1774  actual_compact_sz = sizeof(int64_t);
1775  }
1776 
1777  // String dictionary keys are read as 32-bit values regardless of encoding
1778  if (type_info.is_string() && type_info.get_compression() == kENCODING_DICT &&
1779  type_info.get_comp_param()) {
1780  actual_compact_sz = sizeof(int32_t);
1781  }
1782 
1783  auto ival = read_int_from_buff(ptr, actual_compact_sz);
1784  const auto& chosen_type = get_compact_type(target_info);
1785  if (!lazy_fetch_info_.empty()) {
1786  CHECK_LT(target_logical_idx, lazy_fetch_info_.size());
1787  const auto& col_lazy_fetch = lazy_fetch_info_[target_logical_idx];
1788  if (col_lazy_fetch.is_lazily_fetched) {
1789  CHECK_GE(ival, 0);
1790  const auto storage_idx = getStorageIndex(entry_buff_idx);
1791  CHECK_LT(storage_idx.first, col_buffers_.size());
1792  auto& frag_col_buffers = getColumnFrag(storage_idx.first, target_logical_idx, ival);
1793  CHECK_LT(size_t(col_lazy_fetch.local_col_id), frag_col_buffers.size());
1794  ival = result_set::lazy_decode(
1795  col_lazy_fetch, frag_col_buffers[col_lazy_fetch.local_col_id], ival);
1796  if (chosen_type.is_fp()) {
1797  const auto dval = *reinterpret_cast<const double*>(may_alias_ptr(&ival));
1798  if (chosen_type.get_type() == kFLOAT) {
1799  return ScalarTargetValue(static_cast<float>(dval));
1800  } else {
1801  return ScalarTargetValue(dval);
1802  }
1803  }
1804  }
1805  }
1806  if (chosen_type.is_fp()) {
1807  if (target_info.agg_kind == kAPPROX_QUANTILE) {
1808  return *reinterpret_cast<double const*>(ptr) == NULL_DOUBLE
1809  ? NULL_DOUBLE // sql_validate / just_validate
1810  : calculateQuantile(*reinterpret_cast<quantile::TDigest* const*>(ptr));
1811  }
1812  switch (actual_compact_sz) {
1813  case 8: {
1814  const auto dval = *reinterpret_cast<const double*>(ptr);
1815  return chosen_type.get_type() == kFLOAT
1816  ? ScalarTargetValue(static_cast<const float>(dval))
1817  : ScalarTargetValue(dval);
1818  }
1819  case 4: {
1820  CHECK_EQ(kFLOAT, chosen_type.get_type());
1821  return *reinterpret_cast<const float*>(ptr);
1822  }
1823  default:
1824  CHECK(false);
1825  }
1826  }
1827  if (chosen_type.is_integer() | chosen_type.is_boolean() || chosen_type.is_time() ||
1828  chosen_type.is_timeinterval()) {
1829  if (is_distinct_target(target_info)) {
1831  ival, query_mem_desc_.getCountDistinctDescriptor(target_logical_idx)));
1832  }
1833  // TODO(alex): remove int_resize_cast, make read_int_from_buff return the
1834  // right type instead
1835  if (inline_int_null_val(chosen_type) ==
1836  int_resize_cast(ival, chosen_type.get_logical_size())) {
1837  return inline_int_null_val(type_info);
1838  }
1839  return ival;
1840  }
1841  if (chosen_type.is_string() && chosen_type.get_compression() == kENCODING_DICT) {
1842  if (translate_strings) {
1843  if (static_cast<int32_t>(ival) ==
1844  NULL_INT) { // TODO(alex): this isn't nice, fix it
1845  return NullableString(nullptr);
1846  }
1847  StringDictionaryProxy* sdp{nullptr};
1848  if (!chosen_type.get_comp_param()) {
1849  sdp = row_set_mem_owner_->getLiteralStringDictProxy();
1850  } else {
1851  sdp = catalog_
1852  ? row_set_mem_owner_->getOrAddStringDictProxy(
1853  chosen_type.get_comp_param(), /*with_generation=*/false, catalog_)
1854  : row_set_mem_owner_->getStringDictProxy(
1855  chosen_type.get_comp_param()); // unit tests bypass the catalog
1856  }
1857  return NullableString(sdp->getString(ival));
1858  } else {
1859  return static_cast<int64_t>(static_cast<int32_t>(ival));
1860  }
1861  }
1862  if (chosen_type.is_decimal()) {
1863  if (decimal_to_double) {
1864  if (target_info.is_agg &&
1865  (target_info.agg_kind == kAVG || target_info.agg_kind == kSUM ||
1866  target_info.agg_kind == kMIN || target_info.agg_kind == kMAX) &&
1867  ival == inline_int_null_val(SQLTypeInfo(kBIGINT, false))) {
1868  return NULL_DOUBLE;
1869  }
1870  if (!chosen_type.get_notnull() &&
1871  ival ==
1872  inline_int_null_val(SQLTypeInfo(decimal_to_int_type(chosen_type), false))) {
1873  return NULL_DOUBLE;
1874  }
1875  return static_cast<double>(ival) / exp_to_scale(chosen_type.get_scale());
1876  }
1877  return ival;
1878  }
1879  CHECK(false);
1880  return TargetValue(int64_t(0));
1881 }
#define CHECK_EQ(x, y)
Definition: Logger.h:231
std::pair< size_t, size_t > getStorageIndex(const size_t entry_idx) const
Definition: ResultSet.cpp:914
#define NULL_DOUBLE
double decimal_to_double(const SQLTypeInfo &otype, int64_t oval)
bool isLogicalSizedColumnsAllowed() const
SQLTypeInfo sql_type
Definition: TargetInfo.h:51
const Catalog_Namespace::Catalog * catalog_
Definition: ResultSet.h:914
QueryMemoryDescriptor query_mem_desc_
Definition: ResultSet.h:904
#define CHECK_GE(x, y)
Definition: Logger.h:236
int64_t read_int_from_buff(const int8_t *ptr, const int8_t compact_sz)
#define NULL_INT
Definition: sqldefs.h:75
const SQLTypeInfo get_compact_type(const TargetInfo &target)
std::shared_ptr< RowSetMemoryOwner > row_set_mem_owner_
Definition: ResultSet.h:911
bool is_agg
Definition: TargetInfo.h:49
int64_t count_distinct_set_size(const int64_t set_handle, const CountDistinctDescriptor &count_distinct_desc)
Definition: CountDistinct.h:77
int64_t lazy_decode(const ColumnLazyFetchInfo &col_lazy_fetch, const int8_t *byte_stream, const int64_t pos)
Definition: sqldefs.h:77
bool is_distinct_target(const TargetInfo &target_info)
Definition: TargetInfo.h:153
static double calculateQuantile(quantile::TDigest *const t_digest)
Definition: ResultSet.cpp:1008
SQLAgg agg_kind
Definition: TargetInfo.h:50
SQLTypes decimal_to_int_type(const SQLTypeInfo &ti)
Definition: Datum.cpp:493
const CountDistinctDescriptor & getCountDistinctDescriptor(const size_t idx) const
#define CHECK_LT(x, y)
Definition: Logger.h:233
bool is_date_in_days() const
Definition: sqltypes.h:873
int64_t int_resize_cast(const int64_t ival, const size_t sz)
std::vector< std::vector< std::vector< const int8_t * > > > col_buffers_
Definition: ResultSet.h:925
boost::variant< std::string, void * > NullableString
Definition: TargetValue.h:155
#define CHECK(condition)
Definition: Logger.h:223
std::vector< ColumnLazyFetchInfo > lazy_fetch_info_
Definition: ResultSet.h:924
uint64_t exp_to_scale(const unsigned exp)
int64_t inline_int_null_val(const SQL_TYPE_INFO &ti)
boost::variant< ScalarTargetValue, ArrayTargetValue, GeoTargetValue, GeoTargetValuePtr > TargetValue
Definition: TargetValue.h:167
Definition: sqldefs.h:76
Definition: sqldefs.h:74
const std::vector< const int8_t * > & getColumnFrag(const size_t storge_idx, const size_t col_logical_idx, int64_t &global_idx) const
boost::variant< int64_t, double, float, NullableString > ScalarTargetValue
Definition: TargetValue.h:156

+ Here is the call graph for this function:

+ Here is the caller graph for this function:

TargetValue ResultSet::makeVarlenTargetValue ( const int8_t *  ptr1,
const int8_t  compact_sz1,
const int8_t *  ptr2,
const int8_t  compact_sz2,
const TargetInfo target_info,
const size_t  target_logical_idx,
const bool  translate_strings,
const size_t  entry_buff_idx 
) const
private

Definition at line 1310 of file ResultSetIteration.cpp.

References anonymous_namespace{ResultSetIteration.cpp}::build_array_target_value(), catalog_, CHECK, CHECK_EQ, CHECK_GE, CHECK_GT, CHECK_LT, ChunkIter_get_nth(), col_buffers_, device_id_, device_type_, SQLTypeInfo::get_array_context_logical_size(), SQLTypeInfo::get_compression(), SQLTypeInfo::get_elem_type(), SQLTypeInfo::get_type(), getColumnFrag(), QueryMemoryDescriptor::getExecutor(), getQueryEngineCudaStreamForDevice(), getStorageIndex(), GPU, TargetInfo::is_agg, SQLTypeInfo::is_array(), VarlenDatum::is_null, SQLTypeInfo::is_string(), kARRAY, kENCODING_NONE, lazy_fetch_info_, VarlenDatum::length, run_benchmark_import::optional, VarlenDatum::pointer, query_mem_desc_, read_int_from_buff(), row_set_mem_owner_, separate_varlen_storage_valid_, serialized_varlen_buffer_, and TargetInfo::sql_type.

Referenced by getTargetValueFromBufferColwise(), and getTargetValueFromBufferRowwise().

1317  {
1318  auto varlen_ptr = read_int_from_buff(ptr1, compact_sz1);
1319  if (separate_varlen_storage_valid_ && !target_info.is_agg) {
1320  if (varlen_ptr < 0) {
1321  CHECK_EQ(-1, varlen_ptr);
1322  if (target_info.sql_type.get_type() == kARRAY) {
1323  return ArrayTargetValue(boost::optional<std::vector<ScalarTargetValue>>{});
1324  }
1325  return TargetValue(nullptr);
1326  }
1327  const auto storage_idx = getStorageIndex(entry_buff_idx);
1328  if (target_info.sql_type.is_string()) {
1329  CHECK(target_info.sql_type.get_compression() == kENCODING_NONE);
1330  CHECK_LT(storage_idx.first, serialized_varlen_buffer_.size());
1331  const auto& varlen_buffer_for_storage =
1332  serialized_varlen_buffer_[storage_idx.first];
1333  CHECK_LT(static_cast<size_t>(varlen_ptr), varlen_buffer_for_storage.size());
1334  return varlen_buffer_for_storage[varlen_ptr];
1335  } else if (target_info.sql_type.get_type() == kARRAY) {
1336  CHECK_LT(storage_idx.first, serialized_varlen_buffer_.size());
1337  const auto& varlen_buffer = serialized_varlen_buffer_[storage_idx.first];
1338  CHECK_LT(static_cast<size_t>(varlen_ptr), varlen_buffer.size());
1339 
1340  return build_array_target_value(
1341  target_info.sql_type,
1342  reinterpret_cast<const int8_t*>(varlen_buffer[varlen_ptr].data()),
1343  varlen_buffer[varlen_ptr].size(),
1344  translate_strings,
1346  catalog_);
1347  } else {
1348  CHECK(false);
1349  }
1350  }
1351  if (!lazy_fetch_info_.empty()) {
1352  CHECK_LT(target_logical_idx, lazy_fetch_info_.size());
1353  const auto& col_lazy_fetch = lazy_fetch_info_[target_logical_idx];
1354  if (col_lazy_fetch.is_lazily_fetched) {
1355  const auto storage_idx = getStorageIndex(entry_buff_idx);
1356  CHECK_LT(storage_idx.first, col_buffers_.size());
1357  auto& frag_col_buffers =
1358  getColumnFrag(storage_idx.first, target_logical_idx, varlen_ptr);
1359  bool is_end{false};
1360  if (target_info.sql_type.is_string()) {
1361  VarlenDatum vd;
1362  ChunkIter_get_nth(reinterpret_cast<ChunkIter*>(const_cast<int8_t*>(
1363  frag_col_buffers[col_lazy_fetch.local_col_id])),
1364  varlen_ptr,
1365  false,
1366  &vd,
1367  &is_end);
1368  CHECK(!is_end);
1369  if (vd.is_null) {
1370  return TargetValue(nullptr);
1371  }
1372  CHECK(vd.pointer);
1373  CHECK_GT(vd.length, 0u);
1374  std::string fetched_str(reinterpret_cast<char*>(vd.pointer), vd.length);
1375  return fetched_str;
1376  } else {
1377  CHECK(target_info.sql_type.is_array());
1378  ArrayDatum ad;
1379  ChunkIter_get_nth(reinterpret_cast<ChunkIter*>(const_cast<int8_t*>(
1380  frag_col_buffers[col_lazy_fetch.local_col_id])),
1381  varlen_ptr,
1382  &ad,
1383  &is_end);
1384  CHECK(!is_end);
1385  if (ad.is_null) {
1386  return ArrayTargetValue(boost::optional<std::vector<ScalarTargetValue>>{});
1387  }
1388  CHECK_GE(ad.length, 0u);
1389  if (ad.length > 0) {
1390  CHECK(ad.pointer);
1391  }
1392  return build_array_target_value(target_info.sql_type,
1393  ad.pointer,
1394  ad.length,
1395  translate_strings,
1397  catalog_);
1398  }
1399  }
1400  }
1401  if (!varlen_ptr) {
1402  if (target_info.sql_type.is_array()) {
1403  return ArrayTargetValue(boost::optional<std::vector<ScalarTargetValue>>{});
1404  }
1405  return TargetValue(nullptr);
1406  }
1407  auto length = read_int_from_buff(ptr2, compact_sz2);
1408  if (target_info.sql_type.is_array()) {
1409  const auto& elem_ti = target_info.sql_type.get_elem_type();
1410  length *= elem_ti.get_array_context_logical_size();
1411  }
1412  std::vector<int8_t> cpu_buffer;
1413  if (varlen_ptr && device_type_ == ExecutorDeviceType::GPU) {
1414  cpu_buffer.resize(length);
1415  const auto executor = query_mem_desc_.getExecutor();
1416  CHECK(executor);
1417  auto data_mgr = executor->getDataMgr();
1418  auto allocator = std::make_unique<CudaAllocator>(
1419  data_mgr, device_id_, getQueryEngineCudaStreamForDevice(device_id_));
1420 
1421  allocator->copyFromDevice(
1422  &cpu_buffer[0], reinterpret_cast<int8_t*>(varlen_ptr), length);
1423  varlen_ptr = reinterpret_cast<int64_t>(&cpu_buffer[0]);
1424  }
1425  if (target_info.sql_type.is_array()) {
1426  return build_array_target_value(target_info.sql_type,
1427  reinterpret_cast<const int8_t*>(varlen_ptr),
1428  length,
1429  translate_strings,
1431  catalog_);
1432  }
1433  return std::string(reinterpret_cast<char*>(varlen_ptr), length);
1434 }
#define CHECK_EQ(x, y)
Definition: Logger.h:231
std::pair< size_t, size_t > getStorageIndex(const size_t entry_idx) const
Definition: ResultSet.cpp:914
bool is_null
Definition: sqltypes.h:153
SQLTypeInfo sql_type
Definition: TargetInfo.h:51
const Catalog_Namespace::Catalog * catalog_
Definition: ResultSet.h:914
QueryMemoryDescriptor query_mem_desc_
Definition: ResultSet.h:904
#define CHECK_GE(x, y)
Definition: Logger.h:236
int64_t read_int_from_buff(const int8_t *ptr, const int8_t compact_sz)
std::vector< SerializedVarlenBufferStorage > serialized_varlen_buffer_
Definition: ResultSet.h:937
HOST DEVICE SQLTypes get_type() const
Definition: sqltypes.h:329
#define CHECK_GT(x, y)
Definition: Logger.h:235
DEVICE void ChunkIter_get_nth(ChunkIter *it, int n, bool uncompress, VarlenDatum *result, bool *is_end)
Definition: ChunkIter.cpp:181
TargetValue build_array_target_value(const int8_t *buff, const size_t buff_sz, std::shared_ptr< RowSetMemoryOwner > row_set_mem_owner)
int8_t * pointer
Definition: sqltypes.h:152
std::conditional_t< is_cuda_compiler(), DeviceArrayDatum, HostArrayDatum > ArrayDatum
Definition: sqltypes.h:208
std::shared_ptr< RowSetMemoryOwner > row_set_mem_owner_
Definition: ResultSet.h:911
bool is_agg
Definition: TargetInfo.h:49
boost::optional< std::vector< ScalarTargetValue >> ArrayTargetValue
Definition: TargetValue.h:157
#define CHECK_LT(x, y)
Definition: Logger.h:233
HOST DEVICE EncodingType get_compression() const
Definition: sqltypes.h:337
int get_array_context_logical_size() const
Definition: sqltypes.h:584
std::vector< std::vector< std::vector< const int8_t * > > > col_buffers_
Definition: ResultSet.h:925
CUstream getQueryEngineCudaStreamForDevice(int device_num)
Definition: QueryEngine.cpp:7
const ExecutorDeviceType device_type_
Definition: ResultSet.h:902
#define CHECK(condition)
Definition: Logger.h:223
std::vector< ColumnLazyFetchInfo > lazy_fetch_info_
Definition: ResultSet.h:924
bool separate_varlen_storage_valid_
Definition: ResultSet.h:938
boost::variant< ScalarTargetValue, ArrayTargetValue, GeoTargetValue, GeoTargetValuePtr > TargetValue
Definition: TargetValue.h:167
bool is_string() const
Definition: sqltypes.h:510
SQLTypeInfo get_elem_type() const
Definition: sqltypes.h:865
bool is_array() const
Definition: sqltypes.h:518
const std::vector< const int8_t * > & getColumnFrag(const size_t storge_idx, const size_t col_logical_idx, int64_t &global_idx) const
const Executor * getExecutor() const
size_t length
Definition: sqltypes.h:151
const int device_id_
Definition: ResultSet.h:903

+ Here is the call graph for this function:

+ Here is the caller graph for this function:

void ResultSet::moveToBegin ( ) const

Definition at line 731 of file ResultSet.cpp.

731  {
732  crt_row_buff_idx_ = 0;
733  fetched_so_far_ = 0;
734 }
size_t fetched_so_far_
Definition: ResultSet.h:908
size_t crt_row_buff_idx_
Definition: ResultSet.h:907
size_t ResultSet::parallelRowCount ( ) const
private

Definition at line 629 of file ResultSet.cpp.

References anonymous_namespace{ResultSet.cpp}::get_truncated_row_count(), threading_serial::parallel_reduce(), logger::query_id(), and logger::set_thread_local_query_id().

629  {
630  using namespace threading;
631  auto execute_parallel_row_count = [this, query_id = logger::query_id()](
632  const blocked_range<size_t>& r,
633  size_t row_count) {
634  auto qid_scope_guard = logger::set_thread_local_query_id(query_id);
635  for (size_t i = r.begin(); i < r.end(); ++i) {
636  if (!isRowAtEmpty(i)) {
637  ++row_count;
638  }
639  }
640  return row_count;
641  };
642  const auto row_count = parallel_reduce(blocked_range<size_t>(0, entryCount()),
643  size_t(0),
644  execute_parallel_row_count,
645  std::plus<int>());
646  return get_truncated_row_count(row_count, getLimit(), drop_first_);
647 }
QidScopeGuard set_thread_local_query_id(QueryId const query_id)
Definition: Logger.cpp:484
size_t getLimit() const
Definition: ResultSet.cpp:1302
size_t get_truncated_row_count(size_t total_row_count, size_t limit, size_t offset)
Definition: ResultSet.cpp:539
size_t drop_first_
Definition: ResultSet.h:909
Value parallel_reduce(const blocked_range< Int > &range, const Value &identity, const RealBody &real_body, const Reduction &reduction, const Partitioner &p=Partitioner())
Parallel iteration with reduction.
bool isRowAtEmpty(const size_t index) const
size_t entryCount() const
Returns the number of entries the result set is allocated to hold.
QueryId query_id()
Definition: Logger.cpp:470

+ Here is the call graph for this function:

void ResultSet::parallelTop ( const std::list< Analyzer::OrderEntry > &  order_entries,
const size_t  top_n,
const Executor executor 
)
private

Definition at line 866 of file ResultSet.cpp.

References gpu_enabled::copy(), cpu_threads(), DEBUG_TIMER, logger::query_id(), threading_std::task_group::run(), logger::set_thread_local_query_id(), and threading_std::task_group::wait().

868  {
869  auto timer = DEBUG_TIMER(__func__);
870  const size_t nthreads = cpu_threads();
871 
872  // Split permutation_ into nthreads subranges and top-sort in-place.
874  std::vector<PermutationView> permutation_views(nthreads);
875  threading::task_group top_sort_threads;
876  for (auto interval : makeIntervals<PermutationIdx>(0, permutation_.size(), nthreads)) {
877  top_sort_threads.run([this,
878  &order_entries,
879  &permutation_views,
880  top_n,
881  executor,
883  interval] {
884  auto qid_scope_guard = logger::set_thread_local_query_id(query_id);
885  PermutationView pv(permutation_.data() + interval.begin, 0, interval.size());
886  pv = initPermutationBuffer(pv, interval.begin, interval.end);
887  const auto compare = createComparator(order_entries, pv, executor, true);
888  permutation_views[interval.index] = topPermutation(pv, top_n, compare);
889  });
890  }
891  top_sort_threads.wait();
892 
893  // In case you are considering implementing a parallel reduction, note that the
894  // ResultSetComparator constructor is O(N) in order to materialize some of the aggregate
895  // columns as necessary to perform a comparison. This cost is why reduction is chosen to
896  // be serial instead; only one more Comparator is needed below.
897 
898  // Left-copy disjoint top-sorted subranges into one contiguous range.
899  // ++++....+++.....+++++... -> ++++++++++++............
900  auto end = permutation_.begin() + permutation_views.front().size();
901  for (size_t i = 1; i < nthreads; ++i) {
902  std::copy(permutation_views[i].begin(), permutation_views[i].end(), end);
903  end += permutation_views[i].size();
904  }
905 
906  // Top sort final range.
907  PermutationView pv(permutation_.data(), end - permutation_.begin());
908  const auto compare = createComparator(order_entries, pv, executor, false);
909  pv = topPermutation(pv, top_n, compare);
910  permutation_.resize(pv.size());
911  permutation_.shrink_to_fit();
912 }
QidScopeGuard set_thread_local_query_id(QueryId const query_id)
Definition: Logger.cpp:484
Permutation permutation_
Definition: ResultSet.h:912
PermutationView initPermutationBuffer(PermutationView permutation, PermutationIdx const begin, PermutationIdx const end) const
Definition: ResultSet.cpp:846
QueryMemoryDescriptor query_mem_desc_
Definition: ResultSet.h:904
DEVICE auto copy(ARGS &&...args)
Definition: gpu_enabled.h:51
Comparator createComparator(const std::list< Analyzer::OrderEntry > &order_entries, const PermutationView permutation, const Executor *executor, const bool single_threaded)
Definition: ResultSet.h:836
static PermutationView topPermutation(PermutationView, const size_t n, const Comparator &)
Definition: ResultSet.cpp:1208
QueryId query_id()
Definition: Logger.cpp:470
#define DEBUG_TIMER(name)
Definition: Logger.h:370
int cpu_threads()
Definition: thread_count.h:24

+ Here is the call graph for this function:

void ResultSet::radixSortOnCpu ( const std::list< Analyzer::OrderEntry > &  order_entries) const
private

Definition at line 1262 of file ResultSet.cpp.

References apply_permutation_cpu(), CHECK, CHECK_EQ, DEBUG_TIMER, and sort_groups_cpu().

1263  {
1264  auto timer = DEBUG_TIMER(__func__);
1266  std::vector<int64_t> tmp_buff(query_mem_desc_.getEntryCount());
1267  std::vector<int32_t> idx_buff(query_mem_desc_.getEntryCount());
1268  CHECK_EQ(size_t(1), order_entries.size());
1269  auto buffer_ptr = storage_->getUnderlyingBuffer();
1270  for (const auto& order_entry : order_entries) {
1271  const auto target_idx = order_entry.tle_no - 1;
1272  const auto sortkey_val_buff = reinterpret_cast<int64_t*>(
1273  buffer_ptr + query_mem_desc_.getColOffInBytes(target_idx));
1274  const auto chosen_bytes = query_mem_desc_.getPaddedSlotWidthBytes(target_idx);
1275  sort_groups_cpu(sortkey_val_buff,
1276  &idx_buff[0],
1278  order_entry.is_desc,
1279  chosen_bytes);
1280  apply_permutation_cpu(reinterpret_cast<int64_t*>(buffer_ptr),
1281  &idx_buff[0],
1283  &tmp_buff[0],
1284  sizeof(int64_t));
1285  for (size_t target_idx = 0; target_idx < query_mem_desc_.getSlotCount();
1286  ++target_idx) {
1287  if (static_cast<int>(target_idx) == order_entry.tle_no - 1) {
1288  continue;
1289  }
1290  const auto chosen_bytes = query_mem_desc_.getPaddedSlotWidthBytes(target_idx);
1291  const auto satellite_val_buff = reinterpret_cast<int64_t*>(
1292  buffer_ptr + query_mem_desc_.getColOffInBytes(target_idx));
1293  apply_permutation_cpu(satellite_val_buff,
1294  &idx_buff[0],
1296  &tmp_buff[0],
1297  chosen_bytes);
1298  }
1299  }
1300 }
#define CHECK_EQ(x, y)
Definition: Logger.h:231
void sort_groups_cpu(int64_t *val_buff, int32_t *idx_buff, const uint64_t entry_count, const bool desc, const uint32_t chosen_bytes)
Definition: InPlaceSort.cpp:27
QueryMemoryDescriptor query_mem_desc_
Definition: ResultSet.h:904
std::unique_ptr< ResultSetStorage > storage_
Definition: ResultSet.h:905
void apply_permutation_cpu(int64_t *val_buff, int32_t *idx_buff, const uint64_t entry_count, int64_t *tmp_buff, const uint32_t chosen_bytes)
Definition: InPlaceSort.cpp:46
const int8_t getPaddedSlotWidthBytes(const size_t slot_idx) const
#define CHECK(condition)
Definition: Logger.h:223
#define DEBUG_TIMER(name)
Definition: Logger.h:370
size_t getColOffInBytes(const size_t col_idx) const

+ Here is the call graph for this function:

void ResultSet::radixSortOnGpu ( const std::list< Analyzer::OrderEntry > &  order_entries) const
private

Definition at line 1222 of file ResultSet.cpp.

References catalog_(), CHECK_GT, copy_group_by_buffers_from_gpu(), create_dev_group_by_buffers(), DEBUG_TIMER, getQueryEngineCudaStreamForDevice(), GPU, inplace_sort_gpu(), and KernelPerFragment.

1223  {
1224  auto timer = DEBUG_TIMER(__func__);
1225  auto data_mgr = &catalog_->getDataMgr();
1226  const int device_id{0};
1227  auto allocator = std::make_unique<CudaAllocator>(
1228  data_mgr, device_id, getQueryEngineCudaStreamForDevice(device_id));
1229  CHECK_GT(block_size_, 0);
1230  CHECK_GT(grid_size_, 0);
1231  std::vector<int64_t*> group_by_buffers(block_size_);
1232  group_by_buffers[0] = reinterpret_cast<int64_t*>(storage_->getUnderlyingBuffer());
1233  auto dev_group_by_buffers =
1234  create_dev_group_by_buffers(allocator.get(),
1235  group_by_buffers,
1237  block_size_,
1238  grid_size_,
1239  device_id,
1241  /*num_input_rows=*/-1,
1242  /*prepend_index_buffer=*/true,
1243  /*always_init_group_by_on_host=*/true,
1244  /*use_bump_allocator=*/false,
1245  /*has_varlen_output=*/false,
1246  /*insitu_allocator*=*/nullptr);
1248  order_entries, query_mem_desc_, dev_group_by_buffers, data_mgr, device_id);
1250  *allocator,
1251  group_by_buffers,
1252  query_mem_desc_.getBufferSizeBytes(ExecutorDeviceType::GPU),
1253  dev_group_by_buffers.data,
1255  block_size_,
1256  grid_size_,
1257  device_id,
1258  /*use_bump_allocator=*/false,
1259  /*has_varlen_output=*/false);
1260 }
GpuGroupByBuffers create_dev_group_by_buffers(DeviceAllocator *device_allocator, const std::vector< int64_t * > &group_by_buffers, const QueryMemoryDescriptor &query_mem_desc, const unsigned block_size_x, const unsigned grid_size_x, const int device_id, const ExecutorDispatchMode dispatch_mode, const int64_t num_input_rows, const bool prepend_index_buffer, const bool always_init_group_by_on_host, const bool use_bump_allocator, const bool has_varlen_output, Allocator *insitu_allocator)
Definition: GpuMemUtils.cpp:70
Data_Namespace::DataMgr & getDataMgr() const
Definition: Catalog.h:229
const Catalog_Namespace::Catalog * catalog_
Definition: ResultSet.h:914
QueryMemoryDescriptor query_mem_desc_
Definition: ResultSet.h:904
std::unique_ptr< ResultSetStorage > storage_
Definition: ResultSet.h:905
void inplace_sort_gpu(const std::list< Analyzer::OrderEntry > &order_entries, const QueryMemoryDescriptor &query_mem_desc, const GpuGroupByBuffers &group_by_buffers, Data_Namespace::DataMgr *data_mgr, const int device_id)
#define CHECK_GT(x, y)
Definition: Logger.h:235
unsigned block_size_
Definition: ResultSet.h:915
unsigned grid_size_
Definition: ResultSet.h:916
CUstream getQueryEngineCudaStreamForDevice(int device_num)
Definition: QueryEngine.cpp:7
#define DEBUG_TIMER(name)
Definition: Logger.h:370
void copy_group_by_buffers_from_gpu(DeviceAllocator &device_allocator, const std::vector< int64_t * > &group_by_buffers, const size_t groups_buffer_size, const int8_t *group_by_dev_buffers_mem, const QueryMemoryDescriptor &query_mem_desc, const unsigned block_size_x, const unsigned grid_size_x, const int device_id, const bool prepend_index_buffer, const bool has_varlen_output)

+ Here is the call graph for this function:

size_t ResultSet::rowCount ( const bool  force_parallel = false) const

Returns the number of valid entries in the result set (i.e that will be returned from the SQL query or inputted into the next query step)

Note that this can be less than or equal to the value returned by ResultSet::entryCount(), whether due to a SQL LIMIT/OFFSET applied or because the result set representation is inherently sparse (i.e. baseline hash group by).

Internally this function references/sets a cached value (cached_row_count_) so that the cost of computing the result is only paid once per result set.

If the actual row count is not cached and needs to be computed, in some cases that can be O(1) (e.g. if limits and offsets are present, or for the output of a table function). For projections, we use a binary search, so it is O(log n), otherwise it is O(n) (with n being ResultSet::entryCount()), which will be run in parallel if the entry count >= the default of 20000 or if force_parallel is set to true

Note that we currently do not invalidate the cache if the result set is changed (i.e appended to), so this function should only be called after the result set is finalized.

Parameters
force_parallel — Forces the row count to be computed in parallel if the row count cannot otherwise be computed from metadata or via a binary search (otherwise parallel search is automatically used for result sets with entryCount() >= 20000)

Definition at line 593 of file ResultSet.cpp.

References CHECK_GE, and uninitialized_cached_row_count.

593  {
594  // cached_row_count_ is atomic, so fetch it into a local variable first
595  // to avoid repeat fetches
596  const int64_t cached_row_count = cached_row_count_;
597  if (cached_row_count != uninitialized_cached_row_count) {
598  CHECK_GE(cached_row_count, 0);
599  return cached_row_count;
600  }
601  setCachedRowCount(rowCountImpl(force_parallel));
602  return cached_row_count_;
603 }
#define CHECK_GE(x, y)
Definition: Logger.h:236
std::atomic< int64_t > cached_row_count_
Definition: ResultSet.h:942
size_t rowCountImpl(const bool force_parallel) const
Definition: ResultSet.cpp:555
constexpr int64_t uninitialized_cached_row_count
Definition: ResultSet.cpp:50
void setCachedRowCount(const size_t row_count) const
Definition: ResultSet.cpp:609
size_t ResultSet::rowCountImpl ( const bool  force_parallel) const
private

Definition at line 555 of file ResultSet.cpp.

References CHECK, anonymous_namespace{ResultSet.cpp}::get_truncated_row_count(), Projection, and TableFunction.

555  {
556  if (just_explain_) {
557  return 1;
558  }
560  return entryCount();
561  }
562  if (!permutation_.empty()) {
563  // keep_first_ corresponds to SQL LIMIT
564  // drop_first_ corresponds to SQL OFFSET
566  }
567  if (!storage_) {
568  return 0;
569  }
570  CHECK(permutation_.empty());
572  return binSearchRowCount();
573  }
574 
575  constexpr size_t auto_parallel_row_count_threshold{20000UL};
576  if (force_parallel || entryCount() >= auto_parallel_row_count_threshold) {
577  return parallelRowCount();
578  }
579  std::lock_guard<std::mutex> lock(row_iteration_mutex_);
580  moveToBegin();
581  size_t row_count{0};
582  while (true) {
583  auto crt_row = getNextRowUnlocked(false, false);
584  if (crt_row.empty()) {
585  break;
586  }
587  ++row_count;
588  }
589  moveToBegin();
590  return row_count;
591 }
std::mutex row_iteration_mutex_
Definition: ResultSet.h:943
Permutation permutation_
Definition: ResultSet.h:912
void moveToBegin() const
Definition: ResultSet.cpp:731
QueryMemoryDescriptor query_mem_desc_
Definition: ResultSet.h:904
std::unique_ptr< ResultSetStorage > storage_
Definition: ResultSet.h:905
size_t keep_first_
Definition: ResultSet.h:910
const bool just_explain_
Definition: ResultSet.h:940
size_t get_truncated_row_count(size_t total_row_count, size_t limit, size_t offset)
Definition: ResultSet.cpp:539
size_t parallelRowCount() const
Definition: ResultSet.cpp:629
size_t drop_first_
Definition: ResultSet.h:909
QueryDescriptionType getQueryDescriptionType() const
size_t entryCount() const
Returns the number of entries the result set is allocated to hold.
#define CHECK(condition)
Definition: Logger.h:223
std::vector< TargetValue > getNextRowUnlocked(const bool translate_strings, const bool decimal_to_double) const
size_t binSearchRowCount() const
Definition: ResultSet.cpp:616

+ Here is the call graph for this function:

ResultSetRowIterator ResultSet::rowIterator ( size_t  from_logical_index,
bool  translate_strings,
bool  decimal_to_double 
) const
inline

Definition at line 205 of file ResultSet.h.

Referenced by rowIterator().

207  {
208  ResultSetRowIterator rowIterator(this, translate_strings, decimal_to_double);
209 
210  // move to first logical position
211  ++rowIterator;
212 
213  for (size_t index = 0; index < from_logical_index; index++) {
214  ++rowIterator;
215  }
216 
217  return rowIterator;
218  }
double decimal_to_double(const SQLTypeInfo &otype, int64_t oval)
ResultSetRowIterator rowIterator(size_t from_logical_index, bool translate_strings, bool decimal_to_double) const
Definition: ResultSet.h:205

+ Here is the caller graph for this function:

ResultSetRowIterator ResultSet::rowIterator ( bool  translate_strings,
bool  decimal_to_double 
) const
inline

Definition at line 220 of file ResultSet.h.

References rowIterator().

221  {
222  return rowIterator(0, translate_strings, decimal_to_double);
223  }
double decimal_to_double(const SQLTypeInfo &otype, int64_t oval)
ResultSetRowIterator rowIterator(size_t from_logical_index, bool translate_strings, bool decimal_to_double) const
Definition: ResultSet.h:205

+ Here is the call graph for this function:

void ResultSet::serialize ( TSerializedRows &  serialized_rows) const
void ResultSet::serializeCountDistinctColumns ( TSerializedRows &  ) const
private
void ResultSet::serializeProjection ( TSerializedRows &  serialized_rows) const
private
void ResultSet::serializeVarlenAggColumn ( int8_t *  buf,
std::vector< std::string > &  varlen_buffer 
) const
private
void ResultSet::setCached ( bool  val)
inline

Definition at line 477 of file ResultSet.h.

References cached_.

477 { cached_ = val; }
bool cached_
Definition: ResultSet.h:949
void ResultSet::setCachedRowCount ( const size_t  row_count) const

Definition at line 609 of file ResultSet.cpp.

References CHECK, and uninitialized_cached_row_count.

609  {
610  const int64_t signed_row_count = static_cast<int64_t>(row_count);
611  const int64_t old_cached_row_count = cached_row_count_.exchange(signed_row_count);
612  CHECK(old_cached_row_count == uninitialized_cached_row_count ||
613  old_cached_row_count == signed_row_count);
614 }
std::atomic< int64_t > cached_row_count_
Definition: ResultSet.h:942
#define CHECK(condition)
Definition: Logger.h:223
constexpr int64_t uninitialized_cached_row_count
Definition: ResultSet.cpp:50
void ResultSet::setExecTime ( const long  exec_time)
inline

Definition at line 481 of file ResultSet.h.

References query_exec_time_.

481 { query_exec_time_ = exec_time; }
size_t query_exec_time_
Definition: ResultSet.h:951
void ResultSet::setGeoReturnType ( const GeoReturnType  val)
inline

Definition at line 529 of file ResultSet.h.

References geo_return_type_.

Referenced by initStatus().

529 { geo_return_type_ = val; }
GeoReturnType geo_return_type_
Definition: ResultSet.h:946

+ Here is the caller graph for this function:

void ResultSet::setInputTableKeys ( std::unordered_set< size_t > &&  input_table_keys)
inline
 
Definition at line 491 of file ResultSet.h.
 
References input_table_keys_.
 
491  {
492  input_table_keys_ = std::move(input_table_keys);
493  }
std::unordered_set< size_t > input_table_keys_
Definition: ResultSet.h:953
void ResultSet::setKernelQueueTime ( const int64_t  kernel_queue_time)

Definition at line 714 of file ResultSet.cpp.

714  {
715  timings_.kernel_queue_time = kernel_queue_time;
716 }
QueryExecutionTimings timings_
Definition: ResultSet.h:917
void ResultSet::setQueryPlanHash ( const QueryPlanHash  query_plan)
inline

Definition at line 485 of file ResultSet.h.

References query_plan_.

485 { query_plan_ = query_plan; }
QueryPlanHash query_plan_
Definition: ResultSet.h:952
void ResultSet::setQueueTime ( const int64_t  queue_time)

Definition at line 710 of file ResultSet.cpp.

710  {
711  timings_.executor_queue_time = queue_time;
712 }
QueryExecutionTimings timings_
Definition: ResultSet.h:917
void ResultSet::setSeparateVarlenStorageValid ( const bool  val)
inline

Definition at line 571 of file ResultSet.h.

References separate_varlen_storage_valid_.

571  {
573  }
bool separate_varlen_storage_valid_
Definition: ResultSet.h:938
void ResultSet::setTargetMetaInfo ( const std::vector< TargetMetaInfo > &  target_meta_info)
inline

Definition at line 495 of file ResultSet.h.

References gpu_enabled::copy(), and target_meta_info_.

495  {
496  std::copy(target_meta_info.begin(),
497  target_meta_info.end(),
498  std::back_inserter(target_meta_info_));
499  }
DEVICE auto copy(ARGS &&...args)
Definition: gpu_enabled.h:51
std::vector< TargetMetaInfo > target_meta_info_
Definition: ResultSet.h:954

+ Here is the call graph for this function:

void ResultSet::setUseSpeculativeTopNSort ( bool  value)
inline

Definition at line 507 of file ResultSet.h.

References can_use_speculative_top_n_sort.

std::optional< bool > can_use_speculative_top_n_sort
Definition: ResultSet.h:959
void ResultSet::setValidationOnlyRes ( )

Definition at line 744 of file ResultSet.cpp.

744  {
745  for_validation_only_ = true;
746 }
bool for_validation_only_
Definition: ResultSet.h:941
void ResultSet::sort ( const std::list< Analyzer::OrderEntry > &  order_entries,
size_t  top_n,
const Executor executor 
)

Definition at line 768 of file ResultSet.cpp.

References Executor::baseline_threshold, CHECK, DEBUG_TIMER, g_enable_watchdog, g_parallel_top_max, g_parallel_top_min, LOG, VectorView< T >::size(), and logger::WARNING.

770  {
771  auto timer = DEBUG_TIMER(__func__);
772 
773  if (!storage_) {
774  return;
775  }
777  CHECK(!targets_.empty());
778 #ifdef HAVE_CUDA
779  if (canUseFastBaselineSort(order_entries, top_n)) {
780  baselineSort(order_entries, top_n, executor);
781  return;
782  }
783 #endif // HAVE_CUDA
784  if (query_mem_desc_.sortOnGpu()) {
785  try {
786  radixSortOnGpu(order_entries);
787  } catch (const OutOfMemory&) {
788  LOG(WARNING) << "Out of GPU memory during sort, finish on CPU";
789  radixSortOnCpu(order_entries);
790  } catch (const std::bad_alloc&) {
791  LOG(WARNING) << "Out of GPU memory during sort, finish on CPU";
792  radixSortOnCpu(order_entries);
793  }
794  return;
795  }
796  // This check isn't strictly required, but allows the index buffer to be 32-bit.
797  if (query_mem_desc_.getEntryCount() > std::numeric_limits<uint32_t>::max()) {
798  throw RowSortException("Sorting more than 4B elements not supported");
799  }
800 
801  CHECK(permutation_.empty());
802 
803  if (top_n && g_parallel_top_min < entryCount()) {
805  throw WatchdogException("Sorting the result would be too slow");
806  }
807  parallelTop(order_entries, top_n, executor);
808  } else {
810  throw WatchdogException("Sorting the result would be too slow");
811  }
813  // PermutationView is used to share common API with parallelTop().
814  PermutationView pv(permutation_.data(), 0, permutation_.size());
815  pv = initPermutationBuffer(pv, 0, permutation_.size());
816  if (top_n == 0) {
817  top_n = pv.size(); // top_n == 0 implies a full sort
818  }
819  pv = topPermutation(pv, top_n, createComparator(order_entries, pv, executor, false));
820  if (pv.size() < permutation_.size()) {
821  permutation_.resize(pv.size());
822  permutation_.shrink_to_fit();
823  }
824  }
825 }
size_t g_parallel_top_max
Definition: ResultSet.cpp:48
Permutation permutation_
Definition: ResultSet.h:912
PermutationView initPermutationBuffer(PermutationView permutation, PermutationIdx const begin, PermutationIdx const end) const
Definition: ResultSet.cpp:846
#define LOG(tag)
Definition: Logger.h:217
static const size_t baseline_threshold
Definition: Execute.h:1276
QueryMemoryDescriptor query_mem_desc_
Definition: ResultSet.h:904
std::unique_ptr< ResultSetStorage > storage_
Definition: ResultSet.h:905
void parallelTop(const std::list< Analyzer::OrderEntry > &order_entries, const size_t top_n, const Executor *executor)
Definition: ResultSet.cpp:866
void radixSortOnCpu(const std::list< Analyzer::OrderEntry > &order_entries) const
Definition: ResultSet.cpp:1262
size_t g_parallel_top_min
Definition: ResultSet.cpp:47
DEVICE size_type size() const
Definition: VectorView.h:84
const std::vector< TargetInfo > targets_
Definition: ResultSet.h:901
void invalidateCachedRowCount() const
Definition: ResultSet.cpp:605
bool canUseFastBaselineSort(const std::list< Analyzer::OrderEntry > &order_entries, const size_t top_n)
bool g_enable_watchdog
Comparator createComparator(const std::list< Analyzer::OrderEntry > &order_entries, const PermutationView permutation, const Executor *executor, const bool single_threaded)
Definition: ResultSet.h:836
void radixSortOnGpu(const std::list< Analyzer::OrderEntry > &order_entries) const
Definition: ResultSet.cpp:1222
size_t entryCount() const
Returns the number of entries the result set is allocated to hold.
void baselineSort(const std::list< Analyzer::OrderEntry > &order_entries, const size_t top_n, const Executor *executor)
static PermutationView topPermutation(PermutationView, const size_t n, const Comparator &)
Definition: ResultSet.cpp:1208
#define CHECK(condition)
Definition: Logger.h:223
#define DEBUG_TIMER(name)
Definition: Logger.h:370

+ Here is the call graph for this function:

std::string ResultSet::summaryToString ( ) const

Definition at line 218 of file ResultSet.cpp.

218  {
219  std::ostringstream oss;
220  oss << "Result Set Info" << std::endl;
221  oss << "\tLayout: " << query_mem_desc_.queryDescTypeToString() << std::endl;
222  oss << "\tColumns: " << colCount() << std::endl;
223  oss << "\tRows: " << rowCount() << std::endl;
224  oss << "\tEntry count: " << entryCount() << std::endl;
225  const std::string is_empty = isEmpty() ? "True" : "False";
226  oss << "\tIs empty: " << is_empty << std::endl;
227  const std::string did_output_columnar = didOutputColumnar() ? "True" : "False";
228  oss << "\tColumnar: " << did_output_columnar << std::endl;
229  oss << "\tLazy-fetched columns: " << getNumColumnsLazyFetched() << std::endl;
230  const std::string is_direct_columnar_conversion_possible =
231  isDirectColumnarConversionPossible() ? "True" : "False";
232  oss << "\tDirect columnar conversion possible: "
233  << is_direct_columnar_conversion_possible << std::endl;
234 
235  size_t num_columns_zero_copy_columnarizable{0};
236  for (size_t target_idx = 0; target_idx < targets_.size(); target_idx++) {
237  if (isZeroCopyColumnarConversionPossible(target_idx)) {
238  num_columns_zero_copy_columnarizable++;
239  }
240  }
241  oss << "\tZero-copy columnar conversion columns: "
242  << num_columns_zero_copy_columnarizable << std::endl;
243 
244  oss << "\tPermutation size: " << permutation_.size() << std::endl;
245  oss << "\tLimit: " << keep_first_ << std::endl;
246  oss << "\tOffset: " << drop_first_ << std::endl;
247  return oss.str();
248 }
Permutation permutation_
Definition: ResultSet.h:912
bool didOutputColumnar() const
Definition: ResultSet.h:537
size_t getNumColumnsLazyFetched() const
Definition: ResultSet.h:566
QueryMemoryDescriptor query_mem_desc_
Definition: ResultSet.h:904
size_t rowCount(const bool force_parallel=false) const
Returns the number of valid entries in the result set (i.e. that will be returned from the SQL query o...
Definition: ResultSet.cpp:593
size_t keep_first_
Definition: ResultSet.h:910
size_t colCount() const
Definition: ResultSet.cpp:413
bool isZeroCopyColumnarConversionPossible(size_t column_idx) const
Definition: ResultSet.cpp:1379
const std::vector< TargetInfo > targets_
Definition: ResultSet.h:901
size_t drop_first_
Definition: ResultSet.h:909
size_t entryCount() const
Returns the number of entries the result set is allocated to hold.
bool isEmpty() const
Returns a boolean signifying whether there are valid entries in the result set.
Definition: ResultSet.cpp:649
std::string queryDescTypeToString() const
bool isDirectColumnarConversionPossible() const
Definition: ResultSet.cpp:1357
void ResultSet::syncEstimatorBuffer ( ) const

Definition at line 696 of file ResultSet.cpp.

References CHECK, CHECK_EQ, checked_calloc(), data_mgr_(), getQueryEngineCudaStreamForDevice(), and GPU.

696  {
699  CHECK_EQ(size_t(0), estimator_->getBufferSize() % sizeof(int64_t));
701  static_cast<int8_t*>(checked_calloc(estimator_->getBufferSize(), 1));
703  auto device_buffer_ptr = device_estimator_buffer_->getMemoryPtr();
704  auto allocator = std::make_unique<CudaAllocator>(
706  allocator->copyFromDevice(
707  host_estimator_buffer_, device_buffer_ptr, estimator_->getBufferSize());
708 }
#define CHECK_EQ(x, y)
Definition: Logger.h:231
virtual int8_t * getMemoryPtr()=0
void * checked_calloc(const size_t nmemb, const size_t size)
Definition: checked_alloc.h:53
Data_Namespace::DataMgr * data_mgr_
Definition: ResultSet.h:932
const std::shared_ptr< const Analyzer::Estimator > estimator_
Definition: ResultSet.h:929
CUstream getQueryEngineCudaStreamForDevice(int device_num)
Definition: QueryEngine.cpp:7
int8_t * host_estimator_buffer_
Definition: ResultSet.h:931
const ExecutorDeviceType device_type_
Definition: ResultSet.h:902
#define CHECK(condition)
Definition: Logger.h:223
Data_Namespace::AbstractBuffer * device_estimator_buffer_
Definition: ResultSet.h:930
const int device_id_
Definition: Result