OmniSciDB  c0231cc57d
 All Classes Namespaces Files Functions Variables Typedefs Enumerations Enumerator Friends Macros Groups Pages
ResultSet Class Reference

#include <ResultSet.h>

+ Collaboration diagram for ResultSet:

Classes

struct  ColumnWiseTargetAccessor
 
struct  QueryExecutionTimings
 
struct  ResultSetComparator
 
struct  RowIterationState
 
struct  RowWiseTargetAccessor
 
struct  StorageLookupResult
 
struct  TargetOffsets
 
struct  VarlenTargetPtrPair
 

Public Types

enum  GeoReturnType { GeoReturnType::GeoTargetValue, GeoReturnType::WktString, GeoReturnType::GeoTargetValuePtr, GeoReturnType::GeoTargetValueGpuPtr }
 

Public Member Functions

 ResultSet (const std::vector< TargetInfo > &targets, const ExecutorDeviceType device_type, const QueryMemoryDescriptor &query_mem_desc, const std::shared_ptr< RowSetMemoryOwner > row_set_mem_owner, const Catalog_Namespace::Catalog *catalog, const unsigned block_size, const unsigned grid_size)
 
 ResultSet (const std::vector< TargetInfo > &targets, const std::vector< ColumnLazyFetchInfo > &lazy_fetch_info, const std::vector< std::vector< const int8_t * >> &col_buffers, const std::vector< std::vector< int64_t >> &frag_offsets, const std::vector< int64_t > &consistent_frag_sizes, const ExecutorDeviceType device_type, const int device_id, const QueryMemoryDescriptor &query_mem_desc, const std::shared_ptr< RowSetMemoryOwner > row_set_mem_owner, const Catalog_Namespace::Catalog *catalog, const unsigned block_size, const unsigned grid_size)
 
 ResultSet (const std::shared_ptr< const Analyzer::Estimator >, const ExecutorDeviceType device_type, const int device_id, Data_Namespace::DataMgr *data_mgr)
 
 ResultSet (const std::string &explanation)
 
 ResultSet (int64_t queue_time_ms, int64_t render_time_ms, const std::shared_ptr< RowSetMemoryOwner > row_set_mem_owner)
 
 ~ResultSet ()
 
std::string toString () const
 
std::string summaryToString () const
 
ResultSetRowIterator rowIterator (size_t from_logical_index, bool translate_strings, bool decimal_to_double) const
 
ResultSetRowIterator rowIterator (bool translate_strings, bool decimal_to_double) const
 
ExecutorDeviceType getDeviceType () const
 
const ResultSetStorage * allocateStorage () const
 
const ResultSetStorage * allocateStorage (int8_t *, const std::vector< int64_t > &, std::shared_ptr< VarlenOutputInfo >=nullptr) const
 
const ResultSetStorage * allocateStorage (const std::vector< int64_t > &) const
 
void updateStorageEntryCount (const size_t new_entry_count)
 
std::vector< TargetValue > getNextRow (const bool translate_strings, const bool decimal_to_double) const
 
size_t getCurrentRowBufferIndex () const
 
std::vector< TargetValue > getRowAt (const size_t index) const
 
TargetValue getRowAt (const size_t row_idx, const size_t col_idx, const bool translate_strings, const bool decimal_to_double=true) const
 
OneIntegerColumnRow getOneColRow (const size_t index) const
 
std::vector< TargetValue > getRowAtNoTranslations (const size_t index, const std::vector< bool > &targets_to_skip={}) const
 
bool isRowAtEmpty (const size_t index) const
 
void sort (const std::list< Analyzer::OrderEntry > &order_entries, size_t top_n, const Executor *executor)
 
void keepFirstN (const size_t n)
 
void dropFirstN (const size_t n)
 
void append (ResultSet &that)
 
const ResultSetStorage * getStorage () const
 
size_t colCount () const
 
SQLTypeInfo getColType (const size_t col_idx) const
 
size_t rowCount (const bool force_parallel=false) const
 Returns the number of valid entries in the result set (i.e. those that will be returned from the SQL query or input into the next query step) More...
 
void invalidateCachedRowCount () const
 
void setCachedRowCount (const size_t row_count) const
 
bool isEmpty () const
 Returns a boolean signifying whether there are valid entries in the result set. More...
 
size_t entryCount () const
 Returns the number of entries the result set is allocated to hold. More...
 
size_t getBufferSizeBytes (const ExecutorDeviceType device_type) const
 
bool definitelyHasNoRows () const
 
const QueryMemoryDescriptor & getQueryMemDesc () const
 
const std::vector< TargetInfo > & getTargetInfos () const
 
const std::vector< int64_t > & getTargetInitVals () const
 
int8_t * getDeviceEstimatorBuffer () const
 
int8_t * getHostEstimatorBuffer () const
 
void syncEstimatorBuffer () const
 
size_t getNDVEstimator () const
 
void setQueueTime (const int64_t queue_time)
 
void setKernelQueueTime (const int64_t kernel_queue_time)
 
void addCompilationQueueTime (const int64_t compilation_queue_time)
 
int64_t getQueueTime () const
 
int64_t getRenderTime () const
 
void moveToBegin () const
 
bool isTruncated () const
 
bool isExplain () const
 
void setValidationOnlyRes ()
 
bool isValidationOnlyRes () const
 
std::string getExplanation () const
 
bool isGeoColOnGpu (const size_t col_idx) const
 
int getDeviceId () const
 
void fillOneEntry (const std::vector< int64_t > &entry)
 
void initializeStorage () const
 
void holdChunks (const std::list< std::shared_ptr< Chunk_NS::Chunk >> &chunks)
 
void holdChunkIterators (const std::shared_ptr< std::list< ChunkIter >> chunk_iters)
 
void holdLiterals (std::vector< int8_t > &literal_buff)
 
std::shared_ptr
< RowSetMemoryOwner > 
getRowSetMemOwner () const
 
const Permutation & getPermutationBuffer () const
 
const bool isPermutationBufferEmpty () const
 
void serialize (TSerializedRows &serialized_rows) const
 
size_t getLimit () const
 
ResultSetPtr copy ()
 
void clearPermutation ()
 
void initStatus ()
 
void invalidateResultSetChunks ()
 
const bool isEstimator () const
 
void setCached (bool val)
 
const bool isCached () const
 
void setExecTime (const long exec_time)
 
const long getExecTime () const
 
void setQueryPlanHash (const QueryPlanHash query_plan)
 
const QueryPlanHash getQueryPlanHash ()
 
std::unordered_set< size_t > getInputTableKeys () const
 
void setInputTableKeys (std::unordered_set< size_t > &&intput_table_keys)
 
void setTargetMetaInfo (const std::vector< TargetMetaInfo > &target_meta_info)
 
std::vector< TargetMetaInfo > getTargetMetaInfo ()
 
std::optional< bool > canUseSpeculativeTopNSort () const
 
void setUseSpeculativeTopNSort (bool value)
 
const bool hasValidBuffer () const
 
GeoReturnType getGeoReturnType () const
 
void setGeoReturnType (const GeoReturnType val)
 
void copyColumnIntoBuffer (const size_t column_idx, int8_t *output_buffer, const size_t output_buffer_size) const
 
bool isDirectColumnarConversionPossible () const
 
bool didOutputColumnar () const
 
bool isZeroCopyColumnarConversionPossible (size_t column_idx) const
 
const int8_t * getColumnarBuffer (size_t column_idx) const
 
QueryDescriptionType getQueryDescriptionType () const
 
const int8_t getPaddedSlotWidthBytes (const size_t slot_idx) const
 
std::tuple< std::vector< bool >
, size_t > 
getSingleSlotTargetBitmap () const
 
std::tuple< std::vector< bool >
, size_t > 
getSupportedSingleSlotTargetBitmap () const
 
std::vector< size_t > getSlotIndicesForTargetIndices () const
 
const std::vector
< ColumnLazyFetchInfo > & 
getLazyFetchInfo () const
 
bool areAnyColumnsLazyFetched () const
 
size_t getNumColumnsLazyFetched () const
 
void setSeparateVarlenStorageValid (const bool val)
 
const std::vector< std::string > getStringDictionaryPayloadCopy (const int dict_id) const
 
const std::pair< std::vector
< int32_t >, std::vector
< std::string > > 
getUniqueStringsForDictEncodedTargetCol (const size_t col_idx) const
 
StringDictionaryProxy * getStringDictionaryProxy (int const dict_id) const
 
template<typename ENTRY_TYPE , QueryDescriptionType QUERY_TYPE, bool COLUMNAR_FORMAT>
ENTRY_TYPE getEntryAt (const size_t row_idx, const size_t target_idx, const size_t slot_idx) const
 
ChunkStats getTableFunctionChunkStats (const size_t target_idx) const
 
void translateDictEncodedColumns (std::vector< TargetInfo > const &, size_t const start_idx)
 
void eachCellInColumn (RowIterationState &, CellCallback const &)
 
const Executor * getExecutor () const
 
template<typename ENTRY_TYPE , QueryDescriptionType QUERY_TYPE, bool COLUMNAR_FORMAT>
ENTRY_TYPE getEntryAt (const size_t row_idx, const size_t target_idx, const size_t slot_idx) const
 
template<typename ENTRY_TYPE >
ENTRY_TYPE getColumnarPerfectHashEntryAt (const size_t row_idx, const size_t target_idx, const size_t slot_idx) const
 
template<typename ENTRY_TYPE >
ENTRY_TYPE getRowWisePerfectHashEntryAt (const size_t row_idx, const size_t target_idx, const size_t slot_idx) const
 
template<typename ENTRY_TYPE >
ENTRY_TYPE getRowWiseBaselineEntryAt (const size_t row_idx, const size_t target_idx, const size_t slot_idx) const
 
template<typename ENTRY_TYPE >
ENTRY_TYPE getColumnarBaselineEntryAt (const size_t row_idx, const size_t target_idx, const size_t slot_idx) const
 

Static Public Member Functions

static QueryMemoryDescriptor fixupQueryMemoryDescriptor (const QueryMemoryDescriptor &)
 
static std::unique_ptr< ResultSet > unserialize (const TSerializedRows &serialized_rows, const Executor *)
 
static double calculateQuantile (quantile::TDigest *const t_digest)
 

Public Attributes

friend ResultSetBuilder
 

Private Types

using ApproxQuantileBuffers = std::vector< std::vector< double >>
 
using SerializedVarlenBufferStorage = std::vector< std::string >
 

Private Member Functions

void advanceCursorToNextEntry (ResultSetRowIterator &iter) const
 
std::vector< TargetValue > getNextRowImpl (const bool translate_strings, const bool decimal_to_double) const
 
std::vector< TargetValue > getNextRowUnlocked (const bool translate_strings, const bool decimal_to_double) const
 
std::vector< TargetValue > getRowAt (const size_t index, const bool translate_strings, const bool decimal_to_double, const bool fixup_count_distinct_pointers, const std::vector< bool > &targets_to_skip={}) const
 
template<typename ENTRY_TYPE >
ENTRY_TYPE getColumnarPerfectHashEntryAt (const size_t row_idx, const size_t target_idx, const size_t slot_idx) const
 
template<typename ENTRY_TYPE >
ENTRY_TYPE getRowWisePerfectHashEntryAt (const size_t row_idx, const size_t target_idx, const size_t slot_idx) const
 
template<typename ENTRY_TYPE >
ENTRY_TYPE getRowWiseBaselineEntryAt (const size_t row_idx, const size_t target_idx, const size_t slot_idx) const
 
template<typename ENTRY_TYPE >
ENTRY_TYPE getColumnarBaselineEntryAt (const size_t row_idx, const size_t target_idx, const size_t slot_idx) const
 
size_t binSearchRowCount () const
 
size_t parallelRowCount () const
 
size_t advanceCursorToNextEntry () const
 
void radixSortOnGpu (const std::list< Analyzer::OrderEntry > &order_entries) const
 
void radixSortOnCpu (const std::list< Analyzer::OrderEntry > &order_entries) const
 
TargetValue getTargetValueFromBufferRowwise (int8_t *rowwise_target_ptr, int8_t *keys_ptr, const size_t entry_buff_idx, const TargetInfo &target_info, const size_t target_logical_idx, const size_t slot_idx, const bool translate_strings, const bool decimal_to_double, const bool fixup_count_distinct_pointers) const
 
TargetValue getTargetValueFromBufferColwise (const int8_t *col_ptr, const int8_t *keys_ptr, const QueryMemoryDescriptor &query_mem_desc, const size_t local_entry_idx, const size_t global_entry_idx, const TargetInfo &target_info, const size_t target_logical_idx, const size_t slot_idx, const bool translate_strings, const bool decimal_to_double) const
 
TargetValue makeTargetValue (const int8_t *ptr, const int8_t compact_sz, const TargetInfo &target_info, const size_t target_logical_idx, const bool translate_strings, const bool decimal_to_double, const size_t entry_buff_idx) const
 
TargetValue makeVarlenTargetValue (const int8_t *ptr1, const int8_t compact_sz1, const int8_t *ptr2, const int8_t compact_sz2, const TargetInfo &target_info, const size_t target_logical_idx, const bool translate_strings, const size_t entry_buff_idx) const
 
TargetValue makeGeoTargetValue (const int8_t *geo_target_ptr, const size_t slot_idx, const TargetInfo &target_info, const size_t target_logical_idx, const size_t entry_buff_idx) const
 
InternalTargetValue getVarlenOrderEntry (const int64_t str_ptr, const size_t str_len) const
 
int64_t lazyReadInt (const int64_t ival, const size_t target_logical_idx, const StorageLookupResult &storage_lookup_result) const
 
std::pair< size_t, size_t > getStorageIndex (const size_t entry_idx) const
 
const std::vector< const
int8_t * > & 
getColumnFrag (const size_t storge_idx, const size_t col_logical_idx, int64_t &global_idx) const
 
const VarlenOutputInfo * getVarlenOutputInfo (const size_t entry_idx) const
 
StorageLookupResult findStorage (const size_t entry_idx) const
 
Comparator createComparator (const std::list< Analyzer::OrderEntry > &order_entries, const PermutationView permutation, const Executor *executor, const bool single_threaded)
 
PermutationView initPermutationBuffer (PermutationView permutation, PermutationIdx const begin, PermutationIdx const end) const
 
void parallelTop (const std::list< Analyzer::OrderEntry > &order_entries, const size_t top_n, const Executor *executor)
 
void baselineSort (const std::list< Analyzer::OrderEntry > &order_entries, const size_t top_n, const Executor *executor)
 
void doBaselineSort (const ExecutorDeviceType device_type, const std::list< Analyzer::OrderEntry > &order_entries, const size_t top_n, const Executor *executor)
 
bool canUseFastBaselineSort (const std::list< Analyzer::OrderEntry > &order_entries, const size_t top_n)
 
size_t rowCountImpl (const bool force_parallel) const
 
Data_Namespace::DataMgr * getDataManager () const
 
int getGpuCount () const
 
void serializeProjection (TSerializedRows &serialized_rows) const
 
void serializeVarlenAggColumn (int8_t *buf, std::vector< std::string > &varlen_bufer) const
 
void serializeCountDistinctColumns (TSerializedRows &) const
 
void unserializeCountDistinctColumns (const TSerializedRows &)
 
void fixupCountDistinctPointers ()
 
void create_active_buffer_set (CountDistinctSet &count_distinct_active_buffer_set) const
 
int64_t getDistinctBufferRefFromBufferRowwise (int8_t *rowwise_target_ptr, const TargetInfo &target_info) const
 

Static Private Member Functions

static bool isNull (const SQLTypeInfo &ti, const InternalTargetValue &val, const bool float_argument_input)
 
static PermutationView topPermutation (PermutationView, const size_t n, const Comparator &)
 

Private Attributes

const std::vector< TargetInfo > targets_
 
const ExecutorDeviceType device_type_
 
const int device_id_
 
QueryMemoryDescriptor query_mem_desc_
 
std::unique_ptr< ResultSetStorage > storage_
 
AppendedStorage appended_storage_
 
size_t crt_row_buff_idx_
 
size_t fetched_so_far_
 
size_t drop_first_
 
size_t keep_first_
 
std::shared_ptr
< RowSetMemoryOwner
row_set_mem_owner_
 
Permutation permutation_
 
const Catalog_Namespace::Catalog * catalog_
 
unsigned block_size_ {0}
 
unsigned grid_size_ {0}
 
QueryExecutionTimings timings_
 
std::list< std::shared_ptr
< Chunk_NS::Chunk > > 
chunks_
 
std::vector< std::shared_ptr
< std::list< ChunkIter > > > 
chunk_iters_
 
std::vector< std::vector
< int8_t > > 
literal_buffers_
 
std::vector< ColumnLazyFetchInfo > lazy_fetch_info_
 
std::vector< std::vector
< std::vector< const int8_t * > > > 
col_buffers_
 
std::vector< std::vector
< std::vector< int64_t > > > 
frag_offsets_
 
std::vector< std::vector
< int64_t > > 
consistent_frag_sizes_
 
const std::shared_ptr< const
Analyzer::Estimator > 
estimator_
 
Data_Namespace::AbstractBuffer * device_estimator_buffer_ {nullptr}
 
int8_t * host_estimator_buffer_ {nullptr}
 
Data_Namespace::DataMgr * data_mgr_
 
std::vector
< SerializedVarlenBufferStorage > 
serialized_varlen_buffer_
 
bool separate_varlen_storage_valid_
 
std::string explanation_
 
const bool just_explain_
 
bool for_validation_only_
 
std::atomic< int64_t > cached_row_count_
 
std::mutex row_iteration_mutex_
 
GeoReturnType geo_return_type_
 
bool cached_
 
size_t query_exec_time_
 
QueryPlanHash query_plan_
 
std::unordered_set< size_t > input_table_keys_
 
std::vector< TargetMetaInfo > target_meta_info_
 
std::optional< bool > can_use_speculative_top_n_sort
 

Friends

class ResultSetManager
 
class ResultSetRowIterator
 
class ColumnarResults
 

Detailed Description

Definition at line 157 of file ResultSet.h.

Member Typedef Documentation

using ResultSet::ApproxQuantileBuffers = std::vector<std::vector<double>>
private

Definition at line 793 of file ResultSet.h.

using ResultSet::SerializedVarlenBufferStorage = std::vector<std::string>
private

Definition at line 933 of file ResultSet.h.

Member Enumeration Documentation

Geo return type options when accessing geo columns from a result set.

Enumerator
GeoTargetValue 

Copies the geo data into a struct of vectors - coords are uncompressed

WktString 

Returns the geo data as a WKT string

GeoTargetValuePtr 

Returns only the pointers of the underlying buffers for the geo data.

GeoTargetValueGpuPtr 

If geo data is currently on a device, keep the data on the device and return the device ptrs

Definition at line 517 of file ResultSet.h.

517  {
520  WktString,
523  GeoTargetValueGpuPtr
525  };
boost::optional< boost::variant< GeoPointTargetValue, GeoMultiPointTargetValue, GeoLineStringTargetValue, GeoMultiLineStringTargetValue, GeoPolyTargetValue, GeoMultiPolyTargetValue >> GeoTargetValue
Definition: TargetValue.h:187
boost::variant< GeoPointTargetValuePtr, GeoMultiPointTargetValuePtr, GeoLineStringTargetValuePtr, GeoMultiLineStringTargetValuePtr, GeoPolyTargetValuePtr, GeoMultiPolyTargetValuePtr > GeoTargetValuePtr
Definition: TargetValue.h:193

Constructor & Destructor Documentation

ResultSet::ResultSet ( const std::vector< TargetInfo > &  targets,
const ExecutorDeviceType  device_type,
const QueryMemoryDescriptor &  query_mem_desc,
const std::shared_ptr< RowSetMemoryOwner >  row_set_mem_owner,
const Catalog_Namespace::Catalog *  catalog,
const unsigned  block_size,
const unsigned  grid_size 
)

Definition at line 62 of file ResultSet.cpp.

69  : targets_(targets)
70  , device_type_(device_type)
71  , device_id_(-1)
72  , query_mem_desc_(query_mem_desc)
74  , fetched_so_far_(0)
75  , drop_first_(0)
76  , keep_first_(0)
77  , row_set_mem_owner_(row_set_mem_owner)
78  , catalog_(catalog)
79  , block_size_(block_size)
80  , grid_size_(grid_size)
81  , data_mgr_(nullptr)
83  , just_explain_(false)
84  , for_validation_only_(false)
87  , cached_(false)
88  , query_exec_time_(0)
90  , can_use_speculative_top_n_sort(std::nullopt) {}
bool for_validation_only_
Definition: ResultSet.h:939
GeoReturnType geo_return_type_
Definition: ResultSet.h:944
constexpr QueryPlanHash EMPTY_HASHED_PLAN_DAG_KEY
std::optional< bool > can_use_speculative_top_n_sort
Definition: ResultSet.h:957
const Catalog_Namespace::Catalog * catalog_
Definition: ResultSet.h:912
QueryMemoryDescriptor query_mem_desc_
Definition: ResultSet.h:902
size_t query_exec_time_
Definition: ResultSet.h:949
size_t keep_first_
Definition: ResultSet.h:908
const bool just_explain_
Definition: ResultSet.h:938
unsigned block_size_
Definition: ResultSet.h:913
std::atomic< int64_t > cached_row_count_
Definition: ResultSet.h:940
const std::vector< TargetInfo > targets_
Definition: ResultSet.h:899
std::shared_ptr< RowSetMemoryOwner > row_set_mem_owner_
Definition: ResultSet.h:909
size_t drop_first_
Definition: ResultSet.h:907
bool cached_
Definition: ResultSet.h:947
unsigned grid_size_
Definition: ResultSet.h:914
Data_Namespace::DataMgr * data_mgr_
Definition: ResultSet.h:930
const ExecutorDeviceType device_type_
Definition: ResultSet.h:900
size_t fetched_so_far_
Definition: ResultSet.h:906
size_t crt_row_buff_idx_
Definition: ResultSet.h:905
QueryPlanHash query_plan_
Definition: ResultSet.h:950
bool separate_varlen_storage_valid_
Definition: ResultSet.h:936
constexpr int64_t uninitialized_cached_row_count
Definition: ResultSet.cpp:50
const int device_id_
Definition: ResultSet.h:901
ResultSet::ResultSet ( const std::vector< TargetInfo > &  targets,
const std::vector< ColumnLazyFetchInfo > &  lazy_fetch_info,
const std::vector< std::vector< const int8_t * >> &  col_buffers,
const std::vector< std::vector< int64_t >> &  frag_offsets,
const std::vector< int64_t > &  consistent_frag_sizes,
const ExecutorDeviceType  device_type,
const int  device_id,
const QueryMemoryDescriptor &  query_mem_desc,
const std::shared_ptr< RowSetMemoryOwner >  row_set_mem_owner,
const Catalog_Namespace::Catalog *  catalog,
const unsigned  block_size,
const unsigned  grid_size 
)

Definition at line 92 of file ResultSet.cpp.

104  : targets_(targets)
105  , device_type_(device_type)
106  , device_id_(device_id)
107  , query_mem_desc_(query_mem_desc)
108  , crt_row_buff_idx_(0)
109  , fetched_so_far_(0)
110  , drop_first_(0)
111  , keep_first_(0)
112  , row_set_mem_owner_(row_set_mem_owner)
113  , catalog_(catalog)
114  , block_size_(block_size)
115  , grid_size_(grid_size)
116  , lazy_fetch_info_(lazy_fetch_info)
117  , col_buffers_{col_buffers}
118  , frag_offsets_{frag_offsets}
119  , consistent_frag_sizes_{consistent_frag_sizes}
120  , data_mgr_(nullptr)
122  , just_explain_(false)
123  , for_validation_only_(false)
126  , cached_(false)
127  , query_exec_time_(0)
129  , can_use_speculative_top_n_sort(std::nullopt) {}
bool for_validation_only_
Definition: ResultSet.h:939
GeoReturnType geo_return_type_
Definition: ResultSet.h:944
constexpr QueryPlanHash EMPTY_HASHED_PLAN_DAG_KEY
std::optional< bool > can_use_speculative_top_n_sort
Definition: ResultSet.h:957
const Catalog_Namespace::Catalog * catalog_
Definition: ResultSet.h:912
QueryMemoryDescriptor query_mem_desc_
Definition: ResultSet.h:902
size_t query_exec_time_
Definition: ResultSet.h:949
size_t keep_first_
Definition: ResultSet.h:908
const bool just_explain_
Definition: ResultSet.h:938
unsigned block_size_
Definition: ResultSet.h:913
std::atomic< int64_t > cached_row_count_
Definition: ResultSet.h:940
const std::vector< TargetInfo > targets_
Definition: ResultSet.h:899
std::shared_ptr< RowSetMemoryOwner > row_set_mem_owner_
Definition: ResultSet.h:909
size_t drop_first_
Definition: ResultSet.h:907
bool cached_
Definition: ResultSet.h:947
unsigned grid_size_
Definition: ResultSet.h:914
Data_Namespace::DataMgr * data_mgr_
Definition: ResultSet.h:930
std::vector< std::vector< std::vector< const int8_t * > > > col_buffers_
Definition: ResultSet.h:923
std::vector< std::vector< int64_t > > consistent_frag_sizes_
Definition: ResultSet.h:925
const ExecutorDeviceType device_type_
Definition: ResultSet.h:900
size_t fetched_so_far_
Definition: ResultSet.h:906
std::vector< ColumnLazyFetchInfo > lazy_fetch_info_
Definition: ResultSet.h:922
size_t crt_row_buff_idx_
Definition: ResultSet.h:905
QueryPlanHash query_plan_
Definition: ResultSet.h:950
std::vector< std::vector< std::vector< int64_t > > > frag_offsets_
Definition: ResultSet.h:924
bool separate_varlen_storage_valid_
Definition: ResultSet.h:936
constexpr int64_t uninitialized_cached_row_count
Definition: ResultSet.cpp:50
const int device_id_
Definition: ResultSet.h:901
ResultSet::ResultSet ( const std::shared_ptr< const Analyzer::Estimator > ,
const ExecutorDeviceType  device_type,
const int  device_id,
Data_Namespace::DataMgr *  data_mgr 
)
ResultSet::ResultSet ( const std::string &  explanation)

Definition at line 163 of file ResultSet.cpp.

References CPU.

165  , device_id_(-1)
166  , fetched_so_far_(0)
168  , explanation_(explanation)
169  , just_explain_(true)
170  , for_validation_only_(false)
173  , cached_(false)
174  , query_exec_time_(0)
176  , can_use_speculative_top_n_sort(std::nullopt) {}
bool for_validation_only_
Definition: ResultSet.h:939
GeoReturnType geo_return_type_
Definition: ResultSet.h:944
constexpr QueryPlanHash EMPTY_HASHED_PLAN_DAG_KEY
std::optional< bool > can_use_speculative_top_n_sort
Definition: ResultSet.h:957
size_t query_exec_time_
Definition: ResultSet.h:949
const bool just_explain_
Definition: ResultSet.h:938
std::atomic< int64_t > cached_row_count_
Definition: ResultSet.h:940
bool cached_
Definition: ResultSet.h:947
std::string explanation_
Definition: ResultSet.h:937
const ExecutorDeviceType device_type_
Definition: ResultSet.h:900
size_t fetched_so_far_
Definition: ResultSet.h:906
QueryPlanHash query_plan_
Definition: ResultSet.h:950
bool separate_varlen_storage_valid_
Definition: ResultSet.h:936
constexpr int64_t uninitialized_cached_row_count
Definition: ResultSet.cpp:50
const int device_id_
Definition: ResultSet.h:901
ResultSet::ResultSet ( int64_t  queue_time_ms,
int64_t  render_time_ms,
const std::shared_ptr< RowSetMemoryOwner >  row_set_mem_owner 
)

Definition at line 178 of file ResultSet.cpp.

References CPU.

182  , device_id_(-1)
183  , fetched_so_far_(0)
184  , row_set_mem_owner_(row_set_mem_owner)
185  , timings_(QueryExecutionTimings{queue_time_ms, render_time_ms, 0, 0})
187  , just_explain_(true)
188  , for_validation_only_(false)
191  , cached_(false)
192  , query_exec_time_(0)
194  , can_use_speculative_top_n_sort(std::nullopt) {}
bool for_validation_only_
Definition: ResultSet.h:939
GeoReturnType geo_return_type_
Definition: ResultSet.h:944
constexpr QueryPlanHash EMPTY_HASHED_PLAN_DAG_KEY
std::optional< bool > can_use_speculative_top_n_sort
Definition: ResultSet.h:957
size_t query_exec_time_
Definition: ResultSet.h:949
const bool just_explain_
Definition: ResultSet.h:938
std::atomic< int64_t > cached_row_count_
Definition: ResultSet.h:940
std::shared_ptr< RowSetMemoryOwner > row_set_mem_owner_
Definition: ResultSet.h:909
bool cached_
Definition: ResultSet.h:947
QueryExecutionTimings timings_
Definition: ResultSet.h:915
const ExecutorDeviceType device_type_
Definition: ResultSet.h:900
size_t fetched_so_far_
Definition: ResultSet.h:906
QueryPlanHash query_plan_
Definition: ResultSet.h:950
bool separate_varlen_storage_valid_
Definition: ResultSet.h:936
constexpr int64_t uninitialized_cached_row_count
Definition: ResultSet.cpp:50
const int device_id_
Definition: ResultSet.h:901
ResultSet::~ResultSet ( )

Definition at line 196 of file ResultSet.cpp.

References CHECK, CPU, and data_mgr_().

196  {
197  if (storage_) {
198  if (!storage_->buff_is_provided_) {
199  CHECK(storage_->getUnderlyingBuffer());
200  free(storage_->getUnderlyingBuffer());
201  }
202  }
203  for (auto& storage : appended_storage_) {
204  if (storage && !storage->buff_is_provided_) {
205  free(storage->getUnderlyingBuffer());
206  }
207  }
211  }
213  CHECK(data_mgr_);
215  }
216 }
AppendedStorage appended_storage_
Definition: ResultSet.h:904
std::unique_ptr< ResultSetStorage > storage_
Definition: ResultSet.h:903
Data_Namespace::DataMgr * data_mgr_
Definition: ResultSet.h:930
int8_t * host_estimator_buffer_
Definition: ResultSet.h:929
const ExecutorDeviceType device_type_
Definition: ResultSet.h:900
#define CHECK(condition)
Definition: Logger.h:222
void free(AbstractBuffer *buffer)
Definition: DataMgr.cpp:525
Data_Namespace::AbstractBuffer * device_estimator_buffer_
Definition: ResultSet.h:928

+ Here is the call graph for this function:

Member Function Documentation

void ResultSet::addCompilationQueueTime ( const int64_t  compilation_queue_time)

Definition at line 718 of file ResultSet.cpp.

718  {
719  timings_.compilation_queue_time += compilation_queue_time;
720 }
QueryExecutionTimings timings_
Definition: ResultSet.h:915
void ResultSet::advanceCursorToNextEntry ( ResultSetRowIterator iter) const
private
size_t ResultSet::advanceCursorToNextEntry ( ) const
private
const ResultSetStorage* ResultSet::allocateStorage ( ) const
const ResultSetStorage* ResultSet::allocateStorage ( int8_t *  ,
const std::vector< int64_t > &  ,
std::shared_ptr< VarlenOutputInfo = nullptr 
) const
const ResultSetStorage* ResultSet::allocateStorage ( const std::vector< int64_t > &  ) const
void ResultSet::append ( ResultSet that)

Definition at line 299 of file ResultSet.cpp.

References CHECK.

299  {
301  if (!that.storage_) {
302  return;
303  }
304  appended_storage_.push_back(std::move(that.storage_));
307  appended_storage_.back()->query_mem_desc_.getEntryCount());
308  chunks_.insert(chunks_.end(), that.chunks_.begin(), that.chunks_.end());
309  col_buffers_.insert(
310  col_buffers_.end(), that.col_buffers_.begin(), that.col_buffers_.end());
311  frag_offsets_.insert(
312  frag_offsets_.end(), that.frag_offsets_.begin(), that.frag_offsets_.end());
314  that.consistent_frag_sizes_.begin(),
315  that.consistent_frag_sizes_.end());
316  chunk_iters_.insert(
317  chunk_iters_.end(), that.chunk_iters_.begin(), that.chunk_iters_.end());
319  CHECK(that.separate_varlen_storage_valid_);
321  that.serialized_varlen_buffer_.begin(),
322  that.serialized_varlen_buffer_.end());
323  }
324  for (auto& buff : that.literal_buffers_) {
325  literal_buffers_.push_back(std::move(buff));
326  }
327 }
void setEntryCount(const size_t val)
AppendedStorage appended_storage_
Definition: ResultSet.h:904
QueryMemoryDescriptor query_mem_desc_
Definition: ResultSet.h:902
std::vector< std::shared_ptr< std::list< ChunkIter > > > chunk_iters_
Definition: ResultSet.h:918
std::vector< SerializedVarlenBufferStorage > serialized_varlen_buffer_
Definition: ResultSet.h:935
void invalidateCachedRowCount() const
Definition: ResultSet.cpp:605
std::list< std::shared_ptr< Chunk_NS::Chunk > > chunks_
Definition: ResultSet.h:917
std::vector< std::vector< int8_t > > literal_buffers_
Definition: ResultSet.h:921
std::vector< std::vector< std::vector< const int8_t * > > > col_buffers_
Definition: ResultSet.h:923
std::vector< std::vector< int64_t > > consistent_frag_sizes_
Definition: ResultSet.h:925
#define CHECK(condition)
Definition: Logger.h:222
std::vector< std::vector< std::vector< int64_t > > > frag_offsets_
Definition: ResultSet.h:924
bool separate_varlen_storage_valid_
Definition: ResultSet.h:936
bool ResultSet::areAnyColumnsLazyFetched ( ) const
inline

Definition at line 559 of file ResultSet.h.

References lazy_fetch_info_.

559  {
560  auto is_lazy = [](auto const& info) { return info.is_lazily_fetched; };
561  return std::any_of(lazy_fetch_info_.begin(), lazy_fetch_info_.end(), is_lazy);
562  }
std::vector< ColumnLazyFetchInfo > lazy_fetch_info_
Definition: ResultSet.h:922
void ResultSet::baselineSort ( const std::list< Analyzer::OrderEntry > &  order_entries,
const size_t  top_n,
const Executor executor 
)
private
size_t ResultSet::binSearchRowCount ( ) const
private

Definition at line 616 of file ResultSet.cpp.

References anonymous_namespace{ResultSet.cpp}::get_truncated_row_count().

616  {
617  if (!storage_) {
618  return 0;
619  }
620 
621  size_t row_count = storage_->binSearchRowCount();
622  for (auto& s : appended_storage_) {
623  row_count += s->binSearchRowCount();
624  }
625 
626  return get_truncated_row_count(row_count, getLimit(), drop_first_);
627 }
AppendedStorage appended_storage_
Definition: ResultSet.h:904
std::unique_ptr< ResultSetStorage > storage_
Definition: ResultSet.h:903
size_t getLimit() const
Definition: ResultSet.cpp:1302
size_t get_truncated_row_count(size_t total_row_count, size_t limit, size_t offset)
Definition: ResultSet.cpp:539
size_t drop_first_
Definition: ResultSet.h:907

+ Here is the call graph for this function:

double ResultSet::calculateQuantile ( quantile::TDigest *const  t_digest)
static

Definition at line 1008 of file ResultSet.cpp.

References CHECK, quantile::detail::TDigest< RealType, IndexType >::mergeBufferFinal(), NULL_DOUBLE, and quantile::detail::TDigest< RealType, IndexType >::quantile().

Referenced by makeTargetValue().

1008  {
1009  static_assert(sizeof(int64_t) == sizeof(quantile::TDigest*));
1010  CHECK(t_digest);
1011  t_digest->mergeBufferFinal();
1012  double const quantile = t_digest->quantile();
1013  return boost::math::isnan(quantile) ? NULL_DOUBLE : quantile;
1014 }
#define NULL_DOUBLE
DEVICE RealType quantile(VectorView< IndexType const > const partial_sum, RealType const q) const
Definition: quantile.h:827
DEVICE void mergeBufferFinal()
Definition: quantile.h:651
#define CHECK(condition)
Definition: Logger.h:222

+ Here is the call graph for this function:

+ Here is the caller graph for this function:

bool ResultSet::canUseFastBaselineSort ( const std::list< Analyzer::OrderEntry > &  order_entries,
const size_t  top_n 
)
private
std::optional<bool> ResultSet::canUseSpeculativeTopNSort ( ) const
inline

Definition at line 501 of file ResultSet.h.

References can_use_speculative_top_n_sort.

501  {
503  }
std::optional< bool > can_use_speculative_top_n_sort
Definition: ResultSet.h:957
void ResultSet::clearPermutation ( )
inline

Definition at line 446 of file ResultSet.h.

References permutation_.

Referenced by initStatus().

446  {
447  if (!permutation_.empty()) {
448  permutation_.clear();
449  }
450  }
Permutation permutation_
Definition: ResultSet.h:910

+ Here is the caller graph for this function:

size_t ResultSet::colCount ( ) const

Definition at line 413 of file ResultSet.cpp.

413  {
414  return just_explain_ ? 1 : targets_.size();
415 }
const bool just_explain_
Definition: ResultSet.h:938
const std::vector< TargetInfo > targets_
Definition: ResultSet.h:899
ResultSetPtr ResultSet::copy ( )

Definition at line 329 of file ResultSet.cpp.

References CHECK, gpu_enabled::copy(), and DEBUG_TIMER.

329  {
330  auto timer = DEBUG_TIMER(__func__);
331  if (!storage_) {
332  return nullptr;
333  }
334 
335  auto executor = getExecutor();
336  CHECK(executor);
337  ResultSetPtr copied_rs = std::make_shared<ResultSet>(targets_,
338  device_type_,
341  executor->getCatalog(),
342  executor->blockSize(),
343  executor->gridSize());
344 
345  auto allocate_and_copy_storage =
346  [&](const ResultSetStorage* prev_storage) -> std::unique_ptr<ResultSetStorage> {
347  const auto& prev_qmd = prev_storage->query_mem_desc_;
348  const auto storage_size = prev_qmd.getBufferSizeBytes(device_type_);
349  auto buff = row_set_mem_owner_->allocate(storage_size, /*thread_idx=*/0);
350  std::unique_ptr<ResultSetStorage> new_storage;
351  new_storage.reset(new ResultSetStorage(
352  prev_storage->targets_, prev_qmd, buff, /*buff_is_provided=*/true));
353  new_storage->target_init_vals_ = prev_storage->target_init_vals_;
354  if (prev_storage->varlen_output_info_) {
355  new_storage->varlen_output_info_ = prev_storage->varlen_output_info_;
356  }
357  memcpy(new_storage->buff_, prev_storage->buff_, storage_size);
358  new_storage->query_mem_desc_ = prev_qmd;
359  return new_storage;
360  };
361 
362  copied_rs->storage_ = allocate_and_copy_storage(storage_.get());
363  if (!appended_storage_.empty()) {
364  for (const auto& storage : appended_storage_) {
365  copied_rs->appended_storage_.push_back(allocate_and_copy_storage(storage.get()));
366  }
367  }
368  std::copy(chunks_.begin(), chunks_.end(), std::back_inserter(copied_rs->chunks_));
369  std::copy(chunk_iters_.begin(),
370  chunk_iters_.end(),
371  std::back_inserter(copied_rs->chunk_iters_));
372  std::copy(col_buffers_.begin(),
373  col_buffers_.end(),
374  std::back_inserter(copied_rs->col_buffers_));
375  std::copy(frag_offsets_.begin(),
376  frag_offsets_.end(),
377  std::back_inserter(copied_rs->frag_offsets_));
380  std::back_inserter(copied_rs->consistent_frag_sizes_));
384  std::back_inserter(copied_rs->serialized_varlen_buffer_));
385  }
386  std::copy(literal_buffers_.begin(),
387  literal_buffers_.end(),
388  std::back_inserter(copied_rs->literal_buffers_));
389  std::copy(lazy_fetch_info_.begin(),
390  lazy_fetch_info_.end(),
391  std::back_inserter(copied_rs->lazy_fetch_info_));
392 
393  copied_rs->permutation_ = permutation_;
394  copied_rs->drop_first_ = drop_first_;
395  copied_rs->keep_first_ = keep_first_;
396  copied_rs->separate_varlen_storage_valid_ = separate_varlen_storage_valid_;
397  copied_rs->query_exec_time_ = query_exec_time_;
398  copied_rs->input_table_keys_ = input_table_keys_;
399  copied_rs->target_meta_info_ = target_meta_info_;
400  copied_rs->geo_return_type_ = geo_return_type_;
401  copied_rs->query_plan_ = query_plan_;
403  copied_rs->can_use_speculative_top_n_sort = can_use_speculative_top_n_sort;
404  }
405 
406  return copied_rs;
407 }
Permutation permutation_
Definition: ResultSet.h:910
AppendedStorage appended_storage_
Definition: ResultSet.h:904
GeoReturnType geo_return_type_
Definition: ResultSet.h:944
std::optional< bool > can_use_speculative_top_n_sort
Definition: ResultSet.h:957
QueryMemoryDescriptor query_mem_desc_
Definition: ResultSet.h:902
std::unique_ptr< ResultSetStorage > storage_
Definition: ResultSet.h:903
size_t query_exec_time_
Definition: ResultSet.h:949
std::shared_ptr< ResultSet > ResultSetPtr
size_t keep_first_
Definition: ResultSet.h:908
std::vector< std::shared_ptr< std::list< ChunkIter > > > chunk_iters_
Definition: ResultSet.h:918
std::vector< SerializedVarlenBufferStorage > serialized_varlen_buffer_
Definition: ResultSet.h:935
const std::vector< TargetInfo > targets_
Definition: ResultSet.h:899
std::shared_ptr< RowSetMemoryOwner > row_set_mem_owner_
Definition: ResultSet.h:909
const Executor * getExecutor() const
Definition: ResultSet.h:603
size_t drop_first_
Definition: ResultSet.h:907
std::list< std::shared_ptr< Chunk_NS::Chunk > > chunks_
Definition: ResultSet.h:917
DEVICE auto copy(ARGS &&...args)
Definition: gpu_enabled.h:51
std::vector< std::vector< int8_t > > literal_buffers_
Definition: ResultSet.h:921
std::unordered_set< size_t > input_table_keys_
Definition: ResultSet.h:951
std::vector< TargetMetaInfo > target_meta_info_
Definition: ResultSet.h:952
std::vector< std::vector< std::vector< const int8_t * > > > col_buffers_
Definition: ResultSet.h:923
std::vector< std::vector< int64_t > > consistent_frag_sizes_
Definition: ResultSet.h:925
const ExecutorDeviceType device_type_
Definition: ResultSet.h:900
#define CHECK(condition)
Definition: Logger.h:222
#define DEBUG_TIMER(name)
Definition: Logger.h:371
std::vector< ColumnLazyFetchInfo > lazy_fetch_info_
Definition: ResultSet.h:922
QueryPlanHash query_plan_
Definition: ResultSet.h:950
std::vector< std::vector< std::vector< int64_t > > > frag_offsets_
Definition: ResultSet.h:924
bool separate_varlen_storage_valid_
Definition: ResultSet.h:936

+ Here is the call graph for this function:

void ResultSet::copyColumnIntoBuffer ( const size_t  column_idx,
int8_t *  output_buffer,
const size_t  output_buffer_size 
) const

For each specified column, this function goes through all available storages and copies its content into a contiguous output_buffer

Definition at line 1121 of file ResultSetIteration.cpp.

References appended_storage_, CHECK, CHECK_LT, QueryMemoryDescriptor::getPaddedSlotWidthBytes(), QueryMemoryDescriptor::getSlotCount(), isDirectColumnarConversionPossible(), query_mem_desc_, and storage_.

1123  {
1125  CHECK_LT(column_idx, query_mem_desc_.getSlotCount());
1126  CHECK(output_buffer_size > 0);
1127  CHECK(output_buffer);
1128  const auto column_width_size = query_mem_desc_.getPaddedSlotWidthBytes(column_idx);
1129  size_t out_buff_offset = 0;
1130 
1131  // the main storage:
1132  const size_t crt_storage_row_count = storage_->query_mem_desc_.getEntryCount();
1133  const size_t crt_buffer_size = crt_storage_row_count * column_width_size;
1134  const size_t column_offset = storage_->query_mem_desc_.getColOffInBytes(column_idx);
1135  const int8_t* storage_buffer = storage_->getUnderlyingBuffer() + column_offset;
1136  CHECK(crt_buffer_size <= output_buffer_size);
1137  std::memcpy(output_buffer, storage_buffer, crt_buffer_size);
1138 
1139  out_buff_offset += crt_buffer_size;
1140 
1141  // the appended storages:
1142  for (size_t i = 0; i < appended_storage_.size(); i++) {
1143  const size_t crt_storage_row_count =
1144  appended_storage_[i]->query_mem_desc_.getEntryCount();
1145  if (crt_storage_row_count == 0) {
1146  // skip an empty appended storage
1147  continue;
1148  }
1149  CHECK_LT(out_buff_offset, output_buffer_size);
1150  const size_t crt_buffer_size = crt_storage_row_count * column_width_size;
1151  const size_t column_offset =
1152  appended_storage_[i]->query_mem_desc_.getColOffInBytes(column_idx);
1153  const int8_t* storage_buffer =
1154  appended_storage_[i]->getUnderlyingBuffer() + column_offset;
1155  CHECK(out_buff_offset + crt_buffer_size <= output_buffer_size);
1156  std::memcpy(output_buffer + out_buff_offset, storage_buffer, crt_buffer_size);
1157 
1158  out_buff_offset += crt_buffer_size;
1159  }
1160 }
AppendedStorage appended_storage_
Definition: ResultSet.h:904
QueryMemoryDescriptor query_mem_desc_
Definition: ResultSet.h:902
std::unique_ptr< ResultSetStorage > storage_
Definition: ResultSet.h:903
const int8_t getPaddedSlotWidthBytes(const size_t slot_idx) const
#define CHECK_LT(x, y)
Definition: Logger.h:232
#define CHECK(condition)
Definition: Logger.h:222
bool isDirectColumnarConversionPossible() const
Definition: ResultSet.cpp:1371

+ Here is the call graph for this function:

void ResultSet::create_active_buffer_set ( CountDistinctSet count_distinct_active_buffer_set) const
private
Comparator ResultSet::createComparator ( const std::list< Analyzer::OrderEntry > &  order_entries,
const PermutationView  permutation,
const Executor * executor,
const bool  single_threaded 
)
inlineprivate

Definition at line 834 of file ResultSet.h.

References DEBUG_TIMER, QueryMemoryDescriptor::didOutputColumnar(), and query_mem_desc_.

837  {
838  auto timer = DEBUG_TIMER(__func__);
840  return [rsc = ResultSetComparator<ColumnWiseTargetAccessor>(
841  order_entries, this, permutation, executor, single_threaded)](
842  const PermutationIdx lhs, const PermutationIdx rhs) {
843  return rsc(lhs, rhs);
844  };
845  } else {
846  return [rsc = ResultSetComparator<RowWiseTargetAccessor>(
847  order_entries, this, permutation, executor, single_threaded)](
848  const PermutationIdx lhs, const PermutationIdx rhs) {
849  return rsc(lhs, rhs);
850  };
851  }
852  }
QueryMemoryDescriptor query_mem_desc_
Definition: ResultSet.h:902
uint32_t PermutationIdx
Definition: ResultSet.h:152
#define DEBUG_TIMER(name)
Definition: Logger.h:371

+ Here is the call graph for this function:

bool ResultSet::definitelyHasNoRows ( ) const

Definition at line 668 of file ResultSet.cpp.

668  {
669  return (!storage_ && !estimator_ && !just_explain_) || cached_row_count_ == 0;
670 }
std::unique_ptr< ResultSetStorage > storage_
Definition: ResultSet.h:903
const bool just_explain_
Definition: ResultSet.h:938
std::atomic< int64_t > cached_row_count_
Definition: ResultSet.h:940
const std::shared_ptr< const Analyzer::Estimator > estimator_
Definition: ResultSet.h:927
bool ResultSet::didOutputColumnar ( ) const
inline

Definition at line 535 of file ResultSet.h.

References QueryMemoryDescriptor::didOutputColumnar(), and query_mem_desc_.

535 { return this->query_mem_desc_.didOutputColumnar(); }
QueryMemoryDescriptor query_mem_desc_
Definition: ResultSet.h:902

+ Here is the call graph for this function:

void ResultSet::doBaselineSort ( const ExecutorDeviceType  device_type,
const std::list< Analyzer::OrderEntry > &  order_entries,
const size_t  top_n,
const Executor * executor 
)
private
void ResultSet::dropFirstN ( const size_t  n)

Definition at line 57 of file ResultSet.cpp.

References anonymous_namespace{Utm.h}::n.

57  {
59  drop_first_ = n;
60 }
void invalidateCachedRowCount() const
Definition: ResultSet.cpp:605
size_t drop_first_
Definition: ResultSet.h:907
constexpr double n
Definition: Utm.h:38
void ResultSet::eachCellInColumn ( RowIterationState state,
CellCallback const &  func 
)

Definition at line 485 of file ResultSet.cpp.

References advance_slot(), advance_to_next_columnar_target_buff(), ResultSet::RowIterationState::agg_idx_, align_to_int64(), ResultSet::RowIterationState::buf_ptr_, CHECK, CHECK_GE, CHECK_LT, ResultSet::RowIterationState::compact_sz1_, ResultSet::RowIterationState::cur_target_idx_, QueryMemoryDescriptor::didOutputColumnar(), get_cols_ptr(), get_key_bytes_rowwise(), QueryMemoryDescriptor::getPaddedSlotWidthBytes(), ResultSet::RowIterationState::prev_target_idx_, read_int_from_buff(), and row_ptr_rowwise().

485  {
486  size_t const target_idx = state.cur_target_idx_;
487  QueryMemoryDescriptor& storage_qmd = storage_->query_mem_desc_;
488  CHECK_LT(target_idx, lazy_fetch_info_.size());
489  auto& col_lazy_fetch = lazy_fetch_info_[target_idx];
490  CHECK(col_lazy_fetch.is_lazily_fetched);
491  int const target_size = storage_->targets_[target_idx].sql_type.get_size();
492  CHECK_LT(0, target_size) << storage_->targets_[target_idx].toString();
493  size_t const nrows = storage_->binSearchRowCount();
494  if (storage_qmd.didOutputColumnar()) {
495  // Logic based on ResultSet::ColumnWiseTargetAccessor::initializeOffsetsForStorage()
496  if (state.buf_ptr_ == nullptr) {
497  state.buf_ptr_ = get_cols_ptr(storage_->buff_, storage_qmd);
498  state.compact_sz1_ = storage_qmd.getPaddedSlotWidthBytes(state.agg_idx_)
499  ? storage_qmd.getPaddedSlotWidthBytes(state.agg_idx_)
501  }
502  for (size_t j = state.prev_target_idx_; j < state.cur_target_idx_; ++j) {
503  size_t const next_target_idx = j + 1; // Set state to reflect next target_idx j+1
504  state.buf_ptr_ = advance_to_next_columnar_target_buff(
505  state.buf_ptr_, storage_qmd, state.agg_idx_);
506  auto const& next_agg_info = storage_->targets_[next_target_idx];
507  state.agg_idx_ =
508  advance_slot(state.agg_idx_, next_agg_info, separate_varlen_storage_valid_);
509  state.compact_sz1_ = storage_qmd.getPaddedSlotWidthBytes(state.agg_idx_)
510  ? storage_qmd.getPaddedSlotWidthBytes(state.agg_idx_)
512  }
513  for (size_t i = 0; i < nrows; ++i) {
514  int8_t const* const pos_ptr = state.buf_ptr_ + i * state.compact_sz1_;
515  int64_t pos = read_int_from_buff(pos_ptr, target_size);
516  CHECK_GE(pos, 0);
517  auto& frag_col_buffers = getColumnFrag(0, target_idx, pos);
518  CHECK_LT(size_t(col_lazy_fetch.local_col_id), frag_col_buffers.size());
519  int8_t const* const col_frag = frag_col_buffers[col_lazy_fetch.local_col_id];
520  func(col_frag + pos * target_size);
521  }
522  } else {
523  size_t const key_bytes_with_padding =
525  for (size_t i = 0; i < nrows; ++i) {
526  int8_t const* const keys_ptr = row_ptr_rowwise(storage_->buff_, storage_qmd, i);
527  int8_t const* const rowwise_target_ptr = keys_ptr + key_bytes_with_padding;
528  int64_t pos = *reinterpret_cast<int64_t const*>(rowwise_target_ptr);
529  auto& frag_col_buffers = getColumnFrag(0, target_idx, pos);
530  CHECK_LT(size_t(col_lazy_fetch.local_col_id), frag_col_buffers.size());
531  int8_t const* const col_frag = frag_col_buffers[col_lazy_fetch.local_col_id];
532  func(col_frag + pos * target_size);
533  }
534  }
535 }
T advance_to_next_columnar_target_buff(T target_ptr, const QueryMemoryDescriptor &query_mem_desc, const size_t target_slot_idx)
QueryMemoryDescriptor query_mem_desc_
Definition: ResultSet.h:902
#define CHECK_GE(x, y)
Definition: Logger.h:235
std::unique_ptr< ResultSetStorage > storage_
Definition: ResultSet.h:903
size_t getEffectiveKeyWidth() const
int64_t read_int_from_buff(const int8_t *ptr, const int8_t compact_sz)
size_t advance_slot(const size_t j, const TargetInfo &target_info, const bool separate_varlen_storage)
const int8_t getPaddedSlotWidthBytes(const size_t slot_idx) const
T row_ptr_rowwise(T buff, const QueryMemoryDescriptor &query_mem_desc, const size_t entry_idx)
#define CHECK_LT(x, y)
Definition: Logger.h:232
#define CHECK(condition)
Definition: Logger.h:222
std::vector< ColumnLazyFetchInfo > lazy_fetch_info_
Definition: ResultSet.h:922
bool separate_varlen_storage_valid_
Definition: ResultSet.h:936
T get_cols_ptr(T buff, const QueryMemoryDescriptor &query_mem_desc)
size_t get_key_bytes_rowwise(const QueryMemoryDescriptor &query_mem_desc)
FORCE_INLINE HOST DEVICE T align_to_int64(T addr)
const std::vector< const int8_t * > & getColumnFrag(const size_t storage_idx, const size_t col_logical_idx, int64_t &global_idx) const

+ Here is the call graph for this function:

size_t ResultSet::entryCount ( ) const

Returns the number of entries the result set is allocated to hold.

Note that this can be greater than or equal to the actual number of valid rows in the result set, whether due to a SQL LIMIT/OFFSET applied or because the result set representation is inherently sparse (i.e. baseline hash group by)

For getting the number of valid rows in the result set (inclusive of any applied LIMIT and/or OFFSET), use ResultSet::rowCount(). Or to just test if there are any valid rows, use ResultSet::isEmpty(), as a return value from entryCount() greater than 0 does not necessarily mean the result set is non-empty.

Definition at line 750 of file ResultSetIteration.cpp.

References QueryMemoryDescriptor::getEntryCount(), permutation_, and query_mem_desc_.

750  {
751  return permutation_.empty() ? query_mem_desc_.getEntryCount() : permutation_.size();
752 }
Permutation permutation_
Definition: ResultSet.h:910
QueryMemoryDescriptor query_mem_desc_
Definition: ResultSet.h:902

+ Here is the call graph for this function:

void ResultSet::fillOneEntry ( const std::vector< int64_t > &  entry)
inline

Definition at line 408 of file ResultSet.h.

References CHECK, and storage_.

408  {
409  CHECK(storage_);
410  if (storage_->query_mem_desc_.didOutputColumnar()) {
411  storage_->fillOneEntryColWise(entry);
412  } else {
413  storage_->fillOneEntryRowWise(entry);
414  }
415  }
std::unique_ptr< ResultSetStorage > storage_
Definition: ResultSet.h:903
#define CHECK(condition)
Definition: Logger.h:222
ResultSet::StorageLookupResult ResultSet::findStorage ( const size_t  entry_idx) const
private

Definition at line 939 of file ResultSet.cpp.

Referenced by getVarlenOutputInfo(), and makeGeoTargetValue().

939  {
940  auto [stg_idx, fixedup_entry_idx] = getStorageIndex(entry_idx);
941  return {stg_idx ? appended_storage_[stg_idx - 1].get() : storage_.get(),
942  fixedup_entry_idx,
943  stg_idx};
944 }
std::pair< size_t, size_t > getStorageIndex(const size_t entry_idx) const
Definition: ResultSet.cpp:914
AppendedStorage appended_storage_
Definition: ResultSet.h:904
std::unique_ptr< ResultSetStorage > storage_
Definition: ResultSet.h:903

+ Here is the caller graph for this function:

void ResultSet::fixupCountDistinctPointers ( )
private
QueryMemoryDescriptor ResultSet::fixupQueryMemoryDescriptor ( const QueryMemoryDescriptor query_mem_desc)
static

Definition at line 756 of file ResultSet.cpp.

References QueryMemoryDescriptor::didOutputColumnar(), and query_mem_desc.

Referenced by GpuSharedMemCodeBuilder::codegenInitialization(), GpuSharedMemCodeBuilder::codegenReduction(), Executor::executeTableFunction(), QueryExecutionContext::groupBufferToDeinterleavedResults(), QueryMemoryInitializer::initRowGroups(), QueryMemoryInitializer::QueryMemoryInitializer(), and Executor::reduceMultiDeviceResults().

757  {
758  auto query_mem_desc_copy = query_mem_desc;
759  query_mem_desc_copy.resetGroupColWidths(
760  std::vector<int8_t>(query_mem_desc_copy.getGroupbyColCount(), 8));
761  if (query_mem_desc.didOutputColumnar()) {
762  return query_mem_desc_copy;
763  }
764  query_mem_desc_copy.alignPaddedSlots();
765  return query_mem_desc_copy;
766 }

+ Here is the call graph for this function:

+ Here is the caller graph for this function:

size_t ResultSet::getBufferSizeBytes ( const ExecutorDeviceType  device_type) const

Definition at line 754 of file ResultSetIteration.cpp.

References CHECK, and storage_.

754  {
755  CHECK(storage_);
756  return storage_->query_mem_desc_.getBufferSizeBytes(device_type);
757 }
std::unique_ptr< ResultSetStorage > storage_
Definition: ResultSet.h:903
#define CHECK(condition)
Definition: Logger.h:222
SQLTypeInfo ResultSet::getColType ( const size_t  col_idx) const

Definition at line 417 of file ResultSet.cpp.

References CHECK_LT, kAVG, kDOUBLE, and kTEXT.

417  {
418  if (just_explain_) {
419  return SQLTypeInfo(kTEXT, false);
420  }
421  CHECK_LT(col_idx, targets_.size());
422  return targets_[col_idx].agg_kind == kAVG ? SQLTypeInfo(kDOUBLE, false)
423  : targets_[col_idx].sql_type;
424 }
const bool just_explain_
Definition: ResultSet.h:938
const std::vector< TargetInfo > targets_
Definition: ResultSet.h:899
#define CHECK_LT(x, y)
Definition: Logger.h:232
Definition: sqltypes.h:66
Definition: sqldefs.h:73
template<typename ENTRY_TYPE >
ENTRY_TYPE ResultSet::getColumnarBaselineEntryAt ( const size_t  row_idx,
const size_t  target_idx,
const size_t  slot_idx 
) const
private
template<typename ENTRY_TYPE >
ENTRY_TYPE ResultSet::getColumnarBaselineEntryAt ( const size_t  row_idx,
const size_t  target_idx,
const size_t  slot_idx 
) const

Directly accesses the result set's storage buffer for a particular data type (row-wise output, baseline hash group by)

NOTE: Currently, only used in direct columnarization

Definition at line 1293 of file ResultSetIteration.cpp.

References CHECK_NE, and storage_.

1295  {
1296  CHECK_NE(storage_->query_mem_desc_.targetGroupbyIndicesSize(), size_t(0));
1297  const auto key_width = storage_->query_mem_desc_.getEffectiveKeyWidth();
1298  const auto column_offset =
1299  (storage_->query_mem_desc_.getTargetGroupbyIndex(target_idx) < 0)
1300  ? storage_->query_mem_desc_.getColOffInBytes(slot_idx)
1301  : storage_->query_mem_desc_.getTargetGroupbyIndex(target_idx) * key_width *
1302  storage_->query_mem_desc_.getEntryCount();
1303  const auto column_buffer = storage_->getUnderlyingBuffer() + column_offset;
1304  return reinterpret_cast<const ENTRY_TYPE*>(column_buffer)[row_idx];
1305 }
std::unique_ptr< ResultSetStorage > storage_
Definition: ResultSet.h:903
#define CHECK_NE(x, y)
Definition: Logger.h:231
const int8_t * ResultSet::getColumnarBuffer ( size_t  column_idx) const

Definition at line 1402 of file ResultSet.cpp.

References CHECK.

1402  {
1404  return storage_->getUnderlyingBuffer() + query_mem_desc_.getColOffInBytes(column_idx);
1405 }
QueryMemoryDescriptor query_mem_desc_
Definition: ResultSet.h:902
std::unique_ptr< ResultSetStorage > storage_
Definition: ResultSet.h:903
bool isZeroCopyColumnarConversionPossible(size_t column_idx) const
Definition: ResultSet.cpp:1393
#define CHECK(condition)
Definition: Logger.h:222
size_t getColOffInBytes(const size_t col_idx) const
template<typename ENTRY_TYPE >
ENTRY_TYPE ResultSet::getColumnarPerfectHashEntryAt ( const size_t  row_idx,
const size_t  target_idx,
const size_t  slot_idx 
) const
private
template<typename ENTRY_TYPE >
ENTRY_TYPE ResultSet::getColumnarPerfectHashEntryAt ( const size_t  row_idx,
const size_t  target_idx,
const size_t  slot_idx 
) const

Directly accesses the result set's storage buffer for a particular data type (columnar output, perfect hash group by)

NOTE: Currently, only used in direct columnarization

Definition at line 1239 of file ResultSetIteration.cpp.

References storage_.

1241  {
1242  const size_t column_offset = storage_->query_mem_desc_.getColOffInBytes(slot_idx);
1243  const int8_t* storage_buffer = storage_->getUnderlyingBuffer() + column_offset;
1244  return reinterpret_cast<const ENTRY_TYPE*>(storage_buffer)[row_idx];
1245 }
std::unique_ptr< ResultSetStorage > storage_
Definition: ResultSet.h:903
const std::vector< const int8_t * > & ResultSet::getColumnFrag ( const size_t  storage_idx,
const size_t  col_logical_idx,
int64_t &  global_idx 
) const
private

Definition at line 1086 of file ResultSetIteration.cpp.

References CHECK_EQ, CHECK_GE, CHECK_LE, CHECK_LT, col_buffers_, consistent_frag_sizes_, frag_offsets_, and anonymous_namespace{ResultSetIteration.cpp}::get_frag_id_and_local_idx().

Referenced by lazyReadInt(), makeGeoTargetValue(), makeTargetValue(), and makeVarlenTargetValue().

1088  {
1089  CHECK_LT(static_cast<size_t>(storage_idx), col_buffers_.size());
1090  if (col_buffers_[storage_idx].size() > 1) {
1091  int64_t frag_id = 0;
1092  int64_t local_idx = global_idx;
1093  if (consistent_frag_sizes_[storage_idx][col_logical_idx] != -1) {
1094  frag_id = global_idx / consistent_frag_sizes_[storage_idx][col_logical_idx];
1095  local_idx = global_idx % consistent_frag_sizes_[storage_idx][col_logical_idx];
1096  } else {
1097  std::tie(frag_id, local_idx) = get_frag_id_and_local_idx(
1098  frag_offsets_[storage_idx], col_logical_idx, global_idx);
1099  CHECK_LE(local_idx, global_idx);
1100  }
1101  CHECK_GE(frag_id, int64_t(0));
1102  CHECK_LT(static_cast<size_t>(frag_id), col_buffers_[storage_idx].size());
1103  global_idx = local_idx;
1104  return col_buffers_[storage_idx][frag_id];
1105  } else {
1106  CHECK_EQ(size_t(1), col_buffers_[storage_idx].size());
1107  return col_buffers_[storage_idx][0];
1108  }
1109 }
#define CHECK_EQ(x, y)
Definition: Logger.h:230
#define CHECK_GE(x, y)
Definition: Logger.h:235
#define CHECK_LT(x, y)
Definition: Logger.h:232
#define CHECK_LE(x, y)
Definition: Logger.h:233
std::vector< std::vector< std::vector< const int8_t * > > > col_buffers_
Definition: ResultSet.h:923
std::vector< std::vector< int64_t > > consistent_frag_sizes_
Definition: ResultSet.h:925
std::vector< std::vector< std::vector< int64_t > > > frag_offsets_
Definition: ResultSet.h:924
std::pair< int64_t, int64_t > get_frag_id_and_local_idx(const std::vector< std::vector< T >> &frag_offsets, const size_t tab_or_col_idx, const int64_t global_idx)

+ Here is the call graph for this function:

+ Here is the caller graph for this function:

size_t ResultSet::getCurrentRowBufferIndex ( ) const

Definition at line 291 of file ResultSet.cpp.

291  {
292  if (crt_row_buff_idx_ == 0) {
293  throw std::runtime_error("current row buffer iteration index is undefined");
294  }
295  return crt_row_buff_idx_ - 1;
296 }
size_t crt_row_buff_idx_
Definition: ResultSet.h:905
Data_Namespace::DataMgr* ResultSet::getDataManager ( ) const
private
int8_t * ResultSet::getDeviceEstimatorBuffer ( ) const

Definition at line 686 of file ResultSet.cpp.

References CHECK, and GPU.

686  {
690 }
virtual int8_t * getMemoryPtr()=0
const ExecutorDeviceType device_type_
Definition: ResultSet.h:900
#define CHECK(condition)
Definition: Logger.h:222
Data_Namespace::AbstractBuffer * device_estimator_buffer_
Definition: ResultSet.h:928
int ResultSet::getDeviceId ( ) const

Definition at line 752 of file ResultSet.cpp.

752  {
753  return device_id_;
754 }
const int device_id_
Definition: ResultSet.h:901
ExecutorDeviceType ResultSet::getDeviceType ( ) const

Definition at line 250 of file ResultSet.cpp.

250  {
251  return device_type_;
252 }
const ExecutorDeviceType device_type_
Definition: ResultSet.h:900
int64_t ResultSet::getDistinctBufferRefFromBufferRowwise ( int8_t *  rowwise_target_ptr,
const TargetInfo target_info 
) const
private
template<typename ENTRY_TYPE , QueryDescriptionType QUERY_TYPE, bool COLUMNAR_FORMAT>
ENTRY_TYPE ResultSet::getEntryAt ( const size_t  row_idx,
const size_t  target_idx,
const size_t  slot_idx 
) const
template<typename ENTRY_TYPE , QueryDescriptionType QUERY_TYPE, bool COLUMNAR_FORMAT>
ENTRY_TYPE ResultSet::getEntryAt ( const size_t  row_idx,
const size_t  target_idx,
const size_t  slot_idx 
) const

Definition at line 1163 of file ResultSetIteration.cpp.

References GroupByBaselineHash, GroupByPerfectHash, and UNREACHABLE.

1165  {
1166  if constexpr (QUERY_TYPE == QueryDescriptionType::GroupByPerfectHash) { // NOLINT
1167  if constexpr (COLUMNAR_FORMAT) { // NOLINT
1168  return getColumnarPerfectHashEntryAt<ENTRY_TYPE>(row_idx, target_idx, slot_idx);
1169  } else {
1170  return getRowWisePerfectHashEntryAt<ENTRY_TYPE>(row_idx, target_idx, slot_idx);
1171  }
1172  } else if constexpr (QUERY_TYPE == QueryDescriptionType::GroupByBaselineHash) {
1173  if constexpr (COLUMNAR_FORMAT) { // NOLINT
1174  return getColumnarBaselineEntryAt<ENTRY_TYPE>(row_idx, target_idx, slot_idx);
1175  } else {
1176  return getRowWiseBaselineEntryAt<ENTRY_TYPE>(row_idx, target_idx, slot_idx);
1177  }
1178  } else {
1179  UNREACHABLE() << "Invalid query type is used";
1180  return 0;
1181  }
1182 }
#define UNREACHABLE()
Definition: Logger.h:266
const long ResultSet::getExecTime ( ) const
inline

Definition at line 481 of file ResultSet.h.

References query_exec_time_.

481 { return query_exec_time_; }
size_t query_exec_time_
Definition: ResultSet.h:949
const Executor* ResultSet::getExecutor ( ) const
inline

Definition at line 603 of file ResultSet.h.

References QueryMemoryDescriptor::getExecutor(), and query_mem_desc_.

603 { return query_mem_desc_.getExecutor(); }
QueryMemoryDescriptor query_mem_desc_
Definition: ResultSet.h:902
const Executor * getExecutor() const

+ Here is the call graph for this function:

std::string ResultSet::getExplanation ( ) const
inline

Definition at line 393 of file ResultSet.h.

References explanation_, and just_explain_.

393  {
394  if (just_explain_) {
395  return explanation_;
396  }
397  return {};
398  }
const bool just_explain_
Definition: ResultSet.h:938
std::string explanation_
Definition: ResultSet.h:937
GeoReturnType ResultSet::getGeoReturnType ( ) const
inline

Definition at line 526 of file ResultSet.h.

References geo_return_type_.

526 { return geo_return_type_; }
GeoReturnType geo_return_type_
Definition: ResultSet.h:944
int ResultSet::getGpuCount ( ) const
private
int8_t * ResultSet::getHostEstimatorBuffer ( ) const

Definition at line 692 of file ResultSet.cpp.

692  {
693  return host_estimator_buffer_;
694 }
int8_t * host_estimator_buffer_
Definition: ResultSet.h:929
std::unordered_set<size_t> ResultSet::getInputTableKeys ( ) const
inline

Definition at line 487 of file ResultSet.h.

References input_table_keys_.

487 { return input_table_keys_; }
std::unordered_set< size_t > input_table_keys_
Definition: ResultSet.h:951
const std::vector<ColumnLazyFetchInfo>& ResultSet::getLazyFetchInfo ( ) const
inline

Definition at line 555 of file ResultSet.h.

References lazy_fetch_info_.

555  {
556  return lazy_fetch_info_;
557  }
std::vector< ColumnLazyFetchInfo > lazy_fetch_info_
Definition: ResultSet.h:922
size_t ResultSet::getLimit ( ) const

Definition at line 1302 of file ResultSet.cpp.

1302  {
1303  return keep_first_;
1304 }
size_t keep_first_
Definition: ResultSet.h:908
size_t ResultSet::getNDVEstimator ( ) const

Definition at line 33 of file CardinalityEstimator.cpp.

References bitmap_set_size(), CHECK, CHECK_LE, LOG, and logger::WARNING.

33  {
34  CHECK(dynamic_cast<const Analyzer::NDVEstimator*>(estimator_.get()));
36  auto bits_set = bitmap_set_size(host_estimator_buffer_, estimator_->getBufferSize());
37  if (bits_set == 0) {
38  // empty result set, return 1 for a groups buffer size of 1
39  return 1;
40  }
41  const auto total_bits = estimator_->getBufferSize() * 8;
42  CHECK_LE(bits_set, total_bits);
43  const auto unset_bits = total_bits - bits_set;
44  const auto ratio = static_cast<double>(unset_bits) / total_bits;
45  if (ratio == 0.) {
46  LOG(WARNING)
47  << "Failed to get a high quality cardinality estimation, falling back to "
48  "approximate group by buffer size guess.";
49  return 0;
50  }
51  return -static_cast<double>(total_bits) * log(ratio);
52 }
#define LOG(tag)
Definition: Logger.h:216
const std::shared_ptr< const Analyzer::Estimator > estimator_
Definition: ResultSet.h:927
#define CHECK_LE(x, y)
Definition: Logger.h:233
int8_t * host_estimator_buffer_
Definition: ResultSet.h:929
#define CHECK(condition)
Definition: Logger.h:222
size_t bitmap_set_size(const int8_t *bitmap, const size_t bitmap_byte_sz)
Definition: CountDistinct.h:37

+ Here is the call graph for this function:

std::vector< TargetValue > ResultSet::getNextRow ( const bool  translate_strings,
const bool  decimal_to_double 
) const

Definition at line 296 of file ResultSetIteration.cpp.

297  {
298  std::lock_guard<std::mutex> lock(row_iteration_mutex_);
299  if (!storage_ && !just_explain_) {
300  return {};
301  }
302  return getNextRowUnlocked(translate_strings, decimal_to_double);
303 }
std::mutex row_iteration_mutex_
Definition: ResultSet.h:941
double decimal_to_double(const SQLTypeInfo &otype, int64_t oval)
std::unique_ptr< ResultSetStorage > storage_
Definition: ResultSet.h:903
const bool just_explain_
Definition: ResultSet.h:938
std::vector< TargetValue > getNextRowUnlocked(const bool translate_strings, const bool decimal_to_double) const
std::vector< TargetValue > ResultSet::getNextRowImpl ( const bool  translate_strings,
const bool  decimal_to_double 
) const
private

Definition at line 318 of file ResultSetIteration.cpp.

References CHECK, and CHECK_EQ.

319  {
320  size_t entry_buff_idx = 0;
321  do {
323  return {};
324  }
325 
326  entry_buff_idx = advanceCursorToNextEntry();
327 
328  if (crt_row_buff_idx_ >= entryCount()) {
330  return {};
331  }
333  ++fetched_so_far_;
334 
335  } while (drop_first_ && fetched_so_far_ <= drop_first_);
336 
337  auto row = getRowAt(entry_buff_idx, translate_strings, decimal_to_double, false);
338  CHECK(!row.empty());
339 
340  return row;
341 }
#define CHECK_EQ(x, y)
Definition: Logger.h:230
double decimal_to_double(const SQLTypeInfo &otype, int64_t oval)
size_t keep_first_
Definition: ResultSet.h:908
size_t drop_first_
Definition: ResultSet.h:907
std::vector< TargetValue > getRowAt(const size_t index) const
size_t entryCount() const
Returns the number of entries the result set is allocated to hold.
#define CHECK(condition)
Definition: Logger.h:222
size_t fetched_so_far_
Definition: ResultSet.h:906
size_t crt_row_buff_idx_
Definition: ResultSet.h:905
size_t advanceCursorToNextEntry() const
std::vector< TargetValue > ResultSet::getNextRowUnlocked ( const bool  translate_strings,
const bool  decimal_to_double 
) const
private

Definition at line 305 of file ResultSetIteration.cpp.

307  {
308  if (just_explain_) {
309  if (fetched_so_far_) {
310  return {};
311  }
312  fetched_so_far_ = 1;
313  return {explanation_};
314  }
315  return getNextRowImpl(translate_strings, decimal_to_double);
316 }
double decimal_to_double(const SQLTypeInfo &otype, int64_t oval)
const bool just_explain_
Definition: ResultSet.h:938
std::string explanation_
Definition: ResultSet.h:937
std::vector< TargetValue > getNextRowImpl(const bool translate_strings, const bool decimal_to_double) const
size_t fetched_so_far_
Definition: ResultSet.h:906
size_t ResultSet::getNumColumnsLazyFetched ( ) const
inline

Definition at line 564 of file ResultSet.h.

References lazy_fetch_info_.

564  {
565  auto is_lazy = [](auto const& info) { return info.is_lazily_fetched; };
566  return std::count_if(lazy_fetch_info_.begin(), lazy_fetch_info_.end(), is_lazy);
567  }
std::vector< ColumnLazyFetchInfo > lazy_fetch_info_
Definition: ResultSet.h:922
OneIntegerColumnRow ResultSet::getOneColRow ( const size_t  index) const

Definition at line 234 of file ResultSetIteration.cpp.

References align_to_int64(), CHECK, get_key_bytes_rowwise(), and row_ptr_rowwise().

234  {
235  const auto storage_lookup_result = findStorage(global_entry_idx);
236  const auto storage = storage_lookup_result.storage_ptr;
237  const auto local_entry_idx = storage_lookup_result.fixedup_entry_idx;
238  if (storage->isEmptyEntry(local_entry_idx)) {
239  return {0, false};
240  }
241  const auto buff = storage->buff_;
242  CHECK(buff);
244  const auto keys_ptr = row_ptr_rowwise(buff, query_mem_desc_, local_entry_idx);
245  const auto key_bytes_with_padding =
247  const auto rowwise_target_ptr = keys_ptr + key_bytes_with_padding;
248  const auto tv = getTargetValueFromBufferRowwise(rowwise_target_ptr,
249  keys_ptr,
250  global_entry_idx,
251  targets_.front(),
252  0,
253  0,
254  false,
255  false,
256  false);
257  const auto scalar_tv = boost::get<ScalarTargetValue>(&tv);
258  CHECK(scalar_tv);
259  const auto ival_ptr = boost::get<int64_t>(scalar_tv);
260  CHECK(ival_ptr);
261  return {*ival_ptr, true};
262 }
QueryMemoryDescriptor query_mem_desc_
Definition: ResultSet.h:902
TargetValue getTargetValueFromBufferRowwise(int8_t *rowwise_target_ptr, int8_t *keys_ptr, const size_t entry_buff_idx, const TargetInfo &target_info, const size_t target_logical_idx, const size_t slot_idx, const bool translate_strings, const bool decimal_to_double, const bool fixup_count_distinct_pointers) const
const std::vector< TargetInfo > targets_
Definition: ResultSet.h:899
StorageLookupResult findStorage(const size_t entry_idx) const
Definition: ResultSet.cpp:939
T row_ptr_rowwise(T buff, const QueryMemoryDescriptor &query_mem_desc, const size_t entry_idx)
#define CHECK(condition)
Definition: Logger.h:222
size_t get_key_bytes_rowwise(const QueryMemoryDescriptor &query_mem_desc)
FORCE_INLINE HOST DEVICE T align_to_int64(T addr)

+ Here is the call graph for this function:

const int8_t ResultSet::getPaddedSlotWidthBytes ( const size_t  slot_idx) const
inline

Definition at line 544 of file ResultSet.h.

References QueryMemoryDescriptor::getPaddedSlotWidthBytes(), and query_mem_desc_.

544  {
545  return query_mem_desc_.getPaddedSlotWidthBytes(slot_idx);
546  }
QueryMemoryDescriptor query_mem_desc_
Definition: ResultSet.h:902
const int8_t getPaddedSlotWidthBytes(const size_t slot_idx) const

+ Here is the call graph for this function:

const Permutation & ResultSet::getPermutationBuffer ( ) const

Definition at line 862 of file ResultSet.cpp.

862  {
863  return permutation_;
864 }
Permutation permutation_
Definition: ResultSet.h:910
QueryDescriptionType ResultSet::getQueryDescriptionType ( ) const
inline

Definition at line 540 of file ResultSet.h.

References QueryMemoryDescriptor::getQueryDescriptionType(), and query_mem_desc_.

540  {
542  }
QueryMemoryDescriptor query_mem_desc_
Definition: ResultSet.h:902
QueryDescriptionType getQueryDescriptionType() const

+ Here is the call graph for this function:

const QueryMemoryDescriptor & ResultSet::getQueryMemDesc ( ) const

Definition at line 672 of file ResultSet.cpp.

References CHECK.

672  {
673  CHECK(storage_);
674  return storage_->query_mem_desc_;
675 }
std::unique_ptr< ResultSetStorage > storage_
Definition: ResultSet.h:903
#define CHECK(condition)
Definition: Logger.h:222
const QueryPlanHash ResultSet::getQueryPlanHash ( )
inline

Definition at line 485 of file ResultSet.h.

References query_plan_.

485 { return query_plan_; }
QueryPlanHash query_plan_
Definition: ResultSet.h:950
int64_t ResultSet::getQueueTime ( ) const

Definition at line 722 of file ResultSet.cpp.

int64_t ResultSet::getRenderTime ( ) const

Definition at line 727 of file ResultSet.cpp.

727  {
728  return timings_.render_time;
729 }
QueryExecutionTimings timings_
Definition: ResultSet.h:915
std::vector<TargetValue> ResultSet::getRowAt ( const size_t  index) const
TargetValue ResultSet::getRowAt ( const size_t  row_idx,
const size_t  col_idx,
const bool  translate_strings,
const bool  decimal_to_double = true 
) const
std::vector<TargetValue> ResultSet::getRowAt ( const size_t  index,
const bool  translate_strings,
const bool  decimal_to_double,
const bool  fixup_count_distinct_pointers,
const std::vector< bool > &  targets_to_skip = {} 
) const
private
std::vector< TargetValue > ResultSet::getRowAtNoTranslations ( const size_t  index,
const std::vector< bool > &  targets_to_skip = {} 
) const

Definition at line 273 of file ResultSetIteration.cpp.

275  {
276  if (logical_index >= entryCount()) {
277  return {};
278  }
279  const auto entry_idx =
280  permutation_.empty() ? logical_index : permutation_[logical_index];
281  return getRowAt(entry_idx, false, false, false, targets_to_skip);
282 }
Permutation permutation_
Definition: ResultSet.h:910
std::vector< TargetValue > getRowAt(const size_t index) const
size_t entryCount() const
Returns the number of entries the result set is allocated to hold.
std::shared_ptr<RowSetMemoryOwner> ResultSet::getRowSetMemOwner ( ) const
inline

Definition at line 429 of file ResultSet.h.

References row_set_mem_owner_.

429  {
430  return row_set_mem_owner_;
431  }
std::shared_ptr< RowSetMemoryOwner > row_set_mem_owner_
Definition: ResultSet.h:909
template<typename ENTRY_TYPE >
ENTRY_TYPE ResultSet::getRowWiseBaselineEntryAt ( const size_t  row_idx,
const size_t  target_idx,
const size_t  slot_idx 
) const
private
template<typename ENTRY_TYPE >
ENTRY_TYPE ResultSet::getRowWiseBaselineEntryAt ( const size_t  row_idx,
const size_t  target_idx,
const size_t  slot_idx 
) const

Directly accesses the result set's storage buffer for a particular data type (row-wise output, baseline hash group by)

NOTE: Currently, only used in direct columnarization

Definition at line 1271 of file ResultSetIteration.cpp.

References CHECK_NE, row_ptr_rowwise(), and storage_.

1273  {
1274  CHECK_NE(storage_->query_mem_desc_.targetGroupbyIndicesSize(), size_t(0));
1275  const auto key_width = storage_->query_mem_desc_.getEffectiveKeyWidth();
1276  auto keys_ptr = row_ptr_rowwise(
1277  storage_->getUnderlyingBuffer(), storage_->query_mem_desc_, row_idx);
1278  const auto column_offset =
1279  (storage_->query_mem_desc_.getTargetGroupbyIndex(target_idx) < 0)
1280  ? storage_->query_mem_desc_.getColOffInBytes(slot_idx)
1281  : storage_->query_mem_desc_.getTargetGroupbyIndex(target_idx) * key_width;
1282  const auto storage_buffer = keys_ptr + column_offset;
1283  return *reinterpret_cast<const ENTRY_TYPE*>(storage_buffer);
1284 }
std::unique_ptr< ResultSetStorage > storage_
Definition: ResultSet.h:903
#define CHECK_NE(x, y)
Definition: Logger.h:231
T row_ptr_rowwise(T buff, const QueryMemoryDescriptor &query_mem_desc, const size_t entry_idx)

+ Here is the call graph for this function:

template<typename ENTRY_TYPE >
ENTRY_TYPE ResultSet::getRowWisePerfectHashEntryAt ( const size_t  row_idx,
const size_t  target_idx,
const size_t  slot_idx 
) const
private
template<typename ENTRY_TYPE >
ENTRY_TYPE ResultSet::getRowWisePerfectHashEntryAt ( const size_t  row_idx,
const size_t  target_idx,
const size_t  slot_idx 
) const

Directly accesses the result set's storage buffer for a particular data type (row-wise output, perfect hash group by)

NOTE: Currently, only used in direct columnarization

Definition at line 1254 of file ResultSetIteration.cpp.

References storage_.

1256  {
1257  const size_t row_offset = storage_->query_mem_desc_.getRowSize() * row_idx;
1258  const size_t column_offset = storage_->query_mem_desc_.getColOffInBytes(slot_idx);
1259  const int8_t* storage_buffer =
1260  storage_->getUnderlyingBuffer() + row_offset + column_offset;
1261  return *reinterpret_cast<const ENTRY_TYPE*>(storage_buffer);
1262 }
std::unique_ptr< ResultSetStorage > storage_
Definition: ResultSet.h:903
std::tuple< std::vector< bool >, size_t > ResultSet::getSingleSlotTargetBitmap ( ) const

Definition at line 1408 of file ResultSet.cpp.

References anonymous_namespace{RelAlgExecutor.cpp}::is_agg(), and kAVG.

1408  {
1409  std::vector<bool> target_bitmap(targets_.size(), true);
1410  size_t num_single_slot_targets = 0;
1411  for (size_t target_idx = 0; target_idx < targets_.size(); target_idx++) {
1412  const auto& sql_type = targets_[target_idx].sql_type;
1413  if (targets_[target_idx].is_agg && targets_[target_idx].agg_kind == kAVG) {
1414  target_bitmap[target_idx] = false;
1415  } else if (sql_type.is_varlen()) {
1416  target_bitmap[target_idx] = false;
1417  } else {
1418  num_single_slot_targets++;
1419  }
1420  }
1421  return std::make_tuple(std::move(target_bitmap), num_single_slot_targets);
1422 }
bool is_agg(const Analyzer::Expr *expr)
const std::vector< TargetInfo > targets_
Definition: ResultSet.h:899
Definition: sqldefs.h:73

+ Here is the call graph for this function:

std::vector< size_t > ResultSet::getSlotIndicesForTargetIndices ( ) const

Definition at line 1451 of file ResultSet.cpp.

References advance_slot().

1451  {
1452  std::vector<size_t> slot_indices(targets_.size(), 0);
1453  size_t slot_index = 0;
1454  for (size_t target_idx = 0; target_idx < targets_.size(); target_idx++) {
1455  slot_indices[target_idx] = slot_index;
1456  slot_index = advance_slot(slot_index, targets_[target_idx], false);
1457  }
1458  return slot_indices;
1459 }
const std::vector< TargetInfo > targets_
Definition: ResultSet.h:899
size_t advance_slot(const size_t j, const TargetInfo &target_info, const bool separate_varlen_storage)

+ Here is the call graph for this function:

const ResultSetStorage * ResultSet::getStorage ( ) const

Definition at line 409 of file ResultSet.cpp.

409  {
410  return storage_.get();
411 }
std::unique_ptr< ResultSetStorage > storage_
Definition: ResultSet.h:903
std::pair< size_t, size_t > ResultSet::getStorageIndex ( const size_t  entry_idx) const
private

Returns a (storageIdx, entryIdx) pair, where: storageIdx — 0 refers to storage_, and any value i > 0 refers to appended_storage_[i - 1]; entryIdx — the local index into that storage object.

Definition at line 914 of file ResultSet.cpp.

References CHECK_NE, and UNREACHABLE.

Referenced by makeGeoTargetValue(), makeTargetValue(), and makeVarlenTargetValue().

914  {
915  size_t fixedup_entry_idx = entry_idx;
916  auto entry_count = storage_->query_mem_desc_.getEntryCount();
917  const bool is_rowwise_layout = !storage_->query_mem_desc_.didOutputColumnar();
918  if (fixedup_entry_idx < entry_count) {
919  return {0, fixedup_entry_idx};
920  }
921  fixedup_entry_idx -= entry_count;
922  for (size_t i = 0; i < appended_storage_.size(); ++i) {
923  const auto& desc = appended_storage_[i]->query_mem_desc_;
924  CHECK_NE(is_rowwise_layout, desc.didOutputColumnar());
925  entry_count = desc.getEntryCount();
926  if (fixedup_entry_idx < entry_count) {
927  return {i + 1, fixedup_entry_idx};
928  }
929  fixedup_entry_idx -= entry_count;
930  }
931  UNREACHABLE() << "entry_idx = " << entry_idx << ", query_mem_desc_.getEntryCount() = "
933  return {};
934 }
AppendedStorage appended_storage_
Definition: ResultSet.h:904
QueryMemoryDescriptor query_mem_desc_
Definition: ResultSet.h:902
#define UNREACHABLE()
Definition: Logger.h:266
std::unique_ptr< ResultSetStorage > storage_
Definition: ResultSet.h:903
#define CHECK_NE(x, y)
Definition: Logger.h:231

+ Here is the caller graph for this function:

const std::vector< std::string > ResultSet::getStringDictionaryPayloadCopy ( const int  dict_id) const

Definition at line 1306 of file ResultSet.cpp.

References catalog_(), and CHECK.

1307  {
1308  const auto sdp = row_set_mem_owner_->getOrAddStringDictProxy(
1309  dict_id, /*with_generation=*/true, catalog_);
1310  CHECK(sdp);
1311  return sdp->getDictionary()->copyStrings();
1312 }
const Catalog_Namespace::Catalog * catalog_
Definition: ResultSet.h:912
std::shared_ptr< RowSetMemoryOwner > row_set_mem_owner_
Definition: ResultSet.h:909
#define CHECK(condition)
Definition: Logger.h:222

+ Here is the call graph for this function:

StringDictionaryProxy * ResultSet::getStringDictionaryProxy ( int const  dict_id) const

Definition at line 426 of file ResultSet.cpp.

References catalog_().

426  {
427  constexpr bool with_generation = true;
428  return catalog_ ? row_set_mem_owner_->getOrAddStringDictProxy(
429  dict_id, with_generation, catalog_)
430  : row_set_mem_owner_->getStringDictProxy(dict_id);
431 }
const Catalog_Namespace::Catalog * catalog_
Definition: ResultSet.h:912
std::shared_ptr< RowSetMemoryOwner > row_set_mem_owner_
Definition: ResultSet.h:909

+ Here is the call graph for this function:

std::tuple< std::vector< bool >, size_t > ResultSet::getSupportedSingleSlotTargetBitmap ( ) const

This function returns a bitmap marking all supported single-column targets suitable for direct columnarization, together with the bitmap's population count.

The final goal is to remove the need for such selection, but at the moment for any target that doesn't qualify for direct columnarization, we use the traditional result set's iteration to handle it (e.g., count distinct, approximate count distinct)

Definition at line 1432 of file ResultSet.cpp.

References CHECK, CHECK_GE, is_distinct_target(), kAPPROX_QUANTILE, kFLOAT, and kSAMPLE.

1433  {
1435  auto [single_slot_targets, num_single_slot_targets] = getSingleSlotTargetBitmap();
1436 
1437  for (size_t target_idx = 0; target_idx < single_slot_targets.size(); target_idx++) {
1438  const auto& target = targets_[target_idx];
1439  if (single_slot_targets[target_idx] &&
1440  (is_distinct_target(target) || target.agg_kind == kAPPROX_QUANTILE ||
1441  (target.is_agg && target.agg_kind == kSAMPLE && target.sql_type == kFLOAT))) {
1442  single_slot_targets[target_idx] = false;
1443  num_single_slot_targets--;
1444  }
1445  }
1446  CHECK_GE(num_single_slot_targets, size_t(0));
1447  return std::make_tuple(std::move(single_slot_targets), num_single_slot_targets);
1448 }
#define CHECK_GE(x, y)
Definition: Logger.h:235
const std::vector< TargetInfo > targets_
Definition: ResultSet.h:899
std::tuple< std::vector< bool >, size_t > getSingleSlotTargetBitmap() const
Definition: ResultSet.cpp:1408
bool is_distinct_target(const TargetInfo &target_info)
Definition: TargetInfo.h:107
#define CHECK(condition)
Definition: Logger.h:222
bool isDirectColumnarConversionPossible() const
Definition: ResultSet.cpp:1371

+ Here is the call graph for this function:

ChunkStats ResultSet::getTableFunctionChunkStats ( const size_t  target_idx) const
const std::vector< TargetInfo > & ResultSet::getTargetInfos ( ) const

Definition at line 677 of file ResultSet.cpp.

677  {
678  return targets_;
679 }
const std::vector< TargetInfo > targets_
Definition: ResultSet.h:899
const std::vector< int64_t > & ResultSet::getTargetInitVals ( ) const

Definition at line 681 of file ResultSet.cpp.

References CHECK.

681  {
682  CHECK(storage_);
683  return storage_->target_init_vals_;
684 }
std::unique_ptr< ResultSetStorage > storage_
Definition: ResultSet.h:903
#define CHECK(condition)
Definition: Logger.h:222
std::vector<TargetMetaInfo> ResultSet::getTargetMetaInfo ( )
inline

Definition at line 499 of file ResultSet.h.

References target_meta_info_.

499 { return target_meta_info_; }
std::vector< TargetMetaInfo > target_meta_info_
Definition: ResultSet.h:952
TargetValue ResultSet::getTargetValueFromBufferColwise ( const int8_t *  col_ptr,
const int8_t *  keys_ptr,
const QueryMemoryDescriptor query_mem_desc,
const size_t  local_entry_idx,
const size_t  global_entry_idx,
const TargetInfo target_info,
const size_t  target_logical_idx,
const size_t  slot_idx,
const bool  translate_strings,
const bool  decimal_to_double 
) const
private

Definition at line 1965 of file ResultSetIteration.cpp.

References advance_to_next_columnar_target_buff(), TargetInfo::agg_kind, CHECK, CHECK_GE, anonymous_namespace{ResultSetIteration.cpp}::columnar_elem_ptr(), QueryMemoryDescriptor::didOutputColumnar(), QueryMemoryDescriptor::getEffectiveKeyWidth(), QueryMemoryDescriptor::getEntryCount(), QueryMemoryDescriptor::getPaddedSlotWidthBytes(), QueryMemoryDescriptor::getTargetGroupbyIndex(), TargetInfo::is_agg, SQLTypeInfo::is_geometry(), is_real_str_or_array(), kAVG, anonymous_namespace{ResultSetIteration.cpp}::make_avg_target_value(), makeGeoTargetValue(), makeTargetValue(), makeVarlenTargetValue(), query_mem_desc_, TargetInfo::sql_type, and QueryMemoryDescriptor::targetGroupbyIndicesSize().

1975  {
1977  const auto col1_ptr = col_ptr;
1978  const auto compact_sz1 = query_mem_desc.getPaddedSlotWidthBytes(slot_idx);
1979  const auto next_col_ptr =
1980  advance_to_next_columnar_target_buff(col1_ptr, query_mem_desc, slot_idx);
1981  const auto col2_ptr = ((target_info.is_agg && target_info.agg_kind == kAVG) ||
1982  is_real_str_or_array(target_info))
1983  ? next_col_ptr
1984  : nullptr;
1985  const auto compact_sz2 = ((target_info.is_agg && target_info.agg_kind == kAVG) ||
1986  is_real_str_or_array(target_info))
1987  ? query_mem_desc.getPaddedSlotWidthBytes(slot_idx + 1)
1988  : 0;
1989 
1990  // TODO(Saman): add required logics for count distinct
1991  // geospatial target values:
1992  if (target_info.sql_type.is_geometry()) {
1993  return makeGeoTargetValue(
1994  col1_ptr, slot_idx, target_info, target_logical_idx, global_entry_idx);
1995  }
1996 
1997  const auto ptr1 = columnar_elem_ptr(local_entry_idx, col1_ptr, compact_sz1);
1998  if (target_info.agg_kind == kAVG || is_real_str_or_array(target_info)) {
1999  CHECK(col2_ptr);
2000  CHECK(compact_sz2);
2001  const auto ptr2 = columnar_elem_ptr(local_entry_idx, col2_ptr, compact_sz2);
2002  return target_info.agg_kind == kAVG
2003  ? make_avg_target_value(ptr1, compact_sz1, ptr2, compact_sz2, target_info)
2004  : makeVarlenTargetValue(ptr1,
2005  compact_sz1,
2006  ptr2,
2007  compact_sz2,
2008  target_info,
2009  target_logical_idx,
2010  translate_strings,
2011  global_entry_idx);
2012  }
2014  query_mem_desc_.getTargetGroupbyIndex(target_logical_idx) < 0) {
2015  return makeTargetValue(ptr1,
2016  compact_sz1,
2017  target_info,
2018  target_logical_idx,
2019  translate_strings,
2021  global_entry_idx);
2022  }
2023  const auto key_width = query_mem_desc_.getEffectiveKeyWidth();
2024  const auto key_idx = query_mem_desc_.getTargetGroupbyIndex(target_logical_idx);
2025  CHECK_GE(key_idx, 0);
2026  auto key_col_ptr = keys_ptr + key_idx * query_mem_desc_.getEntryCount() * key_width;
2027  return makeTargetValue(columnar_elem_ptr(local_entry_idx, key_col_ptr, key_width),
2028  key_width,
2029  target_info,
2030  target_logical_idx,
2031  translate_strings,
2033  global_entry_idx);
2034 }
double decimal_to_double(const SQLTypeInfo &otype, int64_t oval)
int64_t getTargetGroupbyIndex(const size_t target_idx) const
T advance_to_next_columnar_target_buff(T target_ptr, const QueryMemoryDescriptor &query_mem_desc, const size_t target_slot_idx)
SQLTypeInfo sql_type
Definition: TargetInfo.h:52
QueryMemoryDescriptor query_mem_desc_
Definition: ResultSet.h:902
#define CHECK_GE(x, y)
Definition: Logger.h:235
size_t getEffectiveKeyWidth() const
TargetValue makeGeoTargetValue(const int8_t *geo_target_ptr, const size_t slot_idx, const TargetInfo &target_info, const size_t target_logical_idx, const size_t entry_buff_idx) const
TargetValue make_avg_target_value(const int8_t *ptr1, const int8_t compact_sz1, const int8_t *ptr2, const int8_t compact_sz2, const TargetInfo &target_info)
bool is_agg
Definition: TargetInfo.h:50
size_t targetGroupbyIndicesSize() const
const int8_t getPaddedSlotWidthBytes(const size_t slot_idx) const
SQLAgg agg_kind
Definition: TargetInfo.h:51
bool is_real_str_or_array(const TargetInfo &target_info)
TargetValue makeTargetValue(const int8_t *ptr, const int8_t compact_sz, const TargetInfo &target_info, const size_t target_logical_idx, const bool translate_strings, const bool decimal_to_double, const size_t entry_buff_idx) const
#define CHECK(condition)
Definition: Logger.h:222
bool is_geometry() const
Definition: sqltypes.h:612
Definition: sqldefs.h:73
const int8_t * columnar_elem_ptr(const size_t entry_idx, const int8_t *col1_ptr, const int8_t compact_sz1)
TargetValue makeVarlenTargetValue(const int8_t *ptr1, const int8_t compact_sz1, const int8_t *ptr2, const int8_t compact_sz2, const TargetInfo &target_info, const size_t target_logical_idx, const bool translate_strings, const size_t entry_buff_idx) const

+ Here is the call graph for this function:

TargetValue ResultSet::getTargetValueFromBufferRowwise ( int8_t *  rowwise_target_ptr,
int8_t *  keys_ptr,
const size_t  entry_buff_idx,
const TargetInfo target_info,
const size_t  target_logical_idx,
const size_t  slot_idx,
const bool  translate_strings,
const bool  decimal_to_double,
const bool  fixup_count_distinct_pointers 
) const
private

Definition at line 2038 of file ResultSetIteration.cpp.

References TargetInfo::agg_kind, CHECK, QueryMemoryDescriptor::count_distinct_descriptors_, SQLTypeInfo::get_compression(), QueryMemoryDescriptor::getEffectiveKeyWidth(), QueryMemoryDescriptor::getLogicalSlotWidthBytes(), QueryMemoryDescriptor::getPaddedSlotWidthBytes(), QueryMemoryDescriptor::getTargetGroupbyIndex(), QueryMemoryDescriptor::hasKeylessHash(), TargetInfo::is_agg, SQLTypeInfo::is_array(), is_distinct_target(), SQLTypeInfo::is_geometry(), is_real_str_or_array(), SQLTypeInfo::is_string(), QueryMemoryDescriptor::isSingleColumnGroupByWithPerfectHash(), kAVG, kENCODING_NONE, anonymous_namespace{ResultSetIteration.cpp}::make_avg_target_value(), makeGeoTargetValue(), makeTargetValue(), makeVarlenTargetValue(), query_mem_desc_, row_set_mem_owner_, separate_varlen_storage_valid_, TargetInfo::sql_type, storage_, QueryMemoryDescriptor::targetGroupbyIndicesSize(), and UNLIKELY.

2047  {
2048  if (UNLIKELY(fixup_count_distinct_pointers)) {
2049  if (is_distinct_target(target_info)) {
2050  auto count_distinct_ptr_ptr = reinterpret_cast<int64_t*>(rowwise_target_ptr);
2051  const auto remote_ptr = *count_distinct_ptr_ptr;
2052  if (remote_ptr) {
2053  const auto ptr = storage_->mappedPtr(remote_ptr);
2054  if (ptr) {
2055  *count_distinct_ptr_ptr = ptr;
2056  } else {
2057  // need to create a zero filled buffer for this remote_ptr
2058  const auto& count_distinct_desc =
2059  query_mem_desc_.count_distinct_descriptors_[target_logical_idx];
2060  const auto bitmap_byte_sz = count_distinct_desc.sub_bitmap_count == 1
2061  ? count_distinct_desc.bitmapSizeBytes()
2062  : count_distinct_desc.bitmapPaddedSizeBytes();
2063  auto count_distinct_buffer = row_set_mem_owner_->allocateCountDistinctBuffer(
2064  bitmap_byte_sz, /*thread_idx=*/0);
2065  *count_distinct_ptr_ptr = reinterpret_cast<int64_t>(count_distinct_buffer);
2066  }
2067  }
2068  }
2069  return int64_t(0);
2070  }
2071  if (target_info.sql_type.is_geometry()) {
2072  return makeGeoTargetValue(
2073  rowwise_target_ptr, slot_idx, target_info, target_logical_idx, entry_buff_idx);
2074  }
2075 
2076  auto ptr1 = rowwise_target_ptr;
2077  int8_t compact_sz1 = query_mem_desc_.getPaddedSlotWidthBytes(slot_idx);
2079  !query_mem_desc_.hasKeylessHash() && !target_info.is_agg) {
2080  // Single column perfect hash group by can utilize one slot for both the key and the
2081  // target value if both values fit in 8 bytes. Use the target value actual size for
2082  // this case. If they don't, the target value should be 8 bytes, so we can still use
2083  // the actual size rather than the compact size.
2084  compact_sz1 = query_mem_desc_.getLogicalSlotWidthBytes(slot_idx);
2085  }
2086 
2087  // logic for deciding width of column
2088  if (target_info.agg_kind == kAVG || is_real_str_or_array(target_info)) {
2089  const auto ptr2 =
2090  rowwise_target_ptr + query_mem_desc_.getPaddedSlotWidthBytes(slot_idx);
2091  int8_t compact_sz2 = 0;
2092  // Skip reading the second slot if we have a none encoded string and are using
2093  // the none encoded strings buffer attached to ResultSetStorage
2095  (target_info.sql_type.is_array() ||
2096  (target_info.sql_type.is_string() &&
2097  target_info.sql_type.get_compression() == kENCODING_NONE)))) {
2098  compact_sz2 = query_mem_desc_.getPaddedSlotWidthBytes(slot_idx + 1);
2099  }
2100  if (separate_varlen_storage_valid_ && target_info.is_agg) {
2101  compact_sz2 = 8; // TODO(adb): is there a better way to do this?
2102  }
2103  CHECK(ptr2);
2104  return target_info.agg_kind == kAVG
2105  ? make_avg_target_value(ptr1, compact_sz1, ptr2, compact_sz2, target_info)
2106  : makeVarlenTargetValue(ptr1,
2107  compact_sz1,
2108  ptr2,
2109  compact_sz2,
2110  target_info,
2111  target_logical_idx,
2112  translate_strings,
2113  entry_buff_idx);
2114  }
2116  query_mem_desc_.getTargetGroupbyIndex(target_logical_idx) < 0) {
2117  return makeTargetValue(ptr1,
2118  compact_sz1,
2119  target_info,
2120  target_logical_idx,
2121  translate_strings,
2123  entry_buff_idx);
2124  }
2125  const auto key_width = query_mem_desc_.getEffectiveKeyWidth();
2126  ptr1 = keys_ptr + query_mem_desc_.getTargetGroupbyIndex(target_logical_idx) * key_width;
2127  return makeTargetValue(ptr1,
2128  key_width,
2129  target_info,
2130  target_logical_idx,
2131  translate_strings,
2133  entry_buff_idx);
2134 }
double decimal_to_double(const SQLTypeInfo &otype, int64_t oval)
int64_t getTargetGroupbyIndex(const size_t target_idx) const
SQLTypeInfo sql_type
Definition: TargetInfo.h:52
QueryMemoryDescriptor query_mem_desc_
Definition: ResultSet.h:902
std::unique_ptr< ResultSetStorage > storage_
Definition: ResultSet.h:903
size_t getEffectiveKeyWidth() const
TargetValue makeGeoTargetValue(const int8_t *geo_target_ptr, const size_t slot_idx, const TargetInfo &target_info, const size_t target_logical_idx, const size_t entry_buff_idx) const
TargetValue make_avg_target_value(const int8_t *ptr1, const int8_t compact_sz1, const int8_t *ptr2, const int8_t compact_sz2, const TargetInfo &target_info)
std::shared_ptr< RowSetMemoryOwner > row_set_mem_owner_
Definition: ResultSet.h:909
bool is_agg
Definition: TargetInfo.h:50
size_t targetGroupbyIndicesSize() const
CountDistinctDescriptors count_distinct_descriptors_
bool is_distinct_target(const TargetInfo &target_info)
Definition: TargetInfo.h:107
const int8_t getPaddedSlotWidthBytes(const size_t slot_idx) const
SQLAgg agg_kind
Definition: TargetInfo.h:51
#define UNLIKELY(x)
Definition: likely.h:25
bool is_real_str_or_array(const TargetInfo &target_info)
bool isSingleColumnGroupByWithPerfectHash() const
HOST DEVICE EncodingType get_compression() const
Definition: sqltypes.h:412
TargetValue makeTargetValue(const int8_t *ptr, const int8_t compact_sz, const TargetInfo &target_info, const size_t target_logical_idx, const bool translate_strings, const bool decimal_to_double, const size_t entry_buff_idx) const
#define CHECK(condition)
Definition: Logger.h:222
bool is_geometry() const
Definition: sqltypes.h:612
bool separate_varlen_storage_valid_
Definition: ResultSet.h:936
bool is_string() const
Definition: sqltypes.h:600
const int8_t getLogicalSlotWidthBytes(const size_t slot_idx) const
Definition: sqldefs.h:73
bool is_array() const
Definition: sqltypes.h:608
TargetValue makeVarlenTargetValue(const int8_t *ptr1, const int8_t compact_sz1, const int8_t *ptr2, const int8_t compact_sz2, const TargetInfo &target_info, const size_t target_logical_idx, const bool translate_strings, const size_t entry_buff_idx) const

+ Here is the call graph for this function:

const std::pair< std::vector< int32_t >, std::vector< std::string > > ResultSet::getUniqueStringsForDictEncodedTargetCol ( const size_t  col_idx) const

Definition at line 1315 of file ResultSet.cpp.

References catalog_(), CHECK, and inline_fixed_encoding_null_val().

1315  {
1316  const auto col_type_info = getColType(col_idx);
1317  std::unordered_set<int32_t> unique_string_ids_set;
1318  const size_t num_entries = entryCount();
1319  std::vector<bool> targets_to_skip(colCount(), true);
1320  targets_to_skip[col_idx] = false;
1321  CHECK(col_type_info.is_dict_encoded_type()); // Array<Text> or Text
1322  const int64_t null_val = inline_fixed_encoding_null_val(
1323  col_type_info.is_array() ? col_type_info.get_elem_type() : col_type_info);
1324 
1325  for (size_t row_idx = 0; row_idx < num_entries; ++row_idx) {
1326  const auto result_row = getRowAtNoTranslations(row_idx, targets_to_skip);
1327  if (!result_row.empty()) {
1328  if (const auto scalar_col_val =
1329  boost::get<ScalarTargetValue>(&result_row[col_idx])) {
1330  const int32_t string_id =
1331  static_cast<int32_t>(boost::get<int64_t>(*scalar_col_val));
1332  if (string_id != null_val) {
1333  unique_string_ids_set.emplace(string_id);
1334  }
1335  } else if (const auto array_col_val =
1336  boost::get<ArrayTargetValue>(&result_row[col_idx])) {
1337  if (*array_col_val) {
1338  for (const ScalarTargetValue& scalar : array_col_val->value()) {
1339  const int32_t string_id = static_cast<int32_t>(boost::get<int64_t>(scalar));
1340  if (string_id != null_val) {
1341  unique_string_ids_set.emplace(string_id);
1342  }
1343  }
1344  }
1345  }
1346  }
1347  }
1348 
1349  const size_t num_unique_strings = unique_string_ids_set.size();
1350  std::vector<int32_t> unique_string_ids(num_unique_strings);
1351  size_t string_idx{0};
1352  for (const auto unique_string_id : unique_string_ids_set) {
1353  unique_string_ids[string_idx++] = unique_string_id;
1354  }
1355 
1356  const int32_t dict_id = col_type_info.get_comp_param();
1357  const auto sdp = row_set_mem_owner_->getOrAddStringDictProxy(
1358  dict_id, /*with_generation=*/true, catalog_);
1359  CHECK(sdp);
1360 
1361  return std::make_pair(unique_string_ids, sdp->getStrings(unique_string_ids));
1362 }
const Catalog_Namespace::Catalog * catalog_
Definition: ResultSet.h:912
size_t colCount() const
Definition: ResultSet.cpp:413
std::vector< TargetValue > getRowAtNoTranslations(const size_t index, const std::vector< bool > &targets_to_skip={}) const
std::shared_ptr< RowSetMemoryOwner > row_set_mem_owner_
Definition: ResultSet.h:909
SQLTypeInfo getColType(const size_t col_idx) const
Definition: ResultSet.cpp:417
size_t entryCount() const
Returns the number of entries the result set is allocated to hold.
#define CHECK(condition)
Definition: Logger.h:222
int64_t inline_fixed_encoding_null_val(const SQL_TYPE_INFO &ti)
boost::variant< int64_t, double, float, NullableString > ScalarTargetValue
Definition: TargetValue.h:180

+ Here is the call graph for this function:

InternalTargetValue ResultSet::getVarlenOrderEntry ( const int64_t  str_ptr,
const size_t  str_len 
) const
private

Definition at line 625 of file ResultSetIteration.cpp.

References CHECK, CPU, device_id_, device_type_, QueryMemoryDescriptor::getExecutor(), getQueryEngineCudaStreamForDevice(), GPU, query_mem_desc_, and row_set_mem_owner_.

626  {
627  char* host_str_ptr{nullptr};
628  std::vector<int8_t> cpu_buffer;
630  cpu_buffer.resize(str_len);
631  const auto executor = query_mem_desc_.getExecutor();
632  CHECK(executor);
633  auto data_mgr = executor->getDataMgr();
634  auto allocator = std::make_unique<CudaAllocator>(
635  data_mgr, device_id_, getQueryEngineCudaStreamForDevice(device_id_));
636  allocator->copyFromDevice(
637  &cpu_buffer[0], reinterpret_cast<int8_t*>(str_ptr), str_len);
638  host_str_ptr = reinterpret_cast<char*>(&cpu_buffer[0]);
639  } else {
641  host_str_ptr = reinterpret_cast<char*>(str_ptr);
642  }
643  std::string str(host_str_ptr, str_len);
644  return InternalTargetValue(row_set_mem_owner_->addString(str));
645 }
QueryMemoryDescriptor query_mem_desc_
Definition: ResultSet.h:902
std::shared_ptr< RowSetMemoryOwner > row_set_mem_owner_
Definition: ResultSet.h:909
CUstream getQueryEngineCudaStreamForDevice(int device_num)
Definition: QueryEngine.cpp:7
const ExecutorDeviceType device_type_
Definition: ResultSet.h:900
#define CHECK(condition)
Definition: Logger.h:222
const Executor * getExecutor() const
const int device_id_
Definition: ResultSet.h:901

+ Here is the call graph for this function:

const VarlenOutputInfo * ResultSet::getVarlenOutputInfo ( const size_t  entry_idx) const
private

Definition at line 1111 of file ResultSetIteration.cpp.

References CHECK, and findStorage().

Referenced by makeGeoTargetValue().

1111  {
1112  auto storage_lookup_result = findStorage(entry_idx);
1113  CHECK(storage_lookup_result.storage_ptr);
1114  return storage_lookup_result.storage_ptr->getVarlenOutputInfo();
1115 }
StorageLookupResult findStorage(const size_t entry_idx) const
Definition: ResultSet.cpp:939
#define CHECK(condition)
Definition: Logger.h:222

+ Here is the call graph for this function:

+ Here is the caller graph for this function:

const bool ResultSet::hasValidBuffer ( ) const
inline

Definition at line 507 of file ResultSet.h.

References storage_.

507  {
508  if (storage_) {
509  return true;
510  }
511  return false;
512  }
std::unique_ptr< ResultSetStorage > storage_
Definition: ResultSet.h:903
void ResultSet::holdChunkIterators ( const std::shared_ptr< std::list< ChunkIter >>  chunk_iters)
inline

Definition at line 422 of file ResultSet.h.

References chunk_iters_.

422  {
423  chunk_iters_.push_back(chunk_iters);
424  }
std::vector< std::shared_ptr< std::list< ChunkIter > > > chunk_iters_
Definition: ResultSet.h:918
void ResultSet::holdChunks ( const std::list< std::shared_ptr< Chunk_NS::Chunk >> &  chunks)
inline

Definition at line 419 of file ResultSet.h.

References chunks_.

419  {
420  chunks_ = chunks;
421  }
std::list< std::shared_ptr< Chunk_NS::Chunk > > chunks_
Definition: ResultSet.h:917
void ResultSet::holdLiterals ( std::vector< int8_t > &  literal_buff)
inline

Definition at line 425 of file ResultSet.h.

References literal_buffers_.

425  {
426  literal_buffers_.push_back(std::move(literal_buff));
427  }
std::vector< std::vector< int8_t > > literal_buffers_
Definition: ResultSet.h:921
void ResultSet::initializeStorage ( ) const

Definition at line 1043 of file ResultSetReduction.cpp.

1043  {
1045  storage_->initializeColWise();
1046  } else {
1047  storage_->initializeRowWise();
1048  }
1049 }
QueryMemoryDescriptor query_mem_desc_
Definition: ResultSet.h:902
std::unique_ptr< ResultSetStorage > storage_
Definition: ResultSet.h:903
PermutationView ResultSet::initPermutationBuffer ( PermutationView  permutation,
PermutationIdx const  begin,
PermutationIdx const  end 
) const
private

Definition at line 846 of file ResultSet.cpp.

References CHECK, DEBUG_TIMER, and VectorView< T >::push_back().

848  {
849  auto timer = DEBUG_TIMER(__func__);
850  for (PermutationIdx i = begin; i < end; ++i) {
851  const auto storage_lookup_result = findStorage(i);
852  const auto lhs_storage = storage_lookup_result.storage_ptr;
853  const auto off = storage_lookup_result.fixedup_entry_idx;
854  CHECK(lhs_storage);
855  if (!lhs_storage->isEmptyEntry(off)) {
856  permutation.push_back(i);
857  }
858  }
859  return permutation;
860 }
DEVICE void push_back(T const &value)
Definition: VectorView.h:73
StorageLookupResult findStorage(const size_t entry_idx) const
Definition: ResultSet.cpp:939
uint32_t PermutationIdx
Definition: ResultSet.h:152
#define CHECK(condition)
Definition: Logger.h:222
#define DEBUG_TIMER(name)
Definition: Logger.h:371

+ Here is the call graph for this function:

void ResultSet::initStatus ( )
inline

Definition at line 452 of file ResultSet.h.

References clearPermutation(), crt_row_buff_idx_, drop_first_, fetched_so_far_, invalidateCachedRowCount(), keep_first_, setGeoReturnType(), and WktString.

452  {
453  // todo(yoonmin): what else we additionally need to consider
454  // to make completely clear status of the resultset for reuse?
455  crt_row_buff_idx_ = 0;
456  fetched_so_far_ = 0;
460  drop_first_ = 0;
461  keep_first_ = 0;
462  }
void setGeoReturnType(const GeoReturnType val)
Definition: ResultSet.h:527
size_t keep_first_
Definition: ResultSet.h:908
void invalidateCachedRowCount() const
Definition: ResultSet.cpp:605
size_t drop_first_
Definition: ResultSet.h:907
size_t fetched_so_far_
Definition: ResultSet.h:906
size_t crt_row_buff_idx_
Definition: ResultSet.h:905
void clearPermutation()
Definition: ResultSet.h:446

+ Here is the call graph for this function:

void ResultSet::invalidateCachedRowCount ( ) const

Definition at line 605 of file ResultSet.cpp.

References uninitialized_cached_row_count.

Referenced by initStatus().

605  {
607 }
std::atomic< int64_t > cached_row_count_
Definition: ResultSet.h:940
constexpr int64_t uninitialized_cached_row_count
Definition: ResultSet.cpp:50

+ Here is the caller graph for this function:

void ResultSet::invalidateResultSetChunks ( )
inline

Definition at line 464 of file ResultSet.h.

References chunk_iters_, and chunks_.

464  {
465  if (!chunks_.empty()) {
466  chunks_.clear();
467  }
468  if (!chunk_iters_.empty()) {
469  chunk_iters_.clear();
470  }
471  };
std::vector< std::shared_ptr< std::list< ChunkIter > > > chunk_iters_
Definition: ResultSet.h:918
std::list< std::shared_ptr< Chunk_NS::Chunk > > chunks_
Definition: ResultSet.h:917
const bool ResultSet::isCached ( ) const
inline

Definition at line 477 of file ResultSet.h.

References cached_.

477 { return cached_; }
bool cached_
Definition: ResultSet.h:947
bool ResultSet::isDirectColumnarConversionPossible ( ) const

Determines if it is possible to directly form a ColumnarResults class from this result set, bypassing the default columnarization.

NOTE: If there exists a permutation vector (i.e., in some ORDER BY queries), it becomes equivalent to the row-wise columnarization.

Definition at line 1371 of file ResultSet.cpp.

References CHECK, g_enable_direct_columnarization, GroupByBaselineHash, GroupByPerfectHash, Projection, and TableFunction.

Referenced by copyColumnIntoBuffer().

1371  {
1373  return false;
1374  } else if (query_mem_desc_.didOutputColumnar()) {
1375  return permutation_.empty() && (query_mem_desc_.getQueryDescriptionType() ==
1383  } else {
1386  return permutation_.empty() && (query_mem_desc_.getQueryDescriptionType() ==
1390  }
1391 }
Permutation permutation_
Definition: ResultSet.h:910
bool g_enable_direct_columnarization
Definition: Execute.cpp:122
QueryMemoryDescriptor query_mem_desc_
Definition: ResultSet.h:902
QueryDescriptionType getQueryDescriptionType() const
#define CHECK(condition)
Definition: Logger.h:222

+ Here is the caller graph for this function:

bool ResultSet::isEmpty ( ) const

Returns a boolean signifying whether there are valid entries in the result set.

Note a result set can be logically empty even if the value returned by ResultSet::entryCount() is > 0, whether due to a SQL LIMIT/OFFSET applied or because the result set representation is inherently sparse (i.e. baseline hash group by).

Internally this function is just implemented as ResultSet::rowCount() == 0, which caches it's value so the row count will only be computed once per finalized result set.

Definition at line 649 of file ResultSet.cpp.

649  {
650  // To simplify this function and de-dup logic with ResultSet::rowCount()
651  // (mismatches between the two were causing bugs), we modified this function
652  // to simply fetch rowCount(). The potential downside of this approach is that
653  // in some cases more work will need to be done, as we can't just stop at the first row.
654  // Mitigating that for most cases is the following:
655  // 1) rowCount() is cached, so the logic for actually computing row counts will run only
656  // once
657  // per result set.
658  // 2) If the cache is empty (cached_row_count_ == -1), rowCount() will use parallel
659  // methods if deemed appropriate, which in many cases could be faster for a sparse
660  // large result set that single-threaded iteration from the beginning
661  // 3) Often where isEmpty() is needed, rowCount() is also needed. Since the first call
662  // to rowCount()
663  // will be cached, there is no extra overhead in these cases
664 
665  return rowCount() == size_t(0);
666 }
size_t rowCount(const bool force_parallel=false) const
Returns the number of valid entries in the result set (i.e that will be returned from the SQL query o...
Definition: ResultSet.cpp:593
const bool ResultSet::isEstimator ( ) const
inline

Definition at line 473 of file ResultSet.h.

References estimator_.

473 { return !estimator_; }
const std::shared_ptr< const Analyzer::Estimator > estimator_
Definition: ResultSet.h:927
bool ResultSet::isExplain ( ) const

Definition at line 740 of file ResultSet.cpp.

740  {
741  return just_explain_;
742 }
const bool just_explain_
Definition: ResultSet.h:938
bool ResultSet::isGeoColOnGpu ( const size_t  col_idx) const

Definition at line 1437 of file ResultSetIteration.cpp.

References CHECK_LT, device_type_, GPU, IS_GEO, lazy_fetch_info_, separate_varlen_storage_valid_, targets_, and to_string().

1437  {
1438  // This should match the logic in makeGeoTargetValue which ultimately calls
1439  // fetch_data_from_gpu when the geo column is on the device.
1440  // TODO(croot): somehow find a way to refactor this and makeGeoTargetValue to use a
1441  // utility function that handles this logic in one place
1442  CHECK_LT(col_idx, targets_.size());
1443  if (!IS_GEO(targets_[col_idx].sql_type.get_type())) {
1444  throw std::runtime_error("Column target at index " + std::to_string(col_idx) +
1445  " is not a geo column. It is of type " +
1446  targets_[col_idx].sql_type.get_type_name() + ".");
1447  }
1448 
1449  const auto& target_info = targets_[col_idx];
1450  if (separate_varlen_storage_valid_ && !target_info.is_agg) {
1451  return false;
1452  }
1453 
1454  if (!lazy_fetch_info_.empty()) {
1455  CHECK_LT(col_idx, lazy_fetch_info_.size());
1456  if (lazy_fetch_info_[col_idx].is_lazily_fetched) {
1457  return false;
1458  }
1459  }
1460 
1462 }
std::string to_string(char const *&&v)
const std::vector< TargetInfo > targets_
Definition: ResultSet.h:899
#define CHECK_LT(x, y)
Definition: Logger.h:232
const ExecutorDeviceType device_type_
Definition: ResultSet.h:900
std::vector< ColumnLazyFetchInfo > lazy_fetch_info_
Definition: ResultSet.h:922
bool separate_varlen_storage_valid_
Definition: ResultSet.h:936
#define IS_GEO(T)
Definition: sqltypes.h:323

+ Here is the call graph for this function:

bool ResultSet::isNull ( const SQLTypeInfo ti,
const InternalTargetValue val,
const bool  float_argument_input 
)
staticprivate

Definition at line 2281 of file ResultSetIteration.cpp.

References CHECK, SQLTypeInfo::get_notnull(), InternalTargetValue::i1, InternalTargetValue::i2, InternalTargetValue::isInt(), InternalTargetValue::isNull(), InternalTargetValue::isPair(), InternalTargetValue::isStr(), and null_val_bit_pattern().

2283  {
2284  if (ti.get_notnull()) {
2285  return false;
2286  }
2287  if (val.isInt()) {
2288  return val.i1 == null_val_bit_pattern(ti, float_argument_input);
2289  }
2290  if (val.isPair()) {
2291  return !val.i2;
2292  }
2293  if (val.isStr()) {
2294  return !val.i1;
2295  }
2296  CHECK(val.isNull());
2297  return true;
2298 }
bool isPair() const
Definition: TargetValue.h:65
bool isStr() const
Definition: TargetValue.h:69
int64_t null_val_bit_pattern(const SQLTypeInfo &ti, const bool float_argument_input)
bool isNull() const
Definition: TargetValue.h:67
bool isInt() const
Definition: TargetValue.h:63
#define CHECK(condition)
Definition: Logger.h:222
HOST DEVICE bool get_notnull() const
Definition: sqltypes.h:411

+ Here is the call graph for this function:

const bool ResultSet::isPermutationBufferEmpty ( ) const
inline

Definition at line 434 of file ResultSet.h.

References permutation_.

434 { return permutation_.empty(); };
Permutation permutation_
Definition: ResultSet.h:910
bool ResultSet::isRowAtEmpty ( const size_t  index) const

Definition at line 284 of file ResultSetIteration.cpp.

284  {
285  if (logical_index >= entryCount()) {
286  return true;
287  }
288  const auto entry_idx =
289  permutation_.empty() ? logical_index : permutation_[logical_index];
290  const auto storage_lookup_result = findStorage(entry_idx);
291  const auto storage = storage_lookup_result.storage_ptr;
292  const auto local_entry_idx = storage_lookup_result.fixedup_entry_idx;
293  return storage->isEmptyEntry(local_entry_idx);
294 }
Permutation permutation_
Definition: ResultSet.h:910
StorageLookupResult findStorage(const size_t entry_idx) const
Definition: ResultSet.cpp:939
size_t entryCount() const
Returns the number of entries the result set is allocated to hold.
bool ResultSet::isTruncated ( ) const

Definition at line 736 of file ResultSet.cpp.

736  {
737  return keep_first_ + drop_first_;
738 }
size_t keep_first_
Definition: ResultSet.h:908
size_t drop_first_
Definition: ResultSet.h:907
bool ResultSet::isValidationOnlyRes ( ) const

Definition at line 748 of file ResultSet.cpp.

748  {
749  return for_validation_only_;
750 }
bool for_validation_only_
Definition: ResultSet.h:939
bool ResultSet::isZeroCopyColumnarConversionPossible ( size_t  column_idx) const

Definition at line 1393 of file ResultSet.cpp.

References Projection, and TableFunction.

1393  {
1398  appended_storage_.empty() && storage_ &&
1399  (lazy_fetch_info_.empty() || !lazy_fetch_info_[column_idx].is_lazily_fetched);
1400 }
AppendedStorage appended_storage_
Definition: ResultSet.h:904
QueryMemoryDescriptor query_mem_desc_
Definition: ResultSet.h:902
std::unique_ptr< ResultSetStorage > storage_
Definition: ResultSet.h:903
QueryDescriptionType getQueryDescriptionType() const
std::vector< ColumnLazyFetchInfo > lazy_fetch_info_
Definition: ResultSet.h:922
void ResultSet::keepFirstN ( const size_t  n)

Definition at line 52 of file ResultSet.cpp.

References anonymous_namespace{Utm.h}::n.

52  {
54  keep_first_ = n;
55 }
size_t keep_first_
Definition: ResultSet.h:908
void invalidateCachedRowCount() const
Definition: ResultSet.cpp:605
constexpr double n
Definition: Utm.h:38
int64_t ResultSet::lazyReadInt ( const int64_t  ival,
const size_t  target_logical_idx,
const StorageLookupResult storage_lookup_result 
) const
private

Definition at line 647 of file ResultSetIteration.cpp.

References CHECK, CHECK_LT, ChunkIter_get_nth(), col_buffers_, ResultSet::StorageLookupResult::fixedup_entry_idx, getColumnFrag(), VarlenDatum::is_null, kENCODING_NONE, result_set::lazy_decode(), lazy_fetch_info_, VarlenDatum::length, VarlenDatum::pointer, row_set_mem_owner_, ResultSet::StorageLookupResult::storage_idx, and targets_.

649  {
650  if (!lazy_fetch_info_.empty()) {
651  CHECK_LT(target_logical_idx, lazy_fetch_info_.size());
652  const auto& col_lazy_fetch = lazy_fetch_info_[target_logical_idx];
653  if (col_lazy_fetch.is_lazily_fetched) {
654  CHECK_LT(static_cast<size_t>(storage_lookup_result.storage_idx),
655  col_buffers_.size());
656  int64_t ival_copy = ival;
657  auto& frag_col_buffers =
658  getColumnFrag(static_cast<size_t>(storage_lookup_result.storage_idx),
659  target_logical_idx,
660  ival_copy);
661  auto& frag_col_buffer = frag_col_buffers[col_lazy_fetch.local_col_id];
662  CHECK_LT(target_logical_idx, targets_.size());
663  const TargetInfo& target_info = targets_[target_logical_idx];
664  CHECK(!target_info.is_agg);
665  if (target_info.sql_type.is_string() &&
666  target_info.sql_type.get_compression() == kENCODING_NONE) {
667  VarlenDatum vd;
668  bool is_end{false};
670  reinterpret_cast<ChunkIter*>(const_cast<int8_t*>(frag_col_buffer)),
671  storage_lookup_result.fixedup_entry_idx,
672  false,
673  &vd,
674  &is_end);
675  CHECK(!is_end);
676  if (vd.is_null) {
677  return 0;
678  }
679  std::string fetched_str(reinterpret_cast<char*>(vd.pointer), vd.length);
680  return reinterpret_cast<int64_t>(row_set_mem_owner_->addString(fetched_str));
681  }
682  return result_set::lazy_decode(col_lazy_fetch, frag_col_buffer, ival_copy);
683  }
684  }
685  return ival;
686 }
bool is_null
Definition: sqltypes.h:173
DEVICE void ChunkIter_get_nth(ChunkIter *it, int n, bool uncompress, VarlenDatum *result, bool *is_end)
Definition: ChunkIter.cpp:182
int8_t * pointer
Definition: sqltypes.h:172
const std::vector< TargetInfo > targets_
Definition: ResultSet.h:899
std::shared_ptr< RowSetMemoryOwner > row_set_mem_owner_
Definition: ResultSet.h:909
int64_t lazy_decode(const ColumnLazyFetchInfo &col_lazy_fetch, const int8_t *byte_stream, const int64_t pos)
#define CHECK_LT(x, y)
Definition: Logger.h:232
std::vector< std::vector< std::vector< const int8_t * > > > col_buffers_
Definition: ResultSet.h:923
#define CHECK(condition)
Definition: Logger.h:222
std::vector< ColumnLazyFetchInfo > lazy_fetch_info_
Definition: ResultSet.h:922
const std::vector< const int8_t * > & getColumnFrag(const size_t storge_idx, const size_t col_logical_idx, int64_t &global_idx) const
size_t length
Definition: sqltypes.h:171

+ Here is the call graph for this function:

TargetValue ResultSet::makeGeoTargetValue ( const int8_t *  geo_target_ptr,
const size_t  slot_idx,
const TargetInfo target_info,
const size_t  target_logical_idx,
const size_t  entry_buff_idx 
) const
private

Definition at line 1468 of file ResultSetIteration.cpp.

References advance_to_next_columnar_target_buff(), CHECK, CHECK_EQ, CHECK_LT, col_buffers_, device_id_, device_type_, QueryMemoryDescriptor::didOutputColumnar(), findStorage(), geo_return_type_, SQLTypeInfo::get_compression(), SQLTypeInfo::get_type(), SQLTypeInfo::get_type_name(), getColumnFrag(), QueryMemoryDescriptor::getExecutor(), QueryMemoryDescriptor::getPaddedColWidthForRange(), QueryMemoryDescriptor::getPaddedSlotWidthBytes(), getStorageIndex(), getVarlenOutputInfo(), GPU, TargetInfo::is_agg, SQLTypeInfo::is_geometry(), ColumnLazyFetchInfo::is_lazily_fetched, kENCODING_GEOINT, kLINESTRING, kMULTILINESTRING, kMULTIPOINT, kMULTIPOLYGON, kPOINT, kPOLYGON, lazy_fetch_info_, ColumnLazyFetchInfo::local_col_id, query_mem_desc_, read_int_from_buff(), separate_varlen_storage_valid_, serialized_varlen_buffer_, QueryMemoryDescriptor::slotIsVarlenOutput(), TargetInfo::sql_type, and UNREACHABLE.

Referenced by getTargetValueFromBufferColwise(), and getTargetValueFromBufferRowwise().

1472  {
1473  CHECK(target_info.sql_type.is_geometry());
1474 
1475  auto getNextTargetBufferRowWise = [&](const size_t slot_idx, const size_t range) {
1476  return geo_target_ptr + query_mem_desc_.getPaddedColWidthForRange(slot_idx, range);
1477  };
1478 
1479  auto getNextTargetBufferColWise = [&](const size_t slot_idx, const size_t range) {
1480  const auto storage_info = findStorage(entry_buff_idx);
1481  auto crt_geo_col_ptr = geo_target_ptr;
1482  for (size_t i = slot_idx; i < slot_idx + range; i++) {
1483  crt_geo_col_ptr = advance_to_next_columnar_target_buff(
1484  crt_geo_col_ptr, storage_info.storage_ptr->query_mem_desc_, i);
1485  }
1486  // adjusting the column pointer to represent a pointer to the geo target value
1487  return crt_geo_col_ptr +
1488  storage_info.fixedup_entry_idx *
1489  storage_info.storage_ptr->query_mem_desc_.getPaddedSlotWidthBytes(
1490  slot_idx + range);
1491  };
1492 
1493  auto getNextTargetBuffer = [&](const size_t slot_idx, const size_t range) {
1495  ? getNextTargetBufferColWise(slot_idx, range)
1496  : getNextTargetBufferRowWise(slot_idx, range);
1497  };
1498 
1499  auto getCoordsDataPtr = [&](const int8_t* geo_target_ptr) {
1500  return read_int_from_buff(getNextTargetBuffer(slot_idx, 0),
1502  };
1503 
1504  auto getCoordsLength = [&](const int8_t* geo_target_ptr) {
1505  return read_int_from_buff(getNextTargetBuffer(slot_idx, 1),
1507  };
1508 
1509  auto getRingSizesPtr = [&](const int8_t* geo_target_ptr) {
1510  return read_int_from_buff(getNextTargetBuffer(slot_idx, 2),
1512  };
1513 
1514  auto getRingSizesLength = [&](const int8_t* geo_target_ptr) {
1515  return read_int_from_buff(getNextTargetBuffer(slot_idx, 3),
1517  };
1518 
1519  auto getPolyRingsPtr = [&](const int8_t* geo_target_ptr) {
1520  return read_int_from_buff(getNextTargetBuffer(slot_idx, 4),
1522  };
1523 
1524  auto getPolyRingsLength = [&](const int8_t* geo_target_ptr) {
1525  return read_int_from_buff(getNextTargetBuffer(slot_idx, 5),
1527  };
1528 
1529  auto getFragColBuffers = [&]() -> decltype(auto) {
1530  const auto storage_idx = getStorageIndex(entry_buff_idx);
1531  CHECK_LT(storage_idx.first, col_buffers_.size());
1532  auto global_idx = getCoordsDataPtr(geo_target_ptr);
1533  return getColumnFrag(storage_idx.first, target_logical_idx, global_idx);
1534  };
1535 
1536  const bool is_gpu_fetch = device_type_ == ExecutorDeviceType::GPU;
1537 
1538  auto getDataMgr = [&]() {
1539  auto executor = query_mem_desc_.getExecutor();
1540  CHECK(executor);
1541  return executor->getDataMgr();
1542  };
1543 
1544  auto getSeparateVarlenStorage = [&]() -> decltype(auto) {
1545  const auto storage_idx = getStorageIndex(entry_buff_idx);
1546  CHECK_LT(storage_idx.first, serialized_varlen_buffer_.size());
1547  const auto& varlen_buffer = serialized_varlen_buffer_[storage_idx.first];
1548  return varlen_buffer;
1549  };
1550 
1551  if (separate_varlen_storage_valid_ && getCoordsDataPtr(geo_target_ptr) < 0) {
1552  CHECK_EQ(-1, getCoordsDataPtr(geo_target_ptr));
1553  return TargetValue(nullptr);
1554  }
1555 
1556  const ColumnLazyFetchInfo* col_lazy_fetch = nullptr;
1557  if (!lazy_fetch_info_.empty()) {
1558  CHECK_LT(target_logical_idx, lazy_fetch_info_.size());
1559  col_lazy_fetch = &lazy_fetch_info_[target_logical_idx];
1560  }
1561 
1562  switch (target_info.sql_type.get_type()) {
1563  case kPOINT: {
1564  if (query_mem_desc_.slotIsVarlenOutput(slot_idx)) {
1565  auto varlen_output_info = getVarlenOutputInfo(entry_buff_idx);
1566  CHECK(varlen_output_info);
1567  auto geo_data_ptr = read_int_from_buff(
1568  geo_target_ptr, query_mem_desc_.getPaddedSlotWidthBytes(slot_idx));
1569  auto cpu_data_ptr =
1570  reinterpret_cast<int64_t>(varlen_output_info->computeCpuOffset(geo_data_ptr));
1571  return GeoTargetValueBuilder<kPOINT, GeoQueryOutputFetchHandler>::build(
1572  target_info.sql_type,
1574  /*data_mgr=*/nullptr,
1575  /*is_gpu_fetch=*/false,
1576  device_id_,
1577  cpu_data_ptr,
1578  target_info.sql_type.get_compression() == kENCODING_GEOINT ? 8 : 16);
1579  } else if (separate_varlen_storage_valid_ && !target_info.is_agg) {
1580  const auto& varlen_buffer = getSeparateVarlenStorage();
1581  CHECK_LT(static_cast<size_t>(getCoordsDataPtr(geo_target_ptr)),
1582  varlen_buffer.size());
1583 
1584  return GeoTargetValueBuilder<kPOINT, GeoQueryOutputFetchHandler>::build(
1585  target_info.sql_type,
1587  nullptr,
1588  false,
1589  device_id_,
1590  reinterpret_cast<int64_t>(
1591  varlen_buffer[getCoordsDataPtr(geo_target_ptr)].data()),
1592  static_cast<int64_t>(varlen_buffer[getCoordsDataPtr(geo_target_ptr)].size()));
1593  } else if (col_lazy_fetch && col_lazy_fetch->is_lazily_fetched) {
1594  const auto& frag_col_buffers = getFragColBuffers();
1595  return GeoTargetValueBuilder<kPOINT, GeoLazyFetchHandler>::build(
1596  target_info.sql_type,
1598  frag_col_buffers[col_lazy_fetch->local_col_id],
1599  getCoordsDataPtr(geo_target_ptr));
1600  } else {
1601  return GeoTargetValueBuilder<kPOINT, GeoQueryOutputFetchHandler>::build(
1602  target_info.sql_type,
1604  is_gpu_fetch ? getDataMgr() : nullptr,
1605  is_gpu_fetch,
1606  device_id_,
1607  getCoordsDataPtr(geo_target_ptr),
1608  getCoordsLength(geo_target_ptr));
1609  }
1610  break;
1611  }
1612  case kMULTIPOINT: {
1613  if (separate_varlen_storage_valid_ && !target_info.is_agg) {
1614  const auto& varlen_buffer = getSeparateVarlenStorage();
1615  CHECK_LT(static_cast<size_t>(getCoordsDataPtr(geo_target_ptr)),
1616  varlen_buffer.size());
1617 
1618  return GeoTargetValueBuilder<kMULTIPOINT, GeoQueryOutputFetchHandler>::build(
1619  target_info.sql_type,
1621  nullptr,
1622  false,
1623  device_id_,
1624  reinterpret_cast<int64_t>(
1625  varlen_buffer[getCoordsDataPtr(geo_target_ptr)].data()),
1626  static_cast<int64_t>(varlen_buffer[getCoordsDataPtr(geo_target_ptr)].size()));
1627  } else if (col_lazy_fetch && col_lazy_fetch->is_lazily_fetched) {
1628  const auto& frag_col_buffers = getFragColBuffers();
1629  return GeoTargetValueBuilder<kMULTIPOINT, GeoLazyFetchHandler>::build(
1630  target_info.sql_type,
1632  frag_col_buffers[col_lazy_fetch->local_col_id],
1633  getCoordsDataPtr(geo_target_ptr));
1634  } else {
1635  return GeoTargetValueBuilder<kMULTIPOINT, GeoQueryOutputFetchHandler>::build(
1636  target_info.sql_type,
1638  is_gpu_fetch ? getDataMgr() : nullptr,
1639  is_gpu_fetch,
1640  device_id_,
1641  getCoordsDataPtr(geo_target_ptr),
1642  getCoordsLength(geo_target_ptr));
1643  }
1644  break;
1645  }
1646  case kLINESTRING: {
1647  if (separate_varlen_storage_valid_ && !target_info.is_agg) {
1648  const auto& varlen_buffer = getSeparateVarlenStorage();
1649  CHECK_LT(static_cast<size_t>(getCoordsDataPtr(geo_target_ptr)),
1650  varlen_buffer.size());
1651 
1652  return GeoTargetValueBuilder<kLINESTRING, GeoQueryOutputFetchHandler>::build(
1653  target_info.sql_type,
1655  nullptr,
1656  false,
1657  device_id_,
1658  reinterpret_cast<int64_t>(
1659  varlen_buffer[getCoordsDataPtr(geo_target_ptr)].data()),
1660  static_cast<int64_t>(varlen_buffer[getCoordsDataPtr(geo_target_ptr)].size()));
1661  } else if (col_lazy_fetch && col_lazy_fetch->is_lazily_fetched) {
1662  const auto& frag_col_buffers = getFragColBuffers();
1663  return GeoTargetValueBuilder<kLINESTRING, GeoLazyFetchHandler>::build(
1664  target_info.sql_type,
1666  frag_col_buffers[col_lazy_fetch->local_col_id],
1667  getCoordsDataPtr(geo_target_ptr));
1668  } else {
1669  return GeoTargetValueBuilder<kLINESTRING, GeoQueryOutputFetchHandler>::build(
1670  target_info.sql_type,
1672  is_gpu_fetch ? getDataMgr() : nullptr,
1673  is_gpu_fetch,
1674  device_id_,
1675  getCoordsDataPtr(geo_target_ptr),
1676  getCoordsLength(geo_target_ptr));
1677  }
1678  break;
1679  }
1680  case kMULTILINESTRING: {
1681  if (separate_varlen_storage_valid_ && !target_info.is_agg) {
1682  const auto& varlen_buffer = getSeparateVarlenStorage();
1683  CHECK_LT(static_cast<size_t>(getCoordsDataPtr(geo_target_ptr) + 1),
1684  varlen_buffer.size());
1685 
1686  return GeoTargetValueBuilder<kMULTILINESTRING, GeoQueryOutputFetchHandler>::build(
1687  target_info.sql_type,
1689  nullptr,
1690  false,
1691  device_id_,
1692  reinterpret_cast<int64_t>(
1693  varlen_buffer[getCoordsDataPtr(geo_target_ptr)].data()),
1694  static_cast<int64_t>(varlen_buffer[getCoordsDataPtr(geo_target_ptr)].size()),
1695  reinterpret_cast<int64_t>(
1696  varlen_buffer[getCoordsDataPtr(geo_target_ptr) + 1].data()),
1697  static_cast<int64_t>(
1698  varlen_buffer[getCoordsDataPtr(geo_target_ptr) + 1].size()));
1699  } else if (col_lazy_fetch && col_lazy_fetch->is_lazily_fetched) {
1700  const auto& frag_col_buffers = getFragColBuffers();
1701 
1702  return GeoTargetValueBuilder<kMULTILINESTRING, GeoLazyFetchHandler>::build(
1703  target_info.sql_type,
1705  frag_col_buffers[col_lazy_fetch->local_col_id],
1706  getCoordsDataPtr(geo_target_ptr),
1707  frag_col_buffers[col_lazy_fetch->local_col_id + 1],
1708  getCoordsDataPtr(geo_target_ptr));
1709  } else {
1710  return GeoTargetValueBuilder<kMULTILINESTRING, GeoQueryOutputFetchHandler>::build(
1711  target_info.sql_type,
1713  is_gpu_fetch ? getDataMgr() : nullptr,
1714  is_gpu_fetch,
1715  device_id_,
1716  getCoordsDataPtr(geo_target_ptr),
1717  getCoordsLength(geo_target_ptr),
1718  getRingSizesPtr(geo_target_ptr),
1719  getRingSizesLength(geo_target_ptr) * 4);
1720  }
1721  break;
1722  }
1723  case kPOLYGON: {
1724  if (separate_varlen_storage_valid_ && !target_info.is_agg) {
1725  const auto& varlen_buffer = getSeparateVarlenStorage();
1726  CHECK_LT(static_cast<size_t>(getCoordsDataPtr(geo_target_ptr) + 1),
1727  varlen_buffer.size());
1728 
1729  return GeoTargetValueBuilder<kPOLYGON, GeoQueryOutputFetchHandler>::build(
1730  target_info.sql_type,
1732  nullptr,
1733  false,
1734  device_id_,
1735  reinterpret_cast<int64_t>(
1736  varlen_buffer[getCoordsDataPtr(geo_target_ptr)].data()),
1737  static_cast<int64_t>(varlen_buffer[getCoordsDataPtr(geo_target_ptr)].size()),
1738  reinterpret_cast<int64_t>(
1739  varlen_buffer[getCoordsDataPtr(geo_target_ptr) + 1].data()),
1740  static_cast<int64_t>(
1741  varlen_buffer[getCoordsDataPtr(geo_target_ptr) + 1].size()));
1742  } else if (col_lazy_fetch && col_lazy_fetch->is_lazily_fetched) {
1743  const auto& frag_col_buffers = getFragColBuffers();
1744 
1745  return GeoTargetValueBuilder<kPOLYGON, GeoLazyFetchHandler>::build(
1746  target_info.sql_type,
1748  frag_col_buffers[col_lazy_fetch->local_col_id],
1749  getCoordsDataPtr(geo_target_ptr),
1750  frag_col_buffers[col_lazy_fetch->local_col_id + 1],
1751  getCoordsDataPtr(geo_target_ptr));
1752  } else {
1753  return GeoTargetValueBuilder<kPOLYGON, GeoQueryOutputFetchHandler>::build(
1754  target_info.sql_type,
1756  is_gpu_fetch ? getDataMgr() : nullptr,
1757  is_gpu_fetch,
1758  device_id_,
1759  getCoordsDataPtr(geo_target_ptr),
1760  getCoordsLength(geo_target_ptr),
1761  getRingSizesPtr(geo_target_ptr),
1762  getRingSizesLength(geo_target_ptr) * 4);
1763  }
1764  break;
1765  }
1766  case kMULTIPOLYGON: {
1767  if (separate_varlen_storage_valid_ && !target_info.is_agg) {
1768  const auto& varlen_buffer = getSeparateVarlenStorage();
1769  CHECK_LT(static_cast<size_t>(getCoordsDataPtr(geo_target_ptr) + 2),
1770  varlen_buffer.size());
1771 
1772  return GeoTargetValueBuilder<kMULTIPOLYGON, GeoQueryOutputFetchHandler>::build(
1773  target_info.sql_type,
1775  nullptr,
1776  false,
1777  device_id_,
1778  reinterpret_cast<int64_t>(
1779  varlen_buffer[getCoordsDataPtr(geo_target_ptr)].data()),
1780  static_cast<int64_t>(varlen_buffer[getCoordsDataPtr(geo_target_ptr)].size()),
1781  reinterpret_cast<int64_t>(
1782  varlen_buffer[getCoordsDataPtr(geo_target_ptr) + 1].data()),
1783  static_cast<int64_t>(
1784  varlen_buffer[getCoordsDataPtr(geo_target_ptr) + 1].size()),
1785  reinterpret_cast<int64_t>(
1786  varlen_buffer[getCoordsDataPtr(geo_target_ptr) + 2].data()),
1787  static_cast<int64_t>(
1788  varlen_buffer[getCoordsDataPtr(geo_target_ptr) + 2].size()));
1789  } else if (col_lazy_fetch && col_lazy_fetch->is_lazily_fetched) {
1790  const auto& frag_col_buffers = getFragColBuffers();
1791 
1792  return GeoTargetValueBuilder<kMULTIPOLYGON, GeoLazyFetchHandler>::build(
1793  target_info.sql_type,
1795  frag_col_buffers[col_lazy_fetch->local_col_id],
1796  getCoordsDataPtr(geo_target_ptr),
1797  frag_col_buffers[col_lazy_fetch->local_col_id + 1],
1798  getCoordsDataPtr(geo_target_ptr),
1799  frag_col_buffers[col_lazy_fetch->local_col_id + 2],
1800  getCoordsDataPtr(geo_target_ptr));
1801  } else {
1802  return GeoTargetValueBuilder<kMULTIPOLYGON, GeoQueryOutputFetchHandler>::build(
1803  target_info.sql_type,
1805  is_gpu_fetch ? getDataMgr() : nullptr,
1806  is_gpu_fetch,
1807  device_id_,
1808  getCoordsDataPtr(geo_target_ptr),
1809  getCoordsLength(geo_target_ptr),
1810  getRingSizesPtr(geo_target_ptr),
1811  getRingSizesLength(geo_target_ptr) * 4,
1812  getPolyRingsPtr(geo_target_ptr),
1813  getPolyRingsLength(geo_target_ptr) * 4);
1814  }
1815  break;
1816  }
1817  default:
1818  throw std::runtime_error("Unknown Geometry type encountered: " +
1819  target_info.sql_type.get_type_name());
1820  }
1821  UNREACHABLE();
1822  return TargetValue(nullptr);
1823 }
#define CHECK_EQ(x, y)
Definition: Logger.h:230
bool slotIsVarlenOutput(const size_t slot_idx) const
std::pair< size_t, size_t > getStorageIndex(const size_t entry_idx) const
Definition: ResultSet.cpp:914
GeoReturnType geo_return_type_
Definition: ResultSet.h:944
T advance_to_next_columnar_target_buff(T target_ptr, const QueryMemoryDescriptor &query_mem_desc, const size_t target_slot_idx)
SQLTypeInfo sql_type
Definition: TargetInfo.h:52
QueryMemoryDescriptor query_mem_desc_
Definition: ResultSet.h:902
#define UNREACHABLE()
Definition: Logger.h:266
int64_t read_int_from_buff(const int8_t *ptr, const int8_t compact_sz)
std::vector< SerializedVarlenBufferStorage > serialized_varlen_buffer_
Definition: ResultSet.h:935
HOST DEVICE SQLTypes get_type() const
Definition: sqltypes.h:404
bool is_agg
Definition: TargetInfo.h:50
size_t getPaddedColWidthForRange(const size_t offset, const size_t range) const
StorageLookupResult findStorage(const size_t entry_idx) const
Definition: ResultSet.cpp:939
const int8_t getPaddedSlotWidthBytes(const size_t slot_idx) const
const VarlenOutputInfo * getVarlenOutputInfo(const size_t entry_idx) const
#define CHECK_LT(x, y)
Definition: Logger.h:232
HOST DEVICE EncodingType get_compression() const
Definition: sqltypes.h:412
std::vector< std::vector< std::vector< const int8_t * > > > col_buffers_
Definition: ResultSet.h:923
std::string get_type_name() const
Definition: sqltypes.h:528
const bool is_lazily_fetched
const ExecutorDeviceType device_type_
Definition: ResultSet.h:900
#define CHECK(condition)
Definition: Logger.h:222
bool is_geometry() const
Definition: sqltypes.h:612
std::vector< ColumnLazyFetchInfo > lazy_fetch_info_
Definition: ResultSet.h:922
bool separate_varlen_storage_valid_
Definition: ResultSet.h:936
boost::variant< ScalarTargetValue, ArrayTargetValue, GeoTargetValue, GeoTargetValuePtr > TargetValue
Definition: TargetValue.h:195
const std::vector< const int8_t * > & getColumnFrag(const size_t storge_idx, const size_t col_logical_idx, int64_t &global_idx) const
const Executor * getExecutor() const
const int device_id_
Definition: ResultSet.h:901

+ Here is the call graph for this function:

+ Here is the caller graph for this function:

TargetValue ResultSet::makeTargetValue ( const int8_t *  ptr,
const int8_t  compact_sz,
const TargetInfo target_info,
const size_t  target_logical_idx,
const bool  translate_strings,
const bool  decimal_to_double,
const size_t  entry_buff_idx 
) const
private

Definition at line 1826 of file ResultSetIteration.cpp.

References TargetInfo::agg_kind, calculateQuantile(), catalog_, CHECK, CHECK_EQ, CHECK_GE, CHECK_LT, col_buffers_, count_distinct_set_size(), decimal_to_int_type(), exp_to_scale(), QueryMemoryDescriptor::forceFourByteFloat(), get_compact_type(), getColumnFrag(), QueryMemoryDescriptor::getCountDistinctDescriptor(), getStorageIndex(), inline_int_null_val(), anonymous_namespace{ResultSetIteration.cpp}::int_resize_cast(), TargetInfo::is_agg, SQLTypeInfo::is_date_in_days(), is_distinct_target(), QueryMemoryDescriptor::isLogicalSizedColumnsAllowed(), kAPPROX_QUANTILE, kAVG, kBIGINT, kENCODING_DICT, kFLOAT, kMAX, kMIN, kSINGLE_VALUE, kSUM, result_set::lazy_decode(), lazy_fetch_info_, NULL_DOUBLE, NULL_INT, query_mem_desc_, read_int_from_buff(), row_set_mem_owner_, and TargetInfo::sql_type.

Referenced by getTargetValueFromBufferColwise(), and getTargetValueFromBufferRowwise().

1832  {
1833  auto actual_compact_sz = compact_sz;
1834  const auto& type_info = target_info.sql_type;
1835  if (type_info.get_type() == kFLOAT && !query_mem_desc_.forceFourByteFloat()) {
1836  if (query_mem_desc_.isLogicalSizedColumnsAllowed()) {
1837  actual_compact_sz = sizeof(float);
1838  } else {
1839  actual_compact_sz = sizeof(double);
1840  }
1841  if (target_info.is_agg &&
1842  (target_info.agg_kind == kAVG || target_info.agg_kind == kSUM ||
1843  target_info.agg_kind == kMIN || target_info.agg_kind == kMAX ||
1844  target_info.agg_kind == kSINGLE_VALUE)) {
1845  // The above listed aggregates use two floats in a single 8-byte slot. Set the
1846  // padded size to 4 bytes to properly read each value.
1847  actual_compact_sz = sizeof(float);
1848  }
1849  }
1850  if (get_compact_type(target_info).is_date_in_days()) {
1851  // Dates encoded in days are converted to 8 byte values on read.
1852  actual_compact_sz = sizeof(int64_t);
1853  }
1854 
1855  // String dictionary keys are read as 32-bit values regardless of encoding
1856  if (type_info.is_string() && type_info.get_compression() == kENCODING_DICT &&
1857  type_info.get_comp_param()) {
1858  actual_compact_sz = sizeof(int32_t);
1859  }
1860 
1861  auto ival = read_int_from_buff(ptr, actual_compact_sz);
1862  const auto& chosen_type = get_compact_type(target_info);
1863  if (!lazy_fetch_info_.empty()) {
1864  CHECK_LT(target_logical_idx, lazy_fetch_info_.size());
1865  const auto& col_lazy_fetch = lazy_fetch_info_[target_logical_idx];
1866  if (col_lazy_fetch.is_lazily_fetched) {
1867  CHECK_GE(ival, 0);
1868  const auto storage_idx = getStorageIndex(entry_buff_idx);
1869  CHECK_LT(storage_idx.first, col_buffers_.size());
1870  auto& frag_col_buffers = getColumnFrag(storage_idx.first, target_logical_idx, ival);
1871  CHECK_LT(size_t(col_lazy_fetch.local_col_id), frag_col_buffers.size());
1872  ival = result_set::lazy_decode(
1873  col_lazy_fetch, frag_col_buffers[col_lazy_fetch.local_col_id], ival);
1874  if (chosen_type.is_fp()) {
1875  const auto dval = *reinterpret_cast<const double*>(may_alias_ptr(&ival));
1876  if (chosen_type.get_type() == kFLOAT) {
1877  return ScalarTargetValue(static_cast<float>(dval));
1878  } else {
1879  return ScalarTargetValue(dval);
1880  }
1881  }
1882  }
1883  }
1884  if (chosen_type.is_fp()) {
1885  if (target_info.agg_kind == kAPPROX_QUANTILE) {
1886  return *reinterpret_cast<double const*>(ptr) == NULL_DOUBLE
1887  ? NULL_DOUBLE // sql_validate / just_validate
1888  : calculateQuantile(*reinterpret_cast<quantile::TDigest* const*>(ptr));
1889  }
1890  switch (actual_compact_sz) {
1891  case 8: {
1892  const auto dval = *reinterpret_cast<const double*>(ptr);
1893  return chosen_type.get_type() == kFLOAT
1894  ? ScalarTargetValue(static_cast<const float>(dval))
1895  : ScalarTargetValue(dval);
1896  }
1897  case 4: {
1898  CHECK_EQ(kFLOAT, chosen_type.get_type());
1899  return *reinterpret_cast<const float*>(ptr);
1900  }
1901  default:
1902  CHECK(false);
1903  }
1904  }
1905  if (chosen_type.is_integer() || chosen_type.is_boolean() || chosen_type.is_time() ||
1906  chosen_type.is_timeinterval()) {
1907  if (is_distinct_target(target_info)) {
1908  return TargetValue(count_distinct_set_size(
1909  ival, query_mem_desc_.getCountDistinctDescriptor(target_logical_idx)));
1910  }
1911  // TODO(alex): remove int_resize_cast, make read_int_from_buff return the
1912  // right type instead
1913  if (inline_int_null_val(chosen_type) ==
1914  int_resize_cast(ival, chosen_type.get_logical_size())) {
1915  return inline_int_null_val(type_info);
1916  }
1917  return ival;
1918  }
1919  if (chosen_type.is_string() && chosen_type.get_compression() == kENCODING_DICT) {
1920  if (translate_strings) {
1921  if (static_cast<int32_t>(ival) ==
1922  NULL_INT) { // TODO(alex): this isn't nice, fix it
1923  return NullableString(nullptr);
1924  }
1925  StringDictionaryProxy* sdp{nullptr};
1926  if (!chosen_type.get_comp_param()) {
1927  sdp = row_set_mem_owner_->getLiteralStringDictProxy();
1928  } else {
1929  sdp = catalog_
1930  ? row_set_mem_owner_->getOrAddStringDictProxy(
1931  chosen_type.get_comp_param(), /*with_generation=*/false, catalog_)
1932  : row_set_mem_owner_->getStringDictProxy(
1933  chosen_type.get_comp_param()); // unit tests bypass the catalog
1934  }
1935  return NullableString(sdp->getString(ival));
1936  } else {
1937  return static_cast<int64_t>(static_cast<int32_t>(ival));
1938  }
1939  }
1940  if (chosen_type.is_decimal()) {
1941  if (decimal_to_double) {
1942  if (target_info.is_agg &&
1943  (target_info.agg_kind == kAVG || target_info.agg_kind == kSUM ||
1944  target_info.agg_kind == kMIN || target_info.agg_kind == kMAX) &&
1945  ival == inline_int_null_val(SQLTypeInfo(kBIGINT, false))) {
1946  return NULL_DOUBLE;
1947  }
1948  if (!chosen_type.get_notnull() &&
1949  ival ==
1950  inline_int_null_val(SQLTypeInfo(decimal_to_int_type(chosen_type), false))) {
1951  return NULL_DOUBLE;
1952  }
1953  return static_cast<double>(ival) / exp_to_scale(chosen_type.get_scale());
1954  }
1955  return ival;
1956  }
1957  CHECK(false);
1958  return TargetValue(int64_t(0));
1959 }
#define CHECK_EQ(x, y)
Definition: Logger.h:230
std::pair< size_t, size_t > getStorageIndex(const size_t entry_idx) const
Definition: ResultSet.cpp:914
#define NULL_DOUBLE
double decimal_to_double(const SQLTypeInfo &otype, int64_t oval)
bool isLogicalSizedColumnsAllowed() const
SQLTypeInfo sql_type
Definition: TargetInfo.h:52
const Catalog_Namespace::Catalog * catalog_
Definition: ResultSet.h:912
QueryMemoryDescriptor query_mem_desc_
Definition: ResultSet.h:902
#define CHECK_GE(x, y)
Definition: Logger.h:235
int64_t read_int_from_buff(const int8_t *ptr, const int8_t compact_sz)
#define NULL_INT
Definition: sqldefs.h:74
const SQLTypeInfo get_compact_type(const TargetInfo &target)
std::shared_ptr< RowSetMemoryOwner > row_set_mem_owner_
Definition: ResultSet.h:909
bool is_agg
Definition: TargetInfo.h:50
int64_t count_distinct_set_size(const int64_t set_handle, const CountDistinctDescriptor &count_distinct_desc)
Definition: CountDistinct.h:75
int64_t lazy_decode(const ColumnLazyFetchInfo &col_lazy_fetch, const int8_t *byte_stream, const int64_t pos)
Definition: sqldefs.h:76
bool is_distinct_target(const TargetInfo &target_info)
Definition: TargetInfo.h:107
static double calculateQuantile(quantile::TDigest *const t_digest)
Definition: ResultSet.cpp:1008
SQLAgg agg_kind
Definition: TargetInfo.h:51
SQLTypes decimal_to_int_type(const SQLTypeInfo &ti)
Definition: Datum.cpp:499
const CountDistinctDescriptor & getCountDistinctDescriptor(const size_t idx) const
#define CHECK_LT(x, y)
Definition: Logger.h:232
bool is_date_in_days() const
Definition: sqltypes.h:998
int64_t int_resize_cast(const int64_t ival, const size_t sz)
std::vector< std::vector< std::vector< const int8_t * > > > col_buffers_
Definition: ResultSet.h:923
boost::variant< std::string, void * > NullableString
Definition: TargetValue.h:179
#define CHECK(condition)
Definition: Logger.h:222
std::vector< ColumnLazyFetchInfo > lazy_fetch_info_
Definition: ResultSet.h:922
uint64_t exp_to_scale(const unsigned exp)
int64_t inline_int_null_val(const SQL_TYPE_INFO &ti)
boost::variant< ScalarTargetValue, ArrayTargetValue, GeoTargetValue, GeoTargetValuePtr > TargetValue
Definition: TargetValue.h:195
Definition: sqldefs.h:75
Definition: sqldefs.h:73
const std::vector< const int8_t * > & getColumnFrag(const size_t storge_idx, const size_t col_logical_idx, int64_t &global_idx) const
boost::variant< int64_t, double, float, NullableString > ScalarTargetValue
Definition: TargetValue.h:180

+ Here is the call graph for this function:

+ Here is the caller graph for this function:

TargetValue ResultSet::makeVarlenTargetValue ( const int8_t *  ptr1,
const int8_t  compact_sz1,
const int8_t *  ptr2,
const int8_t  compact_sz2,
const TargetInfo target_info,
const size_t  target_logical_idx,
const bool  translate_strings,
const size_t  entry_buff_idx 
) const
private

Definition at line 1308 of file ResultSetIteration.cpp.

References anonymous_namespace{ResultSetIteration.cpp}::build_array_target_value(), catalog_, CHECK, CHECK_EQ, CHECK_GE, CHECK_GT, CHECK_LT, ChunkIter_get_nth(), col_buffers_, device_id_, device_type_, SQLTypeInfo::get_array_context_logical_size(), SQLTypeInfo::get_compression(), SQLTypeInfo::get_elem_type(), SQLTypeInfo::get_type(), getColumnFrag(), QueryMemoryDescriptor::getExecutor(), FlatBufferManager::getItem(), getQueryEngineCudaStreamForDevice(), getStorageIndex(), GPU, TargetInfo::is_agg, SQLTypeInfo::is_array(), VarlenDatum::is_null, SQLTypeInfo::is_string(), FlatBufferManager::isFlatBuffer(), kARRAY, kENCODING_NONE, lazy_fetch_info_, VarlenDatum::length, run_benchmark_import::optional, VarlenDatum::pointer, query_mem_desc_, read_int_from_buff(), row_set_mem_owner_, separate_varlen_storage_valid_, serialized_varlen_buffer_, and TargetInfo::sql_type.

Referenced by getTargetValueFromBufferColwise(), and getTargetValueFromBufferRowwise().

1315  {
1316  auto varlen_ptr = read_int_from_buff(ptr1, compact_sz1);
1317  if (separate_varlen_storage_valid_ && !target_info.is_agg) {
1318  if (varlen_ptr < 0) {
1319  CHECK_EQ(-1, varlen_ptr);
1320  if (target_info.sql_type.get_type() == kARRAY) {
1321  return ArrayTargetValue(boost::optional<std::vector<ScalarTargetValue>>{});
1322  }
1323  return TargetValue(nullptr);
1324  }
1325  const auto storage_idx = getStorageIndex(entry_buff_idx);
1326  if (target_info.sql_type.is_string()) {
1327  CHECK(target_info.sql_type.get_compression() == kENCODING_NONE);
1328  CHECK_LT(storage_idx.first, serialized_varlen_buffer_.size());
1329  const auto& varlen_buffer_for_storage =
1330  serialized_varlen_buffer_[storage_idx.first];
1331  CHECK_LT(static_cast<size_t>(varlen_ptr), varlen_buffer_for_storage.size());
1332  return varlen_buffer_for_storage[varlen_ptr];
1333  } else if (target_info.sql_type.get_type() == kARRAY) {
1334  CHECK_LT(storage_idx.first, serialized_varlen_buffer_.size());
1335  const auto& varlen_buffer = serialized_varlen_buffer_[storage_idx.first];
1336  CHECK_LT(static_cast<size_t>(varlen_ptr), varlen_buffer.size());
1337 
1338  return build_array_target_value(
1339  target_info.sql_type,
1340  reinterpret_cast<const int8_t*>(varlen_buffer[varlen_ptr].data()),
1341  varlen_buffer[varlen_ptr].size(),
1342  translate_strings,
1343  row_set_mem_owner_,
1344  catalog_);
1345  } else {
1346  CHECK(false);
1347  }
1348  }
1349  if (!lazy_fetch_info_.empty()) {
1350  CHECK_LT(target_logical_idx, lazy_fetch_info_.size());
1351  const auto& col_lazy_fetch = lazy_fetch_info_[target_logical_idx];
1352  if (col_lazy_fetch.is_lazily_fetched) {
1353  const auto storage_idx = getStorageIndex(entry_buff_idx);
1354  CHECK_LT(storage_idx.first, col_buffers_.size());
1355  auto& frag_col_buffers =
1356  getColumnFrag(storage_idx.first, target_logical_idx, varlen_ptr);
1357  bool is_end{false};
1358  auto col_buf = const_cast<int8_t*>(frag_col_buffers[col_lazy_fetch.local_col_id]);
1359  if (target_info.sql_type.is_string()) {
1360  VarlenDatum vd;
1361  ChunkIter_get_nth(
1362  reinterpret_cast<ChunkIter*>(col_buf), varlen_ptr, false, &vd, &is_end);
1363  CHECK(!is_end);
1364  if (vd.is_null) {
1365  return TargetValue(nullptr);
1366  }
1367  CHECK(vd.pointer);
1368  CHECK_GT(vd.length, 0u);
1369  std::string fetched_str(reinterpret_cast<char*>(vd.pointer), vd.length);
1370  return fetched_str;
1371  } else {
1372  CHECK(target_info.sql_type.is_array());
1373  ArrayDatum ad;
1374  if (FlatBufferManager::isFlatBuffer(col_buf)) {
1375  FlatBufferManager m{col_buf};
1376  int64_t length;
1377  auto status = m.getItem(varlen_ptr, length, ad.pointer, ad.is_null);
1378  CHECK_EQ(status, FlatBufferManager::Status::Success);
1379  CHECK_GE(length, 0);
1380  ad.length = length;
1381  } else {
1382  ChunkIter_get_nth(
1383  reinterpret_cast<ChunkIter*>(col_buf), varlen_ptr, &ad, &is_end);
1384  }
1385  CHECK(!is_end);
1386  if (ad.is_null) {
1387  return ArrayTargetValue(boost::optional<std::vector<ScalarTargetValue>>{});
1388  }
1389  CHECK_GE(ad.length, 0u);
1390  if (ad.length > 0) {
1391  CHECK(ad.pointer);
1392  }
1393  return build_array_target_value(target_info.sql_type,
1394  ad.pointer,
1395  ad.length,
1396  translate_strings,
1397  row_set_mem_owner_,
1398  catalog_);
1399  }
1400  }
1401  }
1402  if (!varlen_ptr) {
1403  if (target_info.sql_type.is_array()) {
1404  return ArrayTargetValue(boost::optional<std::vector<ScalarTargetValue>>{});
1405  }
1406  return TargetValue(nullptr);
1407  }
1408  auto length = read_int_from_buff(ptr2, compact_sz2);
1409  if (target_info.sql_type.is_array()) {
1410  const auto& elem_ti = target_info.sql_type.get_elem_type();
1411  length *= elem_ti.get_array_context_logical_size();
1412  }
1413  std::vector<int8_t> cpu_buffer;
1414  if (varlen_ptr && device_type_ == ExecutorDeviceType::GPU) {
1415  cpu_buffer.resize(length);
1416  const auto executor = query_mem_desc_.getExecutor();
1417  CHECK(executor);
1418  auto data_mgr = executor->getDataMgr();
1419  auto allocator = std::make_unique<CudaAllocator>(
1420  data_mgr, device_id_, getQueryEngineCudaStreamForDevice(device_id_));
1421 
1422  allocator->copyFromDevice(
1423  &cpu_buffer[0], reinterpret_cast<int8_t*>(varlen_ptr), length);
1424  varlen_ptr = reinterpret_cast<int64_t>(&cpu_buffer[0]);
1425  }
1426  if (target_info.sql_type.is_array()) {
1427  return build_array_target_value(target_info.sql_type,
1428  reinterpret_cast<const int8_t*>(varlen_ptr),
1429  length,
1430  translate_strings,
1431  row_set_mem_owner_,
1432  catalog_);
1433  }
1434  return std::string(reinterpret_cast<char*>(varlen_ptr), length);
1435 }
#define CHECK_EQ(x, y)
Definition: Logger.h:230
std::pair< size_t, size_t > getStorageIndex(const size_t entry_idx) const
Definition: ResultSet.cpp:914
bool is_null
Definition: sqltypes.h:173
SQLTypeInfo sql_type
Definition: TargetInfo.h:52
const Catalog_Namespace::Catalog * catalog_
Definition: ResultSet.h:912
QueryMemoryDescriptor query_mem_desc_
Definition: ResultSet.h:902
#define CHECK_GE(x, y)
Definition: Logger.h:235
int64_t read_int_from_buff(const int8_t *ptr, const int8_t compact_sz)
std::vector< SerializedVarlenBufferStorage > serialized_varlen_buffer_
Definition: ResultSet.h:935
HOST DEVICE SQLTypes get_type() const
Definition: sqltypes.h:404
#define CHECK_GT(x, y)
Definition: Logger.h:234
DEVICE void ChunkIter_get_nth(ChunkIter *it, int n, bool uncompress, VarlenDatum *result, bool *is_end)
Definition: ChunkIter.cpp:182
TargetValue build_array_target_value(const int8_t *buff, const size_t buff_sz, std::shared_ptr< RowSetMemoryOwner > row_set_mem_owner)
int8_t * pointer
Definition: sqltypes.h:172
std::conditional_t< is_cuda_compiler(), DeviceArrayDatum, HostArrayDatum > ArrayDatum
Definition: sqltypes.h:228
std::shared_ptr< RowSetMemoryOwner > row_set_mem_owner_
Definition: ResultSet.h:909
bool is_agg
Definition: TargetInfo.h:50
static bool isFlatBuffer(const void *buffer)
Definition: FlatBuffer.h:184
boost::optional< std::vector< ScalarTargetValue >> ArrayTargetValue
Definition: TargetValue.h:181
Status getItem(int64_t index, int64_t &size, int8_t *&dest, bool &is_null)
Definition: FlatBuffer.h:595
#define CHECK_LT(x, y)
Definition: Logger.h:232
HOST DEVICE EncodingType get_compression() const
Definition: sqltypes.h:412
int get_array_context_logical_size() const
Definition: sqltypes.h:698
std::vector< std::vector< std::vector< const int8_t * > > > col_buffers_
Definition: ResultSet.h:923
CUstream getQueryEngineCudaStreamForDevice(int device_num)
Definition: QueryEngine.cpp:7
const ExecutorDeviceType device_type_
Definition: ResultSet.h:900
#define CHECK(condition)
Definition: Logger.h:222
std::vector< ColumnLazyFetchInfo > lazy_fetch_info_
Definition: ResultSet.h:922
bool separate_varlen_storage_valid_
Definition: ResultSet.h:936
boost::variant< ScalarTargetValue, ArrayTargetValue, GeoTargetValue, GeoTargetValuePtr > TargetValue
Definition: TargetValue.h:195
bool is_string() const
Definition: sqltypes.h:600
SQLTypeInfo get_elem_type() const
Definition: sqltypes.h:981
bool is_array() const
Definition: sqltypes.h:608
const std::vector< const int8_t * > & getColumnFrag(const size_t storge_idx, const size_t col_logical_idx, int64_t &global_idx) const
const Executor * getExecutor() const
size_t length
Definition: sqltypes.h:171
const int device_id_
Definition: ResultSet.h:901

+ Here is the call graph for this function:

+ Here is the caller graph for this function:

void ResultSet::moveToBegin ( ) const

Definition at line 731 of file ResultSet.cpp.

731  {
732  crt_row_buff_idx_ = 0;
733  fetched_so_far_ = 0;
734 }
size_t fetched_so_far_
Definition: ResultSet.h:906
size_t crt_row_buff_idx_
Definition: ResultSet.h:905
size_t ResultSet::parallelRowCount ( ) const
private

Definition at line 629 of file ResultSet.cpp.

References anonymous_namespace{ResultSet.cpp}::get_truncated_row_count(), threading_serial::parallel_reduce(), logger::query_id(), and logger::set_thread_local_query_id().

629  {
630  using namespace threading;
631  auto execute_parallel_row_count = [this, query_id = logger::query_id()](
632  const blocked_range<size_t>& r,
633  size_t row_count) {
634  auto qid_scope_guard = logger::set_thread_local_query_id(query_id);
635  for (size_t i = r.begin(); i < r.end(); ++i) {
636  if (!isRowAtEmpty(i)) {
637  ++row_count;
638  }
639  }
640  return row_count;
641  };
642  const auto row_count = parallel_reduce(blocked_range<size_t>(0, entryCount()),
643  size_t(0),
644  execute_parallel_row_count,
645  std::plus<int>());
646  return get_truncated_row_count(row_count, getLimit(), drop_first_);
647 }
QidScopeGuard set_thread_local_query_id(QueryId const query_id)
Definition: Logger.cpp:487
size_t getLimit() const
Definition: ResultSet.cpp:1302
size_t get_truncated_row_count(size_t total_row_count, size_t limit, size_t offset)
Definition: ResultSet.cpp:539
size_t drop_first_
Definition: ResultSet.h:907
Value parallel_reduce(const blocked_range< Int > &range, const Value &identity, const RealBody &real_body, const Reduction &reduction, const Partitioner &p=Partitioner())
Parallel iteration with reduction.
bool isRowAtEmpty(const size_t index) const
size_t entryCount() const
Returns the number of entries the result set is allocated to hold.
QueryId query_id()
Definition: Logger.cpp:473

+ Here is the call graph for this function:

void ResultSet::parallelTop ( const std::list< Analyzer::OrderEntry > &  order_entries,
const size_t  top_n,
const Executor executor 
)
private

Definition at line 866 of file ResultSet.cpp.

References gpu_enabled::copy(), cpu_threads(), DEBUG_TIMER, logger::query_id(), threading_std::task_group::run(), logger::set_thread_local_query_id(), and threading_std::task_group::wait().

868  {
869  auto timer = DEBUG_TIMER(__func__);
870  const size_t nthreads = cpu_threads();
871 
872  // Split permutation_ into nthreads subranges and top-sort in-place.
874  std::vector<PermutationView> permutation_views(nthreads);
875  threading::task_group top_sort_threads;
876  for (auto interval : makeIntervals<PermutationIdx>(0, permutation_.size(), nthreads)) {
877  top_sort_threads.run([this,
878  &order_entries,
879  &permutation_views,
880  top_n,
881  executor,
882  query_id = logger::query_id(),
883  interval] {
884  auto qid_scope_guard = logger::set_thread_local_query_id(query_id);
885  PermutationView pv(permutation_.data() + interval.begin, 0, interval.size());
886  pv = initPermutationBuffer(pv, interval.begin, interval.end);
887  const auto compare = createComparator(order_entries, pv, executor, true);
888  permutation_views[interval.index] = topPermutation(pv, top_n, compare);
889  });
890  }
891  top_sort_threads.wait();
892 
893  // In case you are considering implementing a parallel reduction, note that the
894  // ResultSetComparator constructor is O(N) in order to materialize some of the aggregate
895  // columns as necessary to perform a comparison. This cost is why reduction is chosen to
896  // be serial instead; only one more Comparator is needed below.
897 
898  // Left-copy disjoint top-sorted subranges into one contiguous range.
899  // ++++....+++.....+++++... -> ++++++++++++............
900  auto end = permutation_.begin() + permutation_views.front().size();
901  for (size_t i = 1; i < nthreads; ++i) {
902  std::copy(permutation_views[i].begin(), permutation_views[i].end(), end);
903  end += permutation_views[i].size();
904  }
905 
906  // Top sort final range.
907  PermutationView pv(permutation_.data(), end - permutation_.begin());
908  const auto compare = createComparator(order_entries, pv, executor, false);
909  pv = topPermutation(pv, top_n, compare);
910  permutation_.resize(pv.size());
911  permutation_.shrink_to_fit();
912 }
QidScopeGuard set_thread_local_query_id(QueryId const query_id)
Definition: Logger.cpp:487
Permutation permutation_
Definition: ResultSet.h:910
PermutationView initPermutationBuffer(PermutationView permutation, PermutationIdx const begin, PermutationIdx const end) const
Definition: ResultSet.cpp:846
QueryMemoryDescriptor query_mem_desc_
Definition: ResultSet.h:902
DEVICE auto copy(ARGS &&...args)
Definition: gpu_enabled.h:51
Comparator createComparator(const std::list< Analyzer::OrderEntry > &order_entries, const PermutationView permutation, const Executor *executor, const bool single_threaded)
Definition: ResultSet.h:834
static PermutationView topPermutation(PermutationView, const size_t n, const Comparator &)
Definition: ResultSet.cpp:1208
QueryId query_id()
Definition: Logger.cpp:473
#define DEBUG_TIMER(name)
Definition: Logger.h:371
int cpu_threads()
Definition: thread_count.h:25

+ Here is the call graph for this function:

void ResultSet::radixSortOnCpu ( const std::list< Analyzer::OrderEntry > &  order_entries) const
private

Definition at line 1262 of file ResultSet.cpp.

References apply_permutation_cpu(), CHECK, CHECK_EQ, DEBUG_TIMER, and sort_groups_cpu().

1263  {
1264  auto timer = DEBUG_TIMER(__func__);
1265  CHECK(!query_mem_desc_.hasKeylessHash());
1266  std::vector<int64_t> tmp_buff(query_mem_desc_.getEntryCount());
1267  std::vector<int32_t> idx_buff(query_mem_desc_.getEntryCount());
1268  CHECK_EQ(size_t(1), order_entries.size());
1269  auto buffer_ptr = storage_->getUnderlyingBuffer();
1270  for (const auto& order_entry : order_entries) {
1271  const auto target_idx = order_entry.tle_no - 1;
1272  const auto sortkey_val_buff = reinterpret_cast<int64_t*>(
1273  buffer_ptr + query_mem_desc_.getColOffInBytes(target_idx));
1274  const auto chosen_bytes = query_mem_desc_.getPaddedSlotWidthBytes(target_idx);
1275  sort_groups_cpu(sortkey_val_buff,
1276  &idx_buff[0],
1277  query_mem_desc_.getEntryCount(),
1278  order_entry.is_desc,
1279  chosen_bytes);
1280  apply_permutation_cpu(reinterpret_cast<int64_t*>(buffer_ptr),
1281  &idx_buff[0],
1282  query_mem_desc_.getEntryCount(),
1283  &tmp_buff[0],
1284  sizeof(int64_t));
1285  for (size_t target_idx = 0; target_idx < query_mem_desc_.getSlotCount();
1286  ++target_idx) {
1287  if (static_cast<int>(target_idx) == order_entry.tle_no - 1) {
1288  continue;
1289  }
1290  const auto chosen_bytes = query_mem_desc_.getPaddedSlotWidthBytes(target_idx);
1291  const auto satellite_val_buff = reinterpret_cast<int64_t*>(
1292  buffer_ptr + query_mem_desc_.getColOffInBytes(target_idx));
1293  apply_permutation_cpu(satellite_val_buff,
1294  &idx_buff[0],
1295  query_mem_desc_.getEntryCount(),
1296  &tmp_buff[0],
1297  chosen_bytes);
1298  }
1299  }
1300 }
#define CHECK_EQ(x, y)
Definition: Logger.h:230
void sort_groups_cpu(int64_t *val_buff, int32_t *idx_buff, const uint64_t entry_count, const bool desc, const uint32_t chosen_bytes)
Definition: InPlaceSort.cpp:27
QueryMemoryDescriptor query_mem_desc_
Definition: ResultSet.h:902
std::unique_ptr< ResultSetStorage > storage_
Definition: ResultSet.h:903
void apply_permutation_cpu(int64_t *val_buff, int32_t *idx_buff, const uint64_t entry_count, int64_t *tmp_buff, const uint32_t chosen_bytes)
Definition: InPlaceSort.cpp:46
const int8_t getPaddedSlotWidthBytes(const size_t slot_idx) const
#define CHECK(condition)
Definition: Logger.h:222
#define DEBUG_TIMER(name)
Definition: Logger.h:371
size_t getColOffInBytes(const size_t col_idx) const

+ Here is the call graph for this function:

void ResultSet::radixSortOnGpu ( const std::list< Analyzer::OrderEntry > &  order_entries) const
private

Definition at line 1222 of file ResultSet.cpp.

References catalog_(), CHECK_GT, copy_group_by_buffers_from_gpu(), create_dev_group_by_buffers(), DEBUG_TIMER, getQueryEngineCudaStreamForDevice(), GPU, inplace_sort_gpu(), and KernelPerFragment.

1223  {
1224  auto timer = DEBUG_TIMER(__func__);
1225  auto data_mgr = &catalog_->getDataMgr();
1226  const int device_id{0};
1227  auto allocator = std::make_unique<CudaAllocator>(
1228  data_mgr, device_id, getQueryEngineCudaStreamForDevice(device_id));
1229  CHECK_GT(block_size_, 0);
1230  CHECK_GT(grid_size_, 0);
1231  std::vector<int64_t*> group_by_buffers(block_size_);
1232  group_by_buffers[0] = reinterpret_cast<int64_t*>(storage_->getUnderlyingBuffer());
1233  auto dev_group_by_buffers =
1234  create_dev_group_by_buffers(allocator.get(),
1235  group_by_buffers,
1236  query_mem_desc_,
1237  block_size_,
1238  grid_size_,
1239  device_id,
1240  ExecutorDispatchMode::KernelPerFragment,
1241  /*num_input_rows=*/-1,
1242  /*prepend_index_buffer=*/true,
1243  /*always_init_group_by_on_host=*/true,
1244  /*use_bump_allocator=*/false,
1245  /*has_varlen_output=*/false,
1246  /*insitu_allocator*=*/nullptr);
1247  inplace_sort_gpu(
1248  order_entries, query_mem_desc_, dev_group_by_buffers, data_mgr, device_id);
1249  copy_group_by_buffers_from_gpu(
1250  *allocator,
1251  group_by_buffers,
1252  query_mem_desc_.getBufferSizeBytes(ExecutorDeviceType::GPU),
1253  dev_group_by_buffers.data,
1254  query_mem_desc_,
1255  block_size_,
1256  grid_size_,
1257  device_id,
1258  /*use_bump_allocator=*/false,
1259  /*has_varlen_output=*/false);
1260 }
GpuGroupByBuffers create_dev_group_by_buffers(DeviceAllocator *device_allocator, const std::vector< int64_t * > &group_by_buffers, const QueryMemoryDescriptor &query_mem_desc, const unsigned block_size_x, const unsigned grid_size_x, const int device_id, const ExecutorDispatchMode dispatch_mode, const int64_t num_input_rows, const bool prepend_index_buffer, const bool always_init_group_by_on_host, const bool use_bump_allocator, const bool has_varlen_output, Allocator *insitu_allocator)
Definition: GpuMemUtils.cpp:70
Data_Namespace::DataMgr & getDataMgr() const
Definition: Catalog.h:243
const Catalog_Namespace::Catalog * catalog_
Definition: ResultSet.h:912
QueryMemoryDescriptor query_mem_desc_
Definition: ResultSet.h:902
std::unique_ptr< ResultSetStorage > storage_
Definition: ResultSet.h:903
void inplace_sort_gpu(const std::list< Analyzer::OrderEntry > &order_entries, const QueryMemoryDescriptor &query_mem_desc, const GpuGroupByBuffers &group_by_buffers, Data_Namespace::DataMgr *data_mgr, const int device_id)
#define CHECK_GT(x, y)
Definition: Logger.h:234
unsigned block_size_
Definition: ResultSet.h:913
unsigned grid_size_
Definition: ResultSet.h:914
CUstream getQueryEngineCudaStreamForDevice(int device_num)
Definition: QueryEngine.cpp:7
#define DEBUG_TIMER(name)
Definition: Logger.h:371
void copy_group_by_buffers_from_gpu(DeviceAllocator &device_allocator, const std::vector< int64_t * > &group_by_buffers, const size_t groups_buffer_size, const int8_t *group_by_dev_buffers_mem, const QueryMemoryDescriptor &query_mem_desc, const unsigned block_size_x, const unsigned grid_size_x, const int device_id, const bool prepend_index_buffer, const bool has_varlen_output)

+ Here is the call graph for this function:

size_t ResultSet::rowCount ( const bool  force_parallel = false) const

Returns the number of valid entries in the result set (i.e that will be returned from the SQL query or inputted into the next query step)

Note that this can be less than or equal to the value returned by ResultSet::getEntries(), whether due to a SQL LIMIT/OFFSET applied or because the result set representation is inherently sparse (i.e. baseline hash group by).

Internally this function references/sets a cached value (cached_row_count_) so that the cost of computing the result is only paid once per result set.

If the actual row count is not cached and needs to be computed, in some cases that can be O(1) (i.e. if limits and offsets are present, or for the output of a table function). For projections, we use a binary search, so it is O(log n), otherwise it is O(n) (with n being ResultSet::entryCount()), which will be run in parallel if the entry count >= the default of 20000 or if force_parallel is set to true

Note that we currently do not invalidate the cache if the result set is changed (i.e appended to), so this function should only be called after the result set is finalized.

Parameters
force_parallel	Forces the row count to be computed in parallel if the row count cannot otherwise be computed from metadata or via a binary search (otherwise parallel search is automatically used for result sets with entryCount() >= 20000)

Definition at line 593 of file ResultSet.cpp.

References CHECK_GE, and uninitialized_cached_row_count.

593  {
594  // cached_row_count_ is atomic, so fetch it into a local variable first
595  // to avoid repeat fetches
596  const int64_t cached_row_count = cached_row_count_;
597  if (cached_row_count != uninitialized_cached_row_count) {
598  CHECK_GE(cached_row_count, 0);
599  return cached_row_count;
600  }
601  setCachedRowCount(rowCountImpl(force_parallel));
602  return cached_row_count_;
603 }
#define CHECK_GE(x, y)
Definition: Logger.h:235
std::atomic< int64_t > cached_row_count_
Definition: ResultSet.h:940
size_t rowCountImpl(const bool force_parallel) const
Definition: ResultSet.cpp:555
constexpr int64_t uninitialized_cached_row_count
Definition: ResultSet.cpp:50
void setCachedRowCount(const size_t row_count) const
Definition: ResultSet.cpp:609
size_t ResultSet::rowCountImpl ( const bool  force_parallel) const
private

Definition at line 555 of file ResultSet.cpp.

References CHECK, anonymous_namespace{ResultSet.cpp}::get_truncated_row_count(), Projection, and TableFunction.

555  {
556  if (just_explain_) {
557  return 1;
558  }
560  return entryCount();
561  }
562  if (!permutation_.empty()) {
563  // keep_first_ corresponds to SQL LIMIT
564  // drop_first_ corresponds to SQL OFFSET
566  }
567  if (!storage_) {
568  return 0;
569  }
570  CHECK(permutation_.empty());
572  return binSearchRowCount();
573  }
574 
575  constexpr size_t auto_parallel_row_count_threshold{20000UL};
576  if (force_parallel || entryCount() >= auto_parallel_row_count_threshold) {
577  return parallelRowCount();
578  }
579  std::lock_guard<std::mutex> lock(row_iteration_mutex_);
580  moveToBegin();
581  size_t row_count{0};
582  while (true) {
583  auto crt_row = getNextRowUnlocked(false, false);
584  if (crt_row.empty()) {
585  break;
586  }
587  ++row_count;
588  }
589  moveToBegin();
590  return row_count;
591 }
std::mutex row_iteration_mutex_
Definition: ResultSet.h:941
Permutation permutation_
Definition: ResultSet.h:910
void moveToBegin() const
Definition: ResultSet.cpp:731
QueryMemoryDescriptor query_mem_desc_
Definition: ResultSet.h:902
std::unique_ptr< ResultSetStorage > storage_
Definition: ResultSet.h:903
size_t keep_first_
Definition: ResultSet.h:908
const bool just_explain_
Definition: ResultSet.h:938
size_t get_truncated_row_count(size_t total_row_count, size_t limit, size_t offset)
Definition: ResultSet.cpp:539
size_t parallelRowCount() const
Definition: ResultSet.cpp:629
size_t drop_first_
Definition: ResultSet.h:907
QueryDescriptionType getQueryDescriptionType() const
size_t entryCount() const
Returns the number of entries the result set is allocated to hold.
#define CHECK(condition)
Definition: Logger.h:222
std::vector< TargetValue > getNextRowUnlocked(const bool translate_strings, const bool decimal_to_double) const
size_t binSearchRowCount() const
Definition: ResultSet.cpp:616

+ Here is the call graph for this function:

ResultSetRowIterator ResultSet::rowIterator ( size_t  from_logical_index,
bool  translate_strings,
bool  decimal_to_double 
) const
inline

Definition at line 203 of file ResultSet.h.

Referenced by rowIterator().

205  {
206  ResultSetRowIterator rowIterator(this, translate_strings, decimal_to_double);
207 
208  // move to first logical position
209  ++rowIterator;
210 
211  for (size_t index = 0; index < from_logical_index; index++) {
212  ++rowIterator;
213  }
214 
215  return rowIterator;
216  }
double decimal_to_double(const SQLTypeInfo &otype, int64_t oval)
ResultSetRowIterator rowIterator(size_t from_logical_index, bool translate_strings, bool decimal_to_double) const
Definition: ResultSet.h:203

+ Here is the caller graph for this function:

ResultSetRowIterator ResultSet::rowIterator ( bool  translate_strings,
bool  decimal_to_double 
) const
inline

Definition at line 218 of file ResultSet.h.

References rowIterator().

219  {
220  return rowIterator(0, translate_strings, decimal_to_double);
221  }
double decimal_to_double(const SQLTypeInfo &otype, int64_t oval)
ResultSetRowIterator rowIterator(size_t from_logical_index, bool translate_strings, bool decimal_to_double) const
Definition: ResultSet.h:203

+ Here is the call graph for this function:

void ResultSet::serialize ( TSerializedRows &  serialized_rows) const
void ResultSet::serializeCountDistinctColumns ( TSerializedRows &  ) const
private
void ResultSet::serializeProjection ( TSerializedRows &  serialized_rows) const
private
void ResultSet::serializeVarlenAggColumn ( int8_t *  buf,
std::vector< std::string > &  varlen_bufer 
) const
private
void ResultSet::setCached ( bool  val)
inline

Definition at line 475 of file ResultSet.h.

References cached_.

475 { cached_ = val; }
bool cached_
Definition: ResultSet.h:947
void ResultSet::setCachedRowCount ( const size_t  row_count) const

Definition at line 609 of file ResultSet.cpp.

References CHECK, and uninitialized_cached_row_count.

609  {
610  const int64_t signed_row_count = static_cast<int64_t>(row_count);
611  const int64_t old_cached_row_count = cached_row_count_.exchange(signed_row_count);
612  CHECK(old_cached_row_count == uninitialized_cached_row_count ||
613  old_cached_row_count == signed_row_count);
614 }
std::atomic< int64_t > cached_row_count_
Definition: ResultSet.h:940
#define CHECK(condition)
Definition: Logger.h:222
constexpr int64_t uninitialized_cached_row_count
Definition: ResultSet.cpp:50
void ResultSet::setExecTime ( const long  exec_time)
inline

Definition at line 479 of file ResultSet.h.

References query_exec_time_.

479 { query_exec_time_ = exec_time; }
size_t query_exec_time_
Definition: ResultSet.h:949
void ResultSet::setGeoReturnType ( const GeoReturnType  val)
inline

Definition at line 527 of file ResultSet.h.

References geo_return_type_.

Referenced by initStatus().

527 { geo_return_type_ = val; }
GeoReturnType geo_return_type_
Definition: ResultSet.h:944

+ Here is the caller graph for this function:

void ResultSet::setInputTableKeys ( std::unordered_set< size_t > &&  intput_table_keys)
inline

Definition at line 489 of file ResultSet.h.

References input_table_keys_.

489  {
490  input_table_keys_ = std::move(intput_table_keys);
491  }
std::unordered_set< size_t > input_table_keys_
Definition: ResultSet.h:951
void ResultSet::setKernelQueueTime ( const int64_t  kernel_queue_time)

Definition at line 714 of file ResultSet.cpp.

714  {
715  timings_.kernel_queue_time = kernel_queue_time;
716 }
QueryExecutionTimings timings_
Definition: ResultSet.h:915
void ResultSet::setQueryPlanHash ( const QueryPlanHash  query_plan)
inline

Definition at line 483 of file ResultSet.h.

References query_plan_.

483 { query_plan_ = query_plan; }
QueryPlanHash query_plan_
Definition: ResultSet.h:950
void ResultSet::setQueueTime ( const int64_t  queue_time)

Definition at line 710 of file ResultSet.cpp.

710  {
711  timings_.executor_queue_time = queue_time;
712 }
QueryExecutionTimings timings_
Definition: ResultSet.h:915
void ResultSet::setSeparateVarlenStorageValid ( const bool  val)
inline

Definition at line 569 of file ResultSet.h.

References separate_varlen_storage_valid_.

569  {
571  }
bool separate_varlen_storage_valid_
Definition: ResultSet.h:936
void ResultSet::setTargetMetaInfo ( const std::vector< TargetMetaInfo > &  target_meta_info)
inline

Definition at line 493 of file ResultSet.h.

References gpu_enabled::copy(), and target_meta_info_.

493  {
494  std::copy(target_meta_info.begin(),
495  target_meta_info.end(),
496  std::back_inserter(target_meta_info_));
497  }
DEVICE auto copy(ARGS &&...args)
Definition: gpu_enabled.h:51
std::vector< TargetMetaInfo > target_meta_info_
Definition: ResultSet.h:952

+ Here is the call graph for this function:

void ResultSet::setUseSpeculativeTopNSort ( bool  value)
inline

Definition at line 505 of file ResultSet.h.

References can_use_speculative_top_n_sort.

std::optional< bool > can_use_speculative_top_n_sort
Definition: ResultSet.h:957
void ResultSet::setValidationOnlyRes ( )

Definition at line 744 of file ResultSet.cpp.

744  {
745  for_validation_only_ = true;
746 }
bool for_validation_only_
Definition: ResultSet.h:939
void ResultSet::sort ( const std::list< Analyzer::OrderEntry > &  order_entries,
size_t  top_n,
const Executor executor 
)

Definition at line 768 of file ResultSet.cpp.

References Executor::baseline_threshold, CHECK, DEBUG_TIMER, g_enable_watchdog, g_parallel_top_max, g_parallel_top_min, LOG, VectorView< T >::size(), and logger::WARNING.

770  {
771  auto timer = DEBUG_TIMER(__func__);
772 
773  if (!storage_) {
774  return;
775  }
777  CHECK(!targets_.empty());
778 #ifdef HAVE_CUDA
779  if (canUseFastBaselineSort(order_entries, top_n)) {
780  baselineSort(order_entries, top_n, executor);
781  return;
782  }
783 #endif // HAVE_CUDA
784  if (query_mem_desc_.sortOnGpu()) {
785  try {
786  radixSortOnGpu(order_entries);
787  } catch (const OutOfMemory&) {
788  LOG(WARNING) << "Out of GPU memory during sort, finish on CPU";
789  radixSortOnCpu(order_entries);
790  } catch (const std::bad_alloc&) {
791  LOG(WARNING) << "Out of GPU memory during sort, finish on CPU";
792  radixSortOnCpu(order_entries);
793  }
794  return;
795  }
796  // This check isn't strictly required, but allows the index buffer to be 32-bit.
797  if (query_mem_desc_.getEntryCount() > std::numeric_limits<uint32_t>::max()) {
798  throw RowSortException("Sorting more than 4B elements not supported");
799  }
800 
801  CHECK(permutation_.empty());
802 
803  if (top_n && g_parallel_top_min < entryCount()) {
805  throw WatchdogException("Sorting the result would be too slow");
806  }
807  parallelTop(order_entries, top_n, executor);
808  } else {
810  throw WatchdogException("Sorting the result would be too slow");
811  }
813  // PermutationView is used to share common API with parallelTop().
814  PermutationView pv(permutation_.data(), 0, permutation_.size());
815  pv = initPermutationBuffer(pv, 0, permutation_.size());
816  if (top_n == 0) {
817  top_n = pv.size(); // top_n == 0 implies a full sort
818  }
819  pv = topPermutation(pv, top_n, createComparator(order_entries, pv, executor, false));
820  if (pv.size() < permutation_.size()) {
821  permutation_.resize(pv.size());
822  permutation_.shrink_to_fit();
823  }
824  }
825 }
size_t g_parallel_top_max
Definition: ResultSet.cpp:48
Permutation permutation_
Definition: ResultSet.h:910
PermutationView initPermutationBuffer(PermutationView permutation, PermutationIdx const begin, PermutationIdx const end) const
Definition: ResultSet.cpp:846
#define LOG(tag)
Definition: Logger.h:216
static const size_t baseline_threshold
Definition: Execute.h:1304
QueryMemoryDescriptor query_mem_desc_
Definition: ResultSet.h:902
std::unique_ptr< ResultSetStorage > storage_
Definition: ResultSet.h:903
void parallelTop(const std::list< Analyzer::OrderEntry > &order_entries, const size_t top_n, const Executor *executor)
Definition: ResultSet.cpp:866
void radixSortOnCpu(const std::list< Analyzer::OrderEntry > &order_entries) const
Definition: ResultSet.cpp:1262
size_t g_parallel_top_min
Definition: ResultSet.cpp:47
DEVICE size_type size() const
Definition: VectorView.h:83
const std::vector< TargetInfo > targets_
Definition: ResultSet.h:899
void invalidateCachedRowCount() const
Definition: ResultSet.cpp:605
bool canUseFastBaselineSort(const std::list< Analyzer::OrderEntry > &order_entries, const size_t top_n)
bool g_enable_watchdog
Comparator createComparator(const std::list< Analyzer::OrderEntry > &order_entries, const PermutationView permutation, const Executor *executor, const bool single_threaded)
Definition: ResultSet.h:834
void radixSortOnGpu(const std::list< Analyzer::OrderEntry > &order_entries) const
Definition: ResultSet.cpp:1222
size_t entryCount() const
Returns the number of entries the result set is allocated to hold.
void baselineSort(const std::list< Analyzer::OrderEntry > &order_entries, const size_t top_n, const Executor *executor)
static PermutationView topPermutation(PermutationView, const size_t n, const Comparator &)
Definition: ResultSet.cpp:1208
#define CHECK(condition)
Definition: Logger.h:222
#define DEBUG_TIMER(name)
Definition: Logger.h:371

+ Here is the call graph for this function:

std::string ResultSet::summaryToString ( ) const

Definition at line 218 of file ResultSet.cpp.

218  {
219  std::ostringstream oss;
220  oss << "Result Set Info" << std::endl;
221  oss << "\tLayout: " << query_mem_desc_.queryDescTypeToString() << std::endl;
222  oss << "\tColumns: " << colCount() << std::endl;
223  oss << "\tRows: " << rowCount() << std::endl;
224  oss << "\tEntry count: " << entryCount() << std::endl;
225  const std::string is_empty = isEmpty() ? "True"