OmniSciDB  c1a53651b2
 All Classes Namespaces Files Functions Variables Typedefs Enumerations Enumerator Friends Macros Groups Pages
ResultSet Class Reference

#include <ResultSet.h>

+ Collaboration diagram for ResultSet:

Classes

struct  ColumnWiseTargetAccessor
 
struct  QueryExecutionTimings
 
struct  ResultSetComparator
 
struct  RowIterationState
 
struct  RowWiseTargetAccessor
 
struct  StorageLookupResult
 
struct  TargetOffsets
 
struct  VarlenTargetPtrPair
 

Public Types

enum  GeoReturnType { GeoReturnType::GeoTargetValue, GeoReturnType::WktString, GeoReturnType::GeoTargetValuePtr, GeoReturnType::GeoTargetValueGpuPtr }
 

Public Member Functions

 ResultSet (const std::vector< TargetInfo > &targets, const ExecutorDeviceType device_type, const QueryMemoryDescriptor &query_mem_desc, const std::shared_ptr< RowSetMemoryOwner > row_set_mem_owner, const unsigned block_size, const unsigned grid_size)
 
 ResultSet (const std::vector< TargetInfo > &targets, const std::vector< ColumnLazyFetchInfo > &lazy_fetch_info, const std::vector< std::vector< const int8_t * >> &col_buffers, const std::vector< std::vector< int64_t >> &frag_offsets, const std::vector< int64_t > &consistent_frag_sizes, const ExecutorDeviceType device_type, const int device_id, const QueryMemoryDescriptor &query_mem_desc, const std::shared_ptr< RowSetMemoryOwner > row_set_mem_owner, const unsigned block_size, const unsigned grid_size)
 
 ResultSet (const std::shared_ptr< const Analyzer::Estimator >, const ExecutorDeviceType device_type, const int device_id, Data_Namespace::DataMgr *data_mgr)
 
 ResultSet (const std::string &explanation)
 
 ResultSet (int64_t queue_time_ms, int64_t render_time_ms, const std::shared_ptr< RowSetMemoryOwner > row_set_mem_owner)
 
 ~ResultSet ()
 
std::string toString () const
 
std::string summaryToString () const
 
ResultSetRowIterator rowIterator (size_t from_logical_index, bool translate_strings, bool decimal_to_double) const
 
ResultSetRowIterator rowIterator (bool translate_strings, bool decimal_to_double) const
 
ExecutorDeviceType getDeviceType () const
 
const ResultSetStorage * allocateStorage () const
 
const ResultSetStorage * allocateStorage (int8_t *, const std::vector< int64_t > &, std::shared_ptr< VarlenOutputInfo >=nullptr) const
 
const ResultSetStorage * allocateStorage (const std::vector< int64_t > &) const
 
void updateStorageEntryCount (const size_t new_entry_count)
 
std::vector< TargetValue > getNextRow (const bool translate_strings, const bool decimal_to_double) const
 
size_t getCurrentRowBufferIndex () const
 
std::vector< TargetValue > getRowAt (const size_t index) const
 
TargetValue getRowAt (const size_t row_idx, const size_t col_idx, const bool translate_strings, const bool decimal_to_double=true) const
 
OneIntegerColumnRow getOneColRow (const size_t index) const
 
std::vector< TargetValue > getRowAtNoTranslations (const size_t index, const std::vector< bool > &targets_to_skip={}) const
 
bool isRowAtEmpty (const size_t index) const
 
void sort (const std::list< Analyzer::OrderEntry > &order_entries, size_t top_n, const Executor *executor)
 
void keepFirstN (const size_t n)
 
void dropFirstN (const size_t n)
 
void append (ResultSet &that)
 
const ResultSetStorage * getStorage () const
 
size_t colCount () const
 
SQLTypeInfo getColType (const size_t col_idx) const
 
size_t rowCount (const bool force_parallel=false) const
 Returns the number of valid entries in the result set (i.e. those that will be returned from the SQL query or input into the next query step) More...
 
void invalidateCachedRowCount () const
 
void setCachedRowCount (const size_t row_count) const
 
bool isEmpty () const
 Returns a boolean signifying whether there are valid entries in the result set. More...
 
size_t entryCount () const
 Returns the number of entries the result set is allocated to hold. More...
 
size_t getBufferSizeBytes (const ExecutorDeviceType device_type) const
 
bool definitelyHasNoRows () const
 
const QueryMemoryDescriptor & getQueryMemDesc () const
 
const std::vector< TargetInfo > & getTargetInfos () const
 
const std::vector< int64_t > & getTargetInitVals () const
 
int8_t * getDeviceEstimatorBuffer () const
 
int8_t * getHostEstimatorBuffer () const
 
void syncEstimatorBuffer () const
 
size_t getNDVEstimator () const
 
void setQueueTime (const int64_t queue_time)
 
void setKernelQueueTime (const int64_t kernel_queue_time)
 
void addCompilationQueueTime (const int64_t compilation_queue_time)
 
int64_t getQueueTime () const
 
int64_t getRenderTime () const
 
void moveToBegin () const
 
bool isTruncated () const
 
bool isExplain () const
 
void setValidationOnlyRes ()
 
bool isValidationOnlyRes () const
 
std::string getExplanation () const
 
bool isGeoColOnGpu (const size_t col_idx) const
 
int getDeviceId () const
 
std::string getString (SQLTypeInfo const &, int64_t const ival) const
 
ScalarTargetValue convertToScalarTargetValue (SQLTypeInfo const &, bool const translate_strings, int64_t const val) const
 
bool isLessThan (SQLTypeInfo const &, int64_t const lhs, int64_t const rhs) const
 
void fillOneEntry (const std::vector< int64_t > &entry)
 
void initializeStorage () const
 
void holdChunks (const std::list< std::shared_ptr< Chunk_NS::Chunk >> &chunks)
 
void holdChunkIterators (const std::shared_ptr< std::list< ChunkIter >> chunk_iters)
 
void holdLiterals (std::vector< int8_t > &literal_buff)
 
std::shared_ptr
< RowSetMemoryOwner > 
getRowSetMemOwner () const
 
const Permutation & getPermutationBuffer () const
 
const bool isPermutationBufferEmpty () const
 
void serialize (TSerializedRows &serialized_rows) const
 
size_t getLimit () const
 
ResultSetPtr copy ()
 
void clearPermutation ()
 
void initStatus ()
 
void invalidateResultSetChunks ()
 
const bool isEstimator () const
 
void setCached (bool val)
 
const bool isCached () const
 
void setExecTime (const long exec_time)
 
const long getExecTime () const
 
void setQueryPlanHash (const QueryPlanHash query_plan)
 
const QueryPlanHash getQueryPlanHash ()
 
std::unordered_set< size_t > getInputTableKeys () const
 
void setInputTableKeys (std::unordered_set< size_t > &&intput_table_keys)
 
void setTargetMetaInfo (const std::vector< TargetMetaInfo > &target_meta_info)
 
std::vector< TargetMetaInfo > getTargetMetaInfo ()
 
std::optional< bool > canUseSpeculativeTopNSort () const
 
void setUseSpeculativeTopNSort (bool value)
 
const bool hasValidBuffer () const
 
unsigned getBlockSize () const
 
unsigned getGridSize () const
 
GeoReturnType getGeoReturnType () const
 
void setGeoReturnType (const GeoReturnType val)
 
void copyColumnIntoBuffer (const size_t column_idx, int8_t *output_buffer, const size_t output_buffer_size) const
 
bool isDirectColumnarConversionPossible () const
 
bool didOutputColumnar () const
 
bool isZeroCopyColumnarConversionPossible (size_t column_idx) const
 
const int8_t * getColumnarBuffer (size_t column_idx) const
 
QueryDescriptionType getQueryDescriptionType () const
 
const int8_t getPaddedSlotWidthBytes (const size_t slot_idx) const
 
std::tuple< std::vector< bool >
, size_t > 
getSingleSlotTargetBitmap () const
 
std::tuple< std::vector< bool >
, size_t > 
getSupportedSingleSlotTargetBitmap () const
 
std::vector< size_t > getSlotIndicesForTargetIndices () const
 
const std::vector
< ColumnLazyFetchInfo > & 
getLazyFetchInfo () const
 
bool areAnyColumnsLazyFetched () const
 
size_t getNumColumnsLazyFetched () const
 
void setSeparateVarlenStorageValid (const bool val)
 
const std::vector< std::string > getStringDictionaryPayloadCopy (const shared::StringDictKey &dict_key) const
 
const std::pair< std::vector
< int32_t >, std::vector
< std::string > > 
getUniqueStringsForDictEncodedTargetCol (const size_t col_idx) const
 
StringDictionaryProxy * getStringDictionaryProxy (const shared::StringDictKey &dict_key) const
 
template<typename ENTRY_TYPE , QueryDescriptionType QUERY_TYPE, bool COLUMNAR_FORMAT>
ENTRY_TYPE getEntryAt (const size_t row_idx, const size_t target_idx, const size_t slot_idx) const
 
ChunkStats getTableFunctionChunkStats (const size_t target_idx) const
 
void translateDictEncodedColumns (std::vector< TargetInfo > const &, size_t const start_idx)
 
void eachCellInColumn (RowIterationState &, CellCallback const &)
 
const Executor * getExecutor () const
 
template<typename ENTRY_TYPE , QueryDescriptionType QUERY_TYPE, bool COLUMNAR_FORMAT>
ENTRY_TYPE getEntryAt (const size_t row_idx, const size_t target_idx, const size_t slot_idx) const
 
template<typename ENTRY_TYPE >
ENTRY_TYPE getColumnarPerfectHashEntryAt (const size_t row_idx, const size_t target_idx, const size_t slot_idx) const
 
template<typename ENTRY_TYPE >
ENTRY_TYPE getRowWisePerfectHashEntryAt (const size_t row_idx, const size_t target_idx, const size_t slot_idx) const
 
template<typename ENTRY_TYPE >
ENTRY_TYPE getRowWiseBaselineEntryAt (const size_t row_idx, const size_t target_idx, const size_t slot_idx) const
 
template<typename ENTRY_TYPE >
ENTRY_TYPE getColumnarBaselineEntryAt (const size_t row_idx, const size_t target_idx, const size_t slot_idx) const
 

Static Public Member Functions

static QueryMemoryDescriptor fixupQueryMemoryDescriptor (const QueryMemoryDescriptor &)
 
static bool isNullIval (SQLTypeInfo const &, bool const translate_strings, int64_t const ival)
 
static ScalarTargetValue nullScalarTargetValue (SQLTypeInfo const &, bool const translate_strings)
 
static std::unique_ptr< ResultSet > unserialize (const TSerializedRows &serialized_rows, const Executor *)
 
static double calculateQuantile (quantile::TDigest *const t_digest)
 

Public Attributes

friend ResultSetBuilder
 

Private Types

using ApproxQuantileBuffers = std::vector< std::vector< double >>
 
using ModeBuffers = std::vector< std::vector< int64_t >>
 
using SerializedVarlenBufferStorage = std::vector< std::string >
 

Private Member Functions

void advanceCursorToNextEntry (ResultSetRowIterator &iter) const
 
std::vector< TargetValue > getNextRowImpl (const bool translate_strings, const bool decimal_to_double) const
 
std::vector< TargetValue > getNextRowUnlocked (const bool translate_strings, const bool decimal_to_double) const
 
std::vector< TargetValue > getRowAt (const size_t index, const bool translate_strings, const bool decimal_to_double, const bool fixup_count_distinct_pointers, const std::vector< bool > &targets_to_skip={}) const
 
template<typename ENTRY_TYPE >
ENTRY_TYPE getColumnarPerfectHashEntryAt (const size_t row_idx, const size_t target_idx, const size_t slot_idx) const
 
template<typename ENTRY_TYPE >
ENTRY_TYPE getRowWisePerfectHashEntryAt (const size_t row_idx, const size_t target_idx, const size_t slot_idx) const
 
template<typename ENTRY_TYPE >
ENTRY_TYPE getRowWiseBaselineEntryAt (const size_t row_idx, const size_t target_idx, const size_t slot_idx) const
 
template<typename ENTRY_TYPE >
ENTRY_TYPE getColumnarBaselineEntryAt (const size_t row_idx, const size_t target_idx, const size_t slot_idx) const
 
size_t binSearchRowCount () const
 
size_t parallelRowCount () const
 
size_t advanceCursorToNextEntry () const
 
void radixSortOnGpu (const std::list< Analyzer::OrderEntry > &order_entries) const
 
void radixSortOnCpu (const std::list< Analyzer::OrderEntry > &order_entries) const
 
TargetValue getTargetValueFromBufferRowwise (int8_t *rowwise_target_ptr, int8_t *keys_ptr, const size_t entry_buff_idx, const TargetInfo &target_info, const size_t target_logical_idx, const size_t slot_idx, const bool translate_strings, const bool decimal_to_double, const bool fixup_count_distinct_pointers) const
 
TargetValue getTargetValueFromBufferColwise (const int8_t *col_ptr, const int8_t *keys_ptr, const QueryMemoryDescriptor &query_mem_desc, const size_t local_entry_idx, const size_t global_entry_idx, const TargetInfo &target_info, const size_t target_logical_idx, const size_t slot_idx, const bool translate_strings, const bool decimal_to_double) const
 
TargetValue makeTargetValue (const int8_t *ptr, const int8_t compact_sz, const TargetInfo &target_info, const size_t target_logical_idx, const bool translate_strings, const bool decimal_to_double, const size_t entry_buff_idx) const
 
ScalarTargetValue makeStringTargetValue (SQLTypeInfo const &chosen_type, bool const translate_strings, int64_t const ival) const
 
TargetValue makeVarlenTargetValue (const int8_t *ptr1, const int8_t compact_sz1, const int8_t *ptr2, const int8_t compact_sz2, const TargetInfo &target_info, const size_t target_logical_idx, const bool translate_strings, const size_t entry_buff_idx) const
 
TargetValue makeGeoTargetValue (const int8_t *geo_target_ptr, const size_t slot_idx, const TargetInfo &target_info, const size_t target_logical_idx, const size_t entry_buff_idx) const
 
InternalTargetValue getVarlenOrderEntry (const int64_t str_ptr, const size_t str_len) const
 
int64_t lazyReadInt (const int64_t ival, const size_t target_logical_idx, const StorageLookupResult &storage_lookup_result) const
 
std::pair< size_t, size_t > getStorageIndex (const size_t entry_idx) const
 
const std::vector< const
int8_t * > & 
getColumnFrag (const size_t storge_idx, const size_t col_logical_idx, int64_t &global_idx) const
 
const VarlenOutputInfo * getVarlenOutputInfo (const size_t entry_idx) const
 
StorageLookupResult findStorage (const size_t entry_idx) const
 
Comparator createComparator (const std::list< Analyzer::OrderEntry > &order_entries, const PermutationView permutation, const Executor *executor, const bool single_threaded)
 
PermutationView initPermutationBuffer (PermutationView permutation, PermutationIdx const begin, PermutationIdx const end) const
 
void parallelTop (const std::list< Analyzer::OrderEntry > &order_entries, const size_t top_n, const Executor *executor)
 
void baselineSort (const std::list< Analyzer::OrderEntry > &order_entries, const size_t top_n, const Executor *executor)
 
void doBaselineSort (const ExecutorDeviceType device_type, const std::list< Analyzer::OrderEntry > &order_entries, const size_t top_n, const Executor *executor)
 
bool canUseFastBaselineSort (const std::list< Analyzer::OrderEntry > &order_entries, const size_t top_n)
 
size_t rowCountImpl (const bool force_parallel) const
 
Data_Namespace::DataMgr * getDataManager () const
 
int getGpuCount () const
 
void serializeProjection (TSerializedRows &serialized_rows) const
 
void serializeVarlenAggColumn (int8_t *buf, std::vector< std::string > &varlen_bufer) const
 
void serializeCountDistinctColumns (TSerializedRows &) const
 
void unserializeCountDistinctColumns (const TSerializedRows &)
 
void fixupCountDistinctPointers ()
 
void create_active_buffer_set (CountDistinctSet &count_distinct_active_buffer_set) const
 
int64_t getDistinctBufferRefFromBufferRowwise (int8_t *rowwise_target_ptr, const TargetInfo &target_info) const
 

Static Private Member Functions

static bool isNull (const SQLTypeInfo &ti, const InternalTargetValue &val, const bool float_argument_input)
 
static PermutationView topPermutation (PermutationView, const size_t n, const Comparator &)
 

Private Attributes

const std::vector< TargetInfotargets_
 
const ExecutorDeviceType device_type_
 
const int device_id_
 
QueryMemoryDescriptor query_mem_desc_
 
std::unique_ptr< ResultSetStoragestorage_
 
AppendedStorage appended_storage_
 
size_t crt_row_buff_idx_
 
size_t fetched_so_far_
 
size_t drop_first_
 
size_t keep_first_
 
std::shared_ptr
< RowSetMemoryOwner
row_set_mem_owner_
 
Permutation permutation_
 
unsigned block_size_ {0}
 
unsigned grid_size_ {0}
 
QueryExecutionTimings timings_
 
std::list< std::shared_ptr
< Chunk_NS::Chunk > > 
chunks_
 
std::vector< std::shared_ptr
< std::list< ChunkIter > > > 
chunk_iters_
 
std::vector< std::vector
< int8_t > > 
literal_buffers_
 
std::vector< ColumnLazyFetchInfolazy_fetch_info_
 
std::vector< std::vector
< std::vector< const int8_t * > > > 
col_buffers_
 
std::vector< std::vector
< std::vector< int64_t > > > 
frag_offsets_
 
std::vector< std::vector
< int64_t > > 
consistent_frag_sizes_
 
const std::shared_ptr< const
Analyzer::Estimator
estimator_
 
Data_Namespace::AbstractBufferdevice_estimator_buffer_ {nullptr}
 
int8_t * host_estimator_buffer_ {nullptr}
 
Data_Namespace::DataMgrdata_mgr_
 
std::vector
< SerializedVarlenBufferStorage
serialized_varlen_buffer_
 
bool separate_varlen_storage_valid_
 
std::string explanation_
 
const bool just_explain_
 
bool for_validation_only_
 
std::atomic< int64_t > cached_row_count_
 
std::mutex row_iteration_mutex_
 
GeoReturnType geo_return_type_
 
bool cached_
 
size_t query_exec_time_
 
QueryPlanHash query_plan_
 
std::unordered_set< size_t > input_table_keys_
 
std::vector< TargetMetaInfotarget_meta_info_
 
std::optional< bool > can_use_speculative_top_n_sort
 

Friends

class ResultSetManager
 
class ResultSetRowIterator
 
class ColumnarResults
 

Detailed Description

Definition at line 157 of file ResultSet.h.

Member Typedef Documentation

using ResultSet::ApproxQuantileBuffers = std::vector<std::vector<double>>
private

Definition at line 821 of file ResultSet.h.

using ResultSet::ModeBuffers = std::vector<std::vector<int64_t>>
private

Definition at line 822 of file ResultSet.h.

using ResultSet::SerializedVarlenBufferStorage = std::vector<std::string>
private

Definition at line 967 of file ResultSet.h.

Member Enumeration Documentation

Geo return type options when accessing geo columns from a result set.

Enumerator
GeoTargetValue 

Copies the geo data into a struct of vectors - coords are uncompressed

WktString 

Returns the geo data as a WKT string

GeoTargetValuePtr 

Returns only the pointers of the underlying buffers for the geo data.

GeoTargetValueGpuPtr 

If geo data is currently on a device, keep the data on the device and return the device ptrs

Definition at line 539 of file ResultSet.h.

539  {
542  WktString,
545  GeoTargetValueGpuPtr
547  };
boost::optional< boost::variant< GeoPointTargetValue, GeoMultiPointTargetValue, GeoLineStringTargetValue, GeoMultiLineStringTargetValue, GeoPolyTargetValue, GeoMultiPolyTargetValue >> GeoTargetValue
Definition: TargetValue.h:187
boost::variant< GeoPointTargetValuePtr, GeoMultiPointTargetValuePtr, GeoLineStringTargetValuePtr, GeoMultiLineStringTargetValuePtr, GeoPolyTargetValuePtr, GeoMultiPolyTargetValuePtr > GeoTargetValuePtr
Definition: TargetValue.h:193

Constructor & Destructor Documentation

ResultSet::ResultSet ( const std::vector< TargetInfo > &  targets,
const ExecutorDeviceType  device_type,
const QueryMemoryDescriptor query_mem_desc,
const std::shared_ptr< RowSetMemoryOwner row_set_mem_owner,
const unsigned  block_size,
const unsigned  grid_size 
)

Definition at line 64 of file ResultSet.cpp.

70  : targets_(targets)
71  , device_type_(device_type)
72  , device_id_(-1)
73  , query_mem_desc_(query_mem_desc)
75  , fetched_so_far_(0)
76  , drop_first_(0)
77  , keep_first_(0)
78  , row_set_mem_owner_(row_set_mem_owner)
79  , block_size_(block_size)
80  , grid_size_(grid_size)
81  , data_mgr_(nullptr)
83  , just_explain_(false)
84  , for_validation_only_(false)
87  , cached_(false)
88  , query_exec_time_(0)
90  , can_use_speculative_top_n_sort(std::nullopt) {}
bool for_validation_only_
Definition: ResultSet.h:973
GeoReturnType geo_return_type_
Definition: ResultSet.h:978
constexpr QueryPlanHash EMPTY_HASHED_PLAN_DAG_KEY
std::optional< bool > can_use_speculative_top_n_sort
Definition: ResultSet.h:991
QueryMemoryDescriptor query_mem_desc_
Definition: ResultSet.h:937
size_t query_exec_time_
Definition: ResultSet.h:983
size_t keep_first_
Definition: ResultSet.h:943
const bool just_explain_
Definition: ResultSet.h:972
unsigned block_size_
Definition: ResultSet.h:947
std::atomic< int64_t > cached_row_count_
Definition: ResultSet.h:974
const std::vector< TargetInfo > targets_
Definition: ResultSet.h:934
std::shared_ptr< RowSetMemoryOwner > row_set_mem_owner_
Definition: ResultSet.h:944
size_t drop_first_
Definition: ResultSet.h:942
bool cached_
Definition: ResultSet.h:981
unsigned grid_size_
Definition: ResultSet.h:948
Data_Namespace::DataMgr * data_mgr_
Definition: ResultSet.h:964
const ExecutorDeviceType device_type_
Definition: ResultSet.h:935
size_t fetched_so_far_
Definition: ResultSet.h:941
size_t crt_row_buff_idx_
Definition: ResultSet.h:940
QueryPlanHash query_plan_
Definition: ResultSet.h:984
bool separate_varlen_storage_valid_
Definition: ResultSet.h:970
constexpr int64_t uninitialized_cached_row_count
Definition: ResultSet.cpp:52
const int device_id_
Definition: ResultSet.h:936
ResultSet::ResultSet ( const std::vector< TargetInfo > &  targets,
const std::vector< ColumnLazyFetchInfo > &  lazy_fetch_info,
const std::vector< std::vector< const int8_t * >> &  col_buffers,
const std::vector< std::vector< int64_t >> &  frag_offsets,
const std::vector< int64_t > &  consistent_frag_sizes,
const ExecutorDeviceType  device_type,
const int  device_id,
const QueryMemoryDescriptor query_mem_desc,
const std::shared_ptr< RowSetMemoryOwner row_set_mem_owner,
const unsigned  block_size,
const unsigned  grid_size 
)

Definition at line 92 of file ResultSet.cpp.

103  : targets_(targets)
104  , device_type_(device_type)
105  , device_id_(device_id)
106  , query_mem_desc_(query_mem_desc)
107  , crt_row_buff_idx_(0)
108  , fetched_so_far_(0)
109  , drop_first_(0)
110  , keep_first_(0)
111  , row_set_mem_owner_(row_set_mem_owner)
112  , block_size_(block_size)
113  , grid_size_(grid_size)
114  , lazy_fetch_info_(lazy_fetch_info)
115  , col_buffers_{col_buffers}
116  , frag_offsets_{frag_offsets}
117  , consistent_frag_sizes_{consistent_frag_sizes}
118  , data_mgr_(nullptr)
120  , just_explain_(false)
121  , for_validation_only_(false)
124  , cached_(false)
125  , query_exec_time_(0)
127  , can_use_speculative_top_n_sort(std::nullopt) {}
bool for_validation_only_
Definition: ResultSet.h:973
GeoReturnType geo_return_type_
Definition: ResultSet.h:978
constexpr QueryPlanHash EMPTY_HASHED_PLAN_DAG_KEY
std::optional< bool > can_use_speculative_top_n_sort
Definition: ResultSet.h:991
QueryMemoryDescriptor query_mem_desc_
Definition: ResultSet.h:937
size_t query_exec_time_
Definition: ResultSet.h:983
size_t keep_first_
Definition: ResultSet.h:943
const bool just_explain_
Definition: ResultSet.h:972
unsigned block_size_
Definition: ResultSet.h:947
std::atomic< int64_t > cached_row_count_
Definition: ResultSet.h:974
const std::vector< TargetInfo > targets_
Definition: ResultSet.h:934
std::shared_ptr< RowSetMemoryOwner > row_set_mem_owner_
Definition: ResultSet.h:944
size_t drop_first_
Definition: ResultSet.h:942
bool cached_
Definition: ResultSet.h:981
unsigned grid_size_
Definition: ResultSet.h:948
Data_Namespace::DataMgr * data_mgr_
Definition: ResultSet.h:964
std::vector< std::vector< std::vector< const int8_t * > > > col_buffers_
Definition: ResultSet.h:957
std::vector< std::vector< int64_t > > consistent_frag_sizes_
Definition: ResultSet.h:959
const ExecutorDeviceType device_type_
Definition: ResultSet.h:935
size_t fetched_so_far_
Definition: ResultSet.h:941
std::vector< ColumnLazyFetchInfo > lazy_fetch_info_
Definition: ResultSet.h:956
size_t crt_row_buff_idx_
Definition: ResultSet.h:940
QueryPlanHash query_plan_
Definition: ResultSet.h:984
std::vector< std::vector< std::vector< int64_t > > > frag_offsets_
Definition: ResultSet.h:958
bool separate_varlen_storage_valid_
Definition: ResultSet.h:970
constexpr int64_t uninitialized_cached_row_count
Definition: ResultSet.cpp:52
const int device_id_
Definition: ResultSet.h:936
ResultSet::ResultSet ( const std::shared_ptr< const Analyzer::Estimator > ,
const ExecutorDeviceType  device_type,
const int  device_id,
Data_Namespace::DataMgr data_mgr 
)
ResultSet::ResultSet ( const std::string &  explanation)

Definition at line 161 of file ResultSet.cpp.

References CPU.

163  , device_id_(-1)
164  , fetched_so_far_(0)
166  , explanation_(explanation)
167  , just_explain_(true)
168  , for_validation_only_(false)
171  , cached_(false)
172  , query_exec_time_(0)
174  , can_use_speculative_top_n_sort(std::nullopt) {}
bool for_validation_only_
Definition: ResultSet.h:973
GeoReturnType geo_return_type_
Definition: ResultSet.h:978
constexpr QueryPlanHash EMPTY_HASHED_PLAN_DAG_KEY
std::optional< bool > can_use_speculative_top_n_sort
Definition: ResultSet.h:991
size_t query_exec_time_
Definition: ResultSet.h:983
const bool just_explain_
Definition: ResultSet.h:972
std::atomic< int64_t > cached_row_count_
Definition: ResultSet.h:974
bool cached_
Definition: ResultSet.h:981
std::string explanation_
Definition: ResultSet.h:971
const ExecutorDeviceType device_type_
Definition: ResultSet.h:935
size_t fetched_so_far_
Definition: ResultSet.h:941
QueryPlanHash query_plan_
Definition: ResultSet.h:984
bool separate_varlen_storage_valid_
Definition: ResultSet.h:970
constexpr int64_t uninitialized_cached_row_count
Definition: ResultSet.cpp:52
const int device_id_
Definition: ResultSet.h:936
ResultSet::ResultSet ( int64_t  queue_time_ms,
int64_t  render_time_ms,
const std::shared_ptr< RowSetMemoryOwner row_set_mem_owner 
)

Definition at line 176 of file ResultSet.cpp.

References CPU.

180  , device_id_(-1)
181  , fetched_so_far_(0)
182  , row_set_mem_owner_(row_set_mem_owner)
183  , timings_(QueryExecutionTimings{queue_time_ms, render_time_ms, 0, 0})
185  , just_explain_(true)
186  , for_validation_only_(false)
189  , cached_(false)
190  , query_exec_time_(0)
192  , can_use_speculative_top_n_sort(std::nullopt) {}
bool for_validation_only_
Definition: ResultSet.h:973
GeoReturnType geo_return_type_
Definition: ResultSet.h:978
constexpr QueryPlanHash EMPTY_HASHED_PLAN_DAG_KEY
std::optional< bool > can_use_speculative_top_n_sort
Definition: ResultSet.h:991
size_t query_exec_time_
Definition: ResultSet.h:983
const bool just_explain_
Definition: ResultSet.h:972
std::atomic< int64_t > cached_row_count_
Definition: ResultSet.h:974
std::shared_ptr< RowSetMemoryOwner > row_set_mem_owner_
Definition: ResultSet.h:944
bool cached_
Definition: ResultSet.h:981
QueryExecutionTimings timings_
Definition: ResultSet.h:949
const ExecutorDeviceType device_type_
Definition: ResultSet.h:935
size_t fetched_so_far_
Definition: ResultSet.h:941
QueryPlanHash query_plan_
Definition: ResultSet.h:984
bool separate_varlen_storage_valid_
Definition: ResultSet.h:970
constexpr int64_t uninitialized_cached_row_count
Definition: ResultSet.cpp:52
const int device_id_
Definition: ResultSet.h:936
ResultSet::~ResultSet ( )

Definition at line 194 of file ResultSet.cpp.

References CHECK, CPU, and data_mgr_().

194  {
195  if (storage_) {
196  if (!storage_->buff_is_provided_) {
197  CHECK(storage_->getUnderlyingBuffer());
198  free(storage_->getUnderlyingBuffer());
199  }
200  }
201  for (auto& storage : appended_storage_) {
202  if (storage && !storage->buff_is_provided_) {
203  free(storage->getUnderlyingBuffer());
204  }
205  }
209  }
211  CHECK(data_mgr_);
213  }
214 }
AppendedStorage appended_storage_
Definition: ResultSet.h:939
std::unique_ptr< ResultSetStorage > storage_
Definition: ResultSet.h:938
Data_Namespace::DataMgr * data_mgr_
Definition: ResultSet.h:964
int8_t * host_estimator_buffer_
Definition: ResultSet.h:963
const ExecutorDeviceType device_type_
Definition: ResultSet.h:935
#define CHECK(condition)
Definition: Logger.h:291
void free(AbstractBuffer *buffer)
Definition: DataMgr.cpp:525
Data_Namespace::AbstractBuffer * device_estimator_buffer_
Definition: ResultSet.h:962

+ Here is the call graph for this function:

Member Function Documentation

void ResultSet::addCompilationQueueTime ( const int64_t  compilation_queue_time)

Definition at line 718 of file ResultSet.cpp.

718  {
719  timings_.compilation_queue_time += compilation_queue_time;
720 }
QueryExecutionTimings timings_
Definition: ResultSet.h:949
void ResultSet::advanceCursorToNextEntry ( ResultSetRowIterator iter) const
private
size_t ResultSet::advanceCursorToNextEntry ( ) const
private
const ResultSetStorage* ResultSet::allocateStorage ( ) const
const ResultSetStorage* ResultSet::allocateStorage ( int8_t *  ,
const std::vector< int64_t > &  ,
std::shared_ptr< VarlenOutputInfo > = nullptr 
) const
const ResultSetStorage* ResultSet::allocateStorage ( const std::vector< int64_t > &  ) const
void ResultSet::append ( ResultSet & that)

Definition at line 297 of file ResultSet.cpp.

References CHECK.

297  {
299  if (!that.storage_) {
300  return;
301  }
302  appended_storage_.push_back(std::move(that.storage_));
305  appended_storage_.back()->query_mem_desc_.getEntryCount());
306  chunks_.insert(chunks_.end(), that.chunks_.begin(), that.chunks_.end());
307  col_buffers_.insert(
308  col_buffers_.end(), that.col_buffers_.begin(), that.col_buffers_.end());
309  frag_offsets_.insert(
310  frag_offsets_.end(), that.frag_offsets_.begin(), that.frag_offsets_.end());
312  that.consistent_frag_sizes_.begin(),
313  that.consistent_frag_sizes_.end());
314  chunk_iters_.insert(
315  chunk_iters_.end(), that.chunk_iters_.begin(), that.chunk_iters_.end());
317  CHECK(that.separate_varlen_storage_valid_);
319  that.serialized_varlen_buffer_.begin(),
320  that.serialized_varlen_buffer_.end());
321  }
322  for (auto& buff : that.literal_buffers_) {
323  literal_buffers_.push_back(std::move(buff));
324  }
325 }
void setEntryCount(const size_t val)
AppendedStorage appended_storage_
Definition: ResultSet.h:939
QueryMemoryDescriptor query_mem_desc_
Definition: ResultSet.h:937
std::vector< std::shared_ptr< std::list< ChunkIter > > > chunk_iters_
Definition: ResultSet.h:952
std::vector< SerializedVarlenBufferStorage > serialized_varlen_buffer_
Definition: ResultSet.h:969
void invalidateCachedRowCount() const
Definition: ResultSet.cpp:605
std::list< std::shared_ptr< Chunk_NS::Chunk > > chunks_
Definition: ResultSet.h:951
std::vector< std::vector< int8_t > > literal_buffers_
Definition: ResultSet.h:955
std::vector< std::vector< std::vector< const int8_t * > > > col_buffers_
Definition: ResultSet.h:957
std::vector< std::vector< int64_t > > consistent_frag_sizes_
Definition: ResultSet.h:959
#define CHECK(condition)
Definition: Logger.h:291
std::vector< std::vector< std::vector< int64_t > > > frag_offsets_
Definition: ResultSet.h:958
bool separate_varlen_storage_valid_
Definition: ResultSet.h:970
bool ResultSet::areAnyColumnsLazyFetched ( ) const
inline

Definition at line 581 of file ResultSet.h.

References anonymous_namespace{QueryMemoryDescriptor.cpp}::any_of(), and lazy_fetch_info_.

581  {
582  auto is_lazy = [](auto const& info) { return info.is_lazily_fetched; };
583  return std::any_of(lazy_fetch_info_.begin(), lazy_fetch_info_.end(), is_lazy);
584  }
std::vector< ColumnLazyFetchInfo > lazy_fetch_info_
Definition: ResultSet.h:956
bool any_of(std::vector< Analyzer::Expr * > const &target_exprs)

+ Here is the call graph for this function:

void ResultSet::baselineSort ( const std::list< Analyzer::OrderEntry > &  order_entries,
const size_t  top_n,
const Executor executor 
)
private
size_t ResultSet::binSearchRowCount ( ) const
private

Definition at line 616 of file ResultSet.cpp.

References anonymous_namespace{ResultSet.cpp}::get_truncated_row_count().

616  {
617  if (!storage_) {
618  return 0;
619  }
620 
621  size_t row_count = storage_->binSearchRowCount();
622  for (auto& s : appended_storage_) {
623  row_count += s->binSearchRowCount();
624  }
625 
626  return get_truncated_row_count(row_count, getLimit(), drop_first_);
627 }
AppendedStorage appended_storage_
Definition: ResultSet.h:939
std::unique_ptr< ResultSetStorage > storage_
Definition: ResultSet.h:938
size_t getLimit() const
Definition: ResultSet.cpp:1397
size_t get_truncated_row_count(size_t total_row_count, size_t limit, size_t offset)
Definition: ResultSet.cpp:539
size_t drop_first_
Definition: ResultSet.h:942

+ Here is the call graph for this function:

double ResultSet::calculateQuantile ( quantile::TDigest *const  t_digest)
static

Definition at line 1035 of file ResultSet.cpp.

References CHECK, quantile::detail::TDigest< RealType, IndexType >::mergeBufferFinal(), NULL_DOUBLE, and quantile::detail::TDigest< RealType, IndexType >::quantile().

Referenced by makeTargetValue().

1035  {
1036  static_assert(sizeof(int64_t) == sizeof(quantile::TDigest*));
1037  CHECK(t_digest);
1038  t_digest->mergeBufferFinal();
1039  double const quantile = t_digest->quantile();
1040  return boost::math::isnan(quantile) ? NULL_DOUBLE : quantile;
1041 }
#define NULL_DOUBLE
DEVICE RealType quantile(VectorView< IndexType const > const partial_sum, RealType const q) const
Definition: quantile.h:827
DEVICE void mergeBufferFinal()
Definition: quantile.h:651
#define CHECK(condition)
Definition: Logger.h:291

+ Here is the call graph for this function:

+ Here is the caller graph for this function:

bool ResultSet::canUseFastBaselineSort ( const std::list< Analyzer::OrderEntry > &  order_entries,
const size_t  top_n 
)
private
std::optional<bool> ResultSet::canUseSpeculativeTopNSort ( ) const
inline

Definition at line 519 of file ResultSet.h.

References can_use_speculative_top_n_sort.

519  {
521  }
std::optional< bool > can_use_speculative_top_n_sort
Definition: ResultSet.h:991
void ResultSet::clearPermutation ( )
inline

Definition at line 464 of file ResultSet.h.

References permutation_.

Referenced by initStatus().

464  {
465  if (!permutation_.empty()) {
466  permutation_.clear();
467  }
468  }
Permutation permutation_
Definition: ResultSet.h:945

+ Here is the caller graph for this function:

size_t ResultSet::colCount ( ) const

Definition at line 410 of file ResultSet.cpp.

410  {
411  return just_explain_ ? 1 : targets_.size();
412 }
const bool just_explain_
Definition: ResultSet.h:972
const std::vector< TargetInfo > targets_
Definition: ResultSet.h:934
ScalarTargetValue ResultSet::convertToScalarTargetValue ( SQLTypeInfo const &  ti,
bool const  translate_strings,
int64_t const  val 
) const

Definition at line 1088 of file ResultSetIteration.cpp.

References CHECK_EQ, SQLTypeInfo::get_compression(), SQLTypeInfo::is_any(), SQLTypeInfo::is_string(), kDOUBLE, kENCODING_DICT, kFLOAT, and makeStringTargetValue().

Referenced by makeTargetValue().

1090  {
1091  if (ti.is_string()) {
1092  CHECK_EQ(kENCODING_DICT, ti.get_compression());
1093  return makeStringTargetValue(ti, translate_strings, val);
1094  } else {
1095  return ti.is_any<kDOUBLE>() ? ScalarTargetValue(shared::bit_cast<double>(val))
1096  : ti.is_any<kFLOAT>() ? ScalarTargetValue(shared::bit_cast<float>(val))
1097  : ScalarTargetValue(val);
1098  }
1099 }
#define CHECK_EQ(x, y)
Definition: Logger.h:301
ScalarTargetValue makeStringTargetValue(SQLTypeInfo const &chosen_type, bool const translate_strings, int64_t const ival) const
boost::variant< int64_t, double, float, NullableString > ScalarTargetValue
Definition: TargetValue.h:180

+ Here is the call graph for this function:

+ Here is the caller graph for this function:

ResultSetPtr ResultSet::copy ( )

Definition at line 327 of file ResultSet.cpp.

References CHECK, gpu_enabled::copy(), and DEBUG_TIMER.

327  {
328  auto timer = DEBUG_TIMER(__func__);
329  if (!storage_) {
330  return nullptr;
331  }
332 
333  auto executor = getExecutor();
334  CHECK(executor);
335  ResultSetPtr copied_rs = std::make_shared<ResultSet>(targets_,
336  device_type_,
339  executor->blockSize(),
340  executor->gridSize());
341 
342  auto allocate_and_copy_storage =
343  [&](const ResultSetStorage* prev_storage) -> std::unique_ptr<ResultSetStorage> {
344  const auto& prev_qmd = prev_storage->query_mem_desc_;
345  const auto storage_size = prev_qmd.getBufferSizeBytes(device_type_);
346  auto buff = row_set_mem_owner_->allocate(storage_size, /*thread_idx=*/0);
347  std::unique_ptr<ResultSetStorage> new_storage;
348  new_storage.reset(new ResultSetStorage(
349  prev_storage->targets_, prev_qmd, buff, /*buff_is_provided=*/true));
350  new_storage->target_init_vals_ = prev_storage->target_init_vals_;
351  if (prev_storage->varlen_output_info_) {
352  new_storage->varlen_output_info_ = prev_storage->varlen_output_info_;
353  }
354  memcpy(new_storage->buff_, prev_storage->buff_, storage_size);
355  new_storage->query_mem_desc_ = prev_qmd;
356  return new_storage;
357  };
358 
359  copied_rs->storage_ = allocate_and_copy_storage(storage_.get());
360  if (!appended_storage_.empty()) {
361  for (const auto& storage : appended_storage_) {
362  copied_rs->appended_storage_.push_back(allocate_and_copy_storage(storage.get()));
363  }
364  }
365  std::copy(chunks_.begin(), chunks_.end(), std::back_inserter(copied_rs->chunks_));
366  std::copy(chunk_iters_.begin(),
367  chunk_iters_.end(),
368  std::back_inserter(copied_rs->chunk_iters_));
369  std::copy(col_buffers_.begin(),
370  col_buffers_.end(),
371  std::back_inserter(copied_rs->col_buffers_));
372  std::copy(frag_offsets_.begin(),
373  frag_offsets_.end(),
374  std::back_inserter(copied_rs->frag_offsets_));
377  std::back_inserter(copied_rs->consistent_frag_sizes_));
381  std::back_inserter(copied_rs->serialized_varlen_buffer_));
382  }
383  std::copy(literal_buffers_.begin(),
384  literal_buffers_.end(),
385  std::back_inserter(copied_rs->literal_buffers_));
386  std::copy(lazy_fetch_info_.begin(),
387  lazy_fetch_info_.end(),
388  std::back_inserter(copied_rs->lazy_fetch_info_));
389 
390  copied_rs->permutation_ = permutation_;
391  copied_rs->drop_first_ = drop_first_;
392  copied_rs->keep_first_ = keep_first_;
393  copied_rs->separate_varlen_storage_valid_ = separate_varlen_storage_valid_;
394  copied_rs->query_exec_time_ = query_exec_time_;
395  copied_rs->input_table_keys_ = input_table_keys_;
396  copied_rs->target_meta_info_ = target_meta_info_;
397  copied_rs->geo_return_type_ = geo_return_type_;
398  copied_rs->query_plan_ = query_plan_;
400  copied_rs->can_use_speculative_top_n_sort = can_use_speculative_top_n_sort;
401  }
402 
403  return copied_rs;
404 }
Permutation permutation_
Definition: ResultSet.h:945
AppendedStorage appended_storage_
Definition: ResultSet.h:939
GeoReturnType geo_return_type_
Definition: ResultSet.h:978
std::optional< bool > can_use_speculative_top_n_sort
Definition: ResultSet.h:991
QueryMemoryDescriptor query_mem_desc_
Definition: ResultSet.h:937
std::unique_ptr< ResultSetStorage > storage_
Definition: ResultSet.h:938
size_t query_exec_time_
Definition: ResultSet.h:983
std::shared_ptr< ResultSet > ResultSetPtr
size_t keep_first_
Definition: ResultSet.h:943
std::vector< std::shared_ptr< std::list< ChunkIter > > > chunk_iters_
Definition: ResultSet.h:952
std::vector< SerializedVarlenBufferStorage > serialized_varlen_buffer_
Definition: ResultSet.h:969
const std::vector< TargetInfo > targets_
Definition: ResultSet.h:934
std::shared_ptr< RowSetMemoryOwner > row_set_mem_owner_
Definition: ResultSet.h:944
const Executor * getExecutor() const
Definition: ResultSet.h:627
size_t drop_first_
Definition: ResultSet.h:942
std::list< std::shared_ptr< Chunk_NS::Chunk > > chunks_
Definition: ResultSet.h:951
DEVICE auto copy(ARGS &&...args)
Definition: gpu_enabled.h:51
std::vector< std::vector< int8_t > > literal_buffers_
Definition: ResultSet.h:955
std::unordered_set< size_t > input_table_keys_
Definition: ResultSet.h:985
std::vector< TargetMetaInfo > target_meta_info_
Definition: ResultSet.h:986
std::vector< std::vector< std::vector< const int8_t * > > > col_buffers_
Definition: ResultSet.h:957
std::vector< std::vector< int64_t > > consistent_frag_sizes_
Definition: ResultSet.h:959
const ExecutorDeviceType device_type_
Definition: ResultSet.h:935
#define CHECK(condition)
Definition: Logger.h:291
#define DEBUG_TIMER(name)
Definition: Logger.h:411
std::vector< ColumnLazyFetchInfo > lazy_fetch_info_
Definition: ResultSet.h:956
QueryPlanHash query_plan_
Definition: ResultSet.h:984
std::vector< std::vector< std::vector< int64_t > > > frag_offsets_
Definition: ResultSet.h:958
bool separate_varlen_storage_valid_
Definition: ResultSet.h:970

+ Here is the call graph for this function:

void ResultSet::copyColumnIntoBuffer ( const size_t  column_idx,
int8_t *  output_buffer,
const size_t  output_buffer_size 
) const

For each specified column, this function goes through all available storages and copies its content into a contiguous output_buffer

Definition at line 1171 of file ResultSetIteration.cpp.

References appended_storage_, CHECK, CHECK_LT, QueryMemoryDescriptor::getPaddedSlotWidthBytes(), QueryMemoryDescriptor::getSlotCount(), isDirectColumnarConversionPossible(), query_mem_desc_, and storage_.

1173  {
1175  CHECK_LT(column_idx, query_mem_desc_.getSlotCount());
1176  CHECK(output_buffer_size > 0);
1177  CHECK(output_buffer);
1178  const auto column_width_size = query_mem_desc_.getPaddedSlotWidthBytes(column_idx);
1179  size_t out_buff_offset = 0;
1180 
1181  // the main storage:
1182  const size_t crt_storage_row_count = storage_->query_mem_desc_.getEntryCount();
1183  const size_t crt_buffer_size = crt_storage_row_count * column_width_size;
1184  const size_t column_offset = storage_->query_mem_desc_.getColOffInBytes(column_idx);
1185  const int8_t* storage_buffer = storage_->getUnderlyingBuffer() + column_offset;
1186  CHECK(crt_buffer_size <= output_buffer_size);
1187  std::memcpy(output_buffer, storage_buffer, crt_buffer_size);
1188 
1189  out_buff_offset += crt_buffer_size;
1190 
1191  // the appended storages:
1192  for (size_t i = 0; i < appended_storage_.size(); i++) {
1193  const size_t crt_storage_row_count =
1194  appended_storage_[i]->query_mem_desc_.getEntryCount();
1195  if (crt_storage_row_count == 0) {
1196  // skip an empty appended storage
1197  continue;
1198  }
1199  CHECK_LT(out_buff_offset, output_buffer_size);
1200  const size_t crt_buffer_size = crt_storage_row_count * column_width_size;
1201  const size_t column_offset =
1202  appended_storage_[i]->query_mem_desc_.getColOffInBytes(column_idx);
1203  const int8_t* storage_buffer =
1204  appended_storage_[i]->getUnderlyingBuffer() + column_offset;
1205  CHECK(out_buff_offset + crt_buffer_size <= output_buffer_size);
1206  std::memcpy(output_buffer + out_buff_offset, storage_buffer, crt_buffer_size);
1207 
1208  out_buff_offset += crt_buffer_size;
1209  }
1210 }
AppendedStorage appended_storage_
Definition: ResultSet.h:939
QueryMemoryDescriptor query_mem_desc_
Definition: ResultSet.h:937
std::unique_ptr< ResultSetStorage > storage_
Definition: ResultSet.h:938
const int8_t getPaddedSlotWidthBytes(const size_t slot_idx) const
#define CHECK_LT(x, y)
Definition: Logger.h:303
#define CHECK(condition)
Definition: Logger.h:291
bool isDirectColumnarConversionPossible() const
Definition: ResultSet.cpp:1465

+ Here is the call graph for this function:

void ResultSet::create_active_buffer_set ( CountDistinctSet count_distinct_active_buffer_set) const
private
Comparator ResultSet::createComparator ( const std::list< Analyzer::OrderEntry > &  order_entries,
const PermutationView  permutation,
const Executor executor,
const bool  single_threaded 
)
inlineprivate

Definition at line 869 of file ResultSet.h.

References DEBUG_TIMER, QueryMemoryDescriptor::didOutputColumnar(), and query_mem_desc_.

872  {
873  auto timer = DEBUG_TIMER(__func__);
875  return [rsc = ResultSetComparator<ColumnWiseTargetAccessor>(
876  order_entries, this, permutation, executor, single_threaded)](
877  const PermutationIdx lhs, const PermutationIdx rhs) {
878  return rsc(lhs, rhs);
879  };
880  } else {
881  return [rsc = ResultSetComparator<RowWiseTargetAccessor>(
882  order_entries, this, permutation, executor, single_threaded)](
883  const PermutationIdx lhs, const PermutationIdx rhs) {
884  return rsc(lhs, rhs);
885  };
886  }
887  }
QueryMemoryDescriptor query_mem_desc_
Definition: ResultSet.h:937
uint32_t PermutationIdx
Definition: ResultSet.h:152
#define DEBUG_TIMER(name)
Definition: Logger.h:411

+ Here is the call graph for this function:

bool ResultSet::definitelyHasNoRows ( ) const

Definition at line 668 of file ResultSet.cpp.

668  {
669  return (!storage_ && !estimator_ && !just_explain_) || cached_row_count_ == 0;
670 }
std::unique_ptr< ResultSetStorage > storage_
Definition: ResultSet.h:938
const bool just_explain_
Definition: ResultSet.h:972
std::atomic< int64_t > cached_row_count_
Definition: ResultSet.h:974
const std::shared_ptr< const Analyzer::Estimator > estimator_
Definition: ResultSet.h:961
bool ResultSet::didOutputColumnar ( ) const
inline

Definition at line 557 of file ResultSet.h.

References QueryMemoryDescriptor::didOutputColumnar(), and query_mem_desc_.

557 { return this->query_mem_desc_.didOutputColumnar(); }
QueryMemoryDescriptor query_mem_desc_
Definition: ResultSet.h:937

+ Here is the call graph for this function:

void ResultSet::doBaselineSort ( const ExecutorDeviceType  device_type,
const std::list< Analyzer::OrderEntry > &  order_entries,
const size_t  top_n,
const Executor executor 
)
private
void ResultSet::dropFirstN ( const size_t  n)

Definition at line 59 of file ResultSet.cpp.

References anonymous_namespace{Utm.h}::n.

59  {
61  drop_first_ = n;
62 }
void invalidateCachedRowCount() const
Definition: ResultSet.cpp:605
size_t drop_first_
Definition: ResultSet.h:942
constexpr double n
Definition: Utm.h:38
void ResultSet::eachCellInColumn ( RowIterationState state,
CellCallback const &  func 
)

Definition at line 485 of file ResultSet.cpp.

References advance_slot(), advance_to_next_columnar_target_buff(), ResultSet::RowIterationState::agg_idx_, align_to_int64(), ResultSet::RowIterationState::buf_ptr_, CHECK, CHECK_GE, CHECK_LT, ResultSet::RowIterationState::compact_sz1_, ResultSet::RowIterationState::cur_target_idx_, QueryMemoryDescriptor::didOutputColumnar(), get_cols_ptr(), get_key_bytes_rowwise(), QueryMemoryDescriptor::getPaddedSlotWidthBytes(), ResultSet::RowIterationState::prev_target_idx_, read_int_from_buff(), and row_ptr_rowwise().

485  {
486  size_t const target_idx = state.cur_target_idx_;
487  QueryMemoryDescriptor& storage_qmd = storage_->query_mem_desc_;
488  CHECK_LT(target_idx, lazy_fetch_info_.size());
489  auto& col_lazy_fetch = lazy_fetch_info_[target_idx];
490  CHECK(col_lazy_fetch.is_lazily_fetched);
491  int const target_size = storage_->targets_[target_idx].sql_type.get_size();
492  CHECK_LT(0, target_size) << storage_->targets_[target_idx].toString();
493  size_t const nrows = storage_->binSearchRowCount();
494  if (storage_qmd.didOutputColumnar()) {
495  // Logic based on ResultSet::ColumnWiseTargetAccessor::initializeOffsetsForStorage()
496  if (state.buf_ptr_ == nullptr) {
497  state.buf_ptr_ = get_cols_ptr(storage_->buff_, storage_qmd);
498  state.compact_sz1_ = storage_qmd.getPaddedSlotWidthBytes(state.agg_idx_)
499  ? storage_qmd.getPaddedSlotWidthBytes(state.agg_idx_)
501  }
502  for (size_t j = state.prev_target_idx_; j < state.cur_target_idx_; ++j) {
503  size_t const next_target_idx = j + 1; // Set state to reflect next target_idx j+1
504  state.buf_ptr_ = advance_to_next_columnar_target_buff(
505  state.buf_ptr_, storage_qmd, state.agg_idx_);
506  auto const& next_agg_info = storage_->targets_[next_target_idx];
507  state.agg_idx_ =
508  advance_slot(state.agg_idx_, next_agg_info, separate_varlen_storage_valid_);
509  state.compact_sz1_ = storage_qmd.getPaddedSlotWidthBytes(state.agg_idx_)
510  ? storage_qmd.getPaddedSlotWidthBytes(state.agg_idx_)
512  }
513  for (size_t i = 0; i < nrows; ++i) {
514  int8_t const* const pos_ptr = state.buf_ptr_ + i * state.compact_sz1_;
515  int64_t pos = read_int_from_buff(pos_ptr, target_size);
516  CHECK_GE(pos, 0);
517  auto& frag_col_buffers = getColumnFrag(0, target_idx, pos);
518  CHECK_LT(size_t(col_lazy_fetch.local_col_id), frag_col_buffers.size());
519  int8_t const* const col_frag = frag_col_buffers[col_lazy_fetch.local_col_id];
520  func(col_frag + pos * target_size);
521  }
522  } else {
523  size_t const key_bytes_with_padding =
525  for (size_t i = 0; i < nrows; ++i) {
526  int8_t const* const keys_ptr = row_ptr_rowwise(storage_->buff_, storage_qmd, i);
527  int8_t const* const rowwise_target_ptr = keys_ptr + key_bytes_with_padding;
528  int64_t pos = *reinterpret_cast<int64_t const*>(rowwise_target_ptr);
529  auto& frag_col_buffers = getColumnFrag(0, target_idx, pos);
530  CHECK_LT(size_t(col_lazy_fetch.local_col_id), frag_col_buffers.size());
531  int8_t const* const col_frag = frag_col_buffers[col_lazy_fetch.local_col_id];
532  func(col_frag + pos * target_size);
533  }
534  }
535 }
T advance_to_next_columnar_target_buff(T target_ptr, const QueryMemoryDescriptor &query_mem_desc, const size_t target_slot_idx)
QueryMemoryDescriptor query_mem_desc_
Definition: ResultSet.h:937
#define CHECK_GE(x, y)
Definition: Logger.h:306
std::unique_ptr< ResultSetStorage > storage_
Definition: ResultSet.h:938
size_t getEffectiveKeyWidth() const
int64_t read_int_from_buff(const int8_t *ptr, const int8_t compact_sz)
size_t advance_slot(const size_t j, const TargetInfo &target_info, const bool separate_varlen_storage)
const int8_t getPaddedSlotWidthBytes(const size_t slot_idx) const
T row_ptr_rowwise(T buff, const QueryMemoryDescriptor &query_mem_desc, const size_t entry_idx)
#define CHECK_LT(x, y)
Definition: Logger.h:303
#define CHECK(condition)
Definition: Logger.h:291
std::vector< ColumnLazyFetchInfo > lazy_fetch_info_
Definition: ResultSet.h:956
bool separate_varlen_storage_valid_
Definition: ResultSet.h:970
T get_cols_ptr(T buff, const QueryMemoryDescriptor &query_mem_desc)
size_t get_key_bytes_rowwise(const QueryMemoryDescriptor &query_mem_desc)
FORCE_INLINE HOST DEVICE T align_to_int64(T addr)
const std::vector< const int8_t * > & getColumnFrag(const size_t storge_idx, const size_t col_logical_idx, int64_t &global_idx) const

+ Here is the call graph for this function:

size_t ResultSet::entryCount ( ) const

Returns the number of entries the result set is allocated to hold.

Note that this can be greater than or equal to the actual number of valid rows in the result set, whether due to a SQL LIMIT/OFFSET applied or because the result set representation is inherently sparse (i.e. baseline hash group by)

For getting the number of valid rows in the result set (inclusive of any applied LIMIT and/or OFFSET), use ResultSet::rowCount(). That is also the right way to test whether there are any valid rows, as a return value from entryCount() greater than 0 does not necessarily mean the result set is non-empty.

Definition at line 752 of file ResultSetIteration.cpp.

References QueryMemoryDescriptor::getEntryCount(), permutation_, and query_mem_desc_.

752  {
753  return permutation_.empty() ? query_mem_desc_.getEntryCount() : permutation_.size();
754 }
Permutation permutation_
Definition: ResultSet.h:945
QueryMemoryDescriptor query_mem_desc_
Definition: ResultSet.h:937

+ Here is the call graph for this function:

void ResultSet::fillOneEntry ( const std::vector< int64_t > &  entry)
inline

Definition at line 426 of file ResultSet.h.

References CHECK, and storage_.

426  {
427  CHECK(storage_);
428  if (storage_->query_mem_desc_.didOutputColumnar()) {
429  storage_->fillOneEntryColWise(entry);
430  } else {
431  storage_->fillOneEntryRowWise(entry);
432  }
433  }
std::unique_ptr< ResultSetStorage > storage_
Definition: ResultSet.h:938
#define CHECK(condition)
Definition: Logger.h:291
ResultSet::StorageLookupResult ResultSet::findStorage ( const size_t  entry_idx) const
private

Definition at line 939 of file ResultSet.cpp.

Referenced by getVarlenOutputInfo(), and makeGeoTargetValue().

939  {
940  auto [stg_idx, fixedup_entry_idx] = getStorageIndex(entry_idx);
941  return {stg_idx ? appended_storage_[stg_idx - 1].get() : storage_.get(),
942  fixedup_entry_idx,
943  stg_idx};
944 }
std::pair< size_t, size_t > getStorageIndex(const size_t entry_idx) const
Definition: ResultSet.cpp:914
AppendedStorage appended_storage_
Definition: ResultSet.h:939
std::unique_ptr< ResultSetStorage > storage_
Definition: ResultSet.h:938

+ Here is the caller graph for this function:

void ResultSet::fixupCountDistinctPointers ( )
private
QueryMemoryDescriptor ResultSet::fixupQueryMemoryDescriptor ( const QueryMemoryDescriptor query_mem_desc)
static

Definition at line 756 of file ResultSet.cpp.

References QueryMemoryDescriptor::didOutputColumnar(), and query_mem_desc.

Referenced by GpuSharedMemCodeBuilder::codegenInitialization(), GpuSharedMemCodeBuilder::codegenReduction(), Executor::executeTableFunction(), QueryExecutionContext::groupBufferToDeinterleavedResults(), QueryMemoryInitializer::initRowGroups(), QueryMemoryInitializer::QueryMemoryInitializer(), and Executor::reduceMultiDeviceResults().

757  {
758  auto query_mem_desc_copy = query_mem_desc;
759  query_mem_desc_copy.resetGroupColWidths(
760  std::vector<int8_t>(query_mem_desc_copy.getGroupbyColCount(), 8));
761  if (query_mem_desc.didOutputColumnar()) {
762  return query_mem_desc_copy;
763  }
764  query_mem_desc_copy.alignPaddedSlots();
765  return query_mem_desc_copy;
766 }

+ Here is the call graph for this function:

+ Here is the caller graph for this function:

unsigned ResultSet::getBlockSize ( ) const
inline

Definition at line 532 of file ResultSet.h.

References block_size_.

532 { return block_size_; }
unsigned block_size_
Definition: ResultSet.h:947
size_t ResultSet::getBufferSizeBytes ( const ExecutorDeviceType  device_type) const

Definition at line 756 of file ResultSetIteration.cpp.

References CHECK, and storage_.

756  {
757  CHECK(storage_);
758  return storage_->query_mem_desc_.getBufferSizeBytes(device_type);
759 }
std::unique_ptr< ResultSetStorage > storage_
Definition: ResultSet.h:938
#define CHECK(condition)
Definition: Logger.h:291
SQLTypeInfo ResultSet::getColType ( const size_t  col_idx) const

Definition at line 414 of file ResultSet.cpp.

References CHECK_LT, kAVG, kDOUBLE, and kTEXT.

414  {
415  if (just_explain_) {
416  return SQLTypeInfo(kTEXT, false);
417  }
418  CHECK_LT(col_idx, targets_.size());
419  return targets_[col_idx].agg_kind == kAVG ? SQLTypeInfo(kDOUBLE, false)
420  : targets_[col_idx].sql_type;
421 }
const bool just_explain_
Definition: ResultSet.h:972
const std::vector< TargetInfo > targets_
Definition: ResultSet.h:934
#define CHECK_LT(x, y)
Definition: Logger.h:303
Definition: sqltypes.h:69
Definition: sqldefs.h:74
template<typename ENTRY_TYPE >
ENTRY_TYPE ResultSet::getColumnarBaselineEntryAt ( const size_t  row_idx,
const size_t  target_idx,
const size_t  slot_idx 
) const
private
template<typename ENTRY_TYPE >
ENTRY_TYPE ResultSet::getColumnarBaselineEntryAt ( const size_t  row_idx,
const size_t  target_idx,
const size_t  slot_idx 
) const

Directly accesses the result set's storage buffer for a particular data type (row-wise output, baseline hash group by)

NOTE: Currently, only used in direct columnarization

Definition at line 1343 of file ResultSetIteration.cpp.

References CHECK_NE, and storage_.

1345  {
1346  CHECK_NE(storage_->query_mem_desc_.targetGroupbyIndicesSize(), size_t(0));
1347  const auto key_width = storage_->query_mem_desc_.getEffectiveKeyWidth();
1348  const auto column_offset =
1349  (storage_->query_mem_desc_.getTargetGroupbyIndex(target_idx) < 0)
1350  ? storage_->query_mem_desc_.getColOffInBytes(slot_idx)
1351  : storage_->query_mem_desc_.getTargetGroupbyIndex(target_idx) * key_width *
1352  storage_->query_mem_desc_.getEntryCount();
1353  const auto column_buffer = storage_->getUnderlyingBuffer() + column_offset;
1354  return reinterpret_cast<const ENTRY_TYPE*>(column_buffer)[row_idx];
1355 }
std::unique_ptr< ResultSetStorage > storage_
Definition: ResultSet.h:938
#define CHECK_NE(x, y)
Definition: Logger.h:302
const int8_t * ResultSet::getColumnarBuffer ( size_t  column_idx) const

Definition at line 1496 of file ResultSet.cpp.

References CHECK.

1496  {
1498  return storage_->getUnderlyingBuffer() + query_mem_desc_.getColOffInBytes(column_idx);
1499 }
QueryMemoryDescriptor query_mem_desc_
Definition: ResultSet.h:937
std::unique_ptr< ResultSetStorage > storage_
Definition: ResultSet.h:938
bool isZeroCopyColumnarConversionPossible(size_t column_idx) const
Definition: ResultSet.cpp:1487
#define CHECK(condition)
Definition: Logger.h:291
size_t getColOffInBytes(const size_t col_idx) const
template<typename ENTRY_TYPE >
ENTRY_TYPE ResultSet::getColumnarPerfectHashEntryAt ( const size_t  row_idx,
const size_t  target_idx,
const size_t  slot_idx 
) const
private
template<typename ENTRY_TYPE >
ENTRY_TYPE ResultSet::getColumnarPerfectHashEntryAt ( const size_t  row_idx,
const size_t  target_idx,
const size_t  slot_idx 
) const

Directly accesses the result set's storage buffer for a particular data type (columnar output, perfect hash group by)

NOTE: Currently, only used in direct columnarization

Definition at line 1289 of file ResultSetIteration.cpp.

References storage_.

1291  {
1292  const size_t column_offset = storage_->query_mem_desc_.getColOffInBytes(slot_idx);
1293  const int8_t* storage_buffer = storage_->getUnderlyingBuffer() + column_offset;
1294  return reinterpret_cast<const ENTRY_TYPE*>(storage_buffer)[row_idx];
1295 }
std::unique_ptr< ResultSetStorage > storage_
Definition: ResultSet.h:938
const std::vector< const int8_t * > & ResultSet::getColumnFrag ( const size_t  storge_idx,
const size_t  col_logical_idx,
int64_t &  global_idx 
) const
private

Definition at line 1136 of file ResultSetIteration.cpp.

References CHECK_EQ, CHECK_GE, CHECK_LE, CHECK_LT, col_buffers_, consistent_frag_sizes_, frag_offsets_, and anonymous_namespace{ResultSetIteration.cpp}::get_frag_id_and_local_idx().

Referenced by lazyReadInt(), makeGeoTargetValue(), makeTargetValue(), and makeVarlenTargetValue().

1138  {
1139  CHECK_LT(static_cast<size_t>(storage_idx), col_buffers_.size());
1140  if (col_buffers_[storage_idx].size() > 1) {
1141  int64_t frag_id = 0;
1142  int64_t local_idx = global_idx;
1143  if (consistent_frag_sizes_[storage_idx][col_logical_idx] != -1) {
1144  frag_id = global_idx / consistent_frag_sizes_[storage_idx][col_logical_idx];
1145  local_idx = global_idx % consistent_frag_sizes_[storage_idx][col_logical_idx];
1146  } else {
1147  std::tie(frag_id, local_idx) = get_frag_id_and_local_idx(
1148  frag_offsets_[storage_idx], col_logical_idx, global_idx);
1149  CHECK_LE(local_idx, global_idx);
1150  }
1151  CHECK_GE(frag_id, int64_t(0));
1152  CHECK_LT(static_cast<size_t>(frag_id), col_buffers_[storage_idx].size());
1153  global_idx = local_idx;
1154  return col_buffers_[storage_idx][frag_id];
1155  } else {
1156  CHECK_EQ(size_t(1), col_buffers_[storage_idx].size());
1157  return col_buffers_[storage_idx][0];
1158  }
1159 }
#define CHECK_EQ(x, y)
Definition: Logger.h:301
#define CHECK_GE(x, y)
Definition: Logger.h:306
#define CHECK_LT(x, y)
Definition: Logger.h:303
#define CHECK_LE(x, y)
Definition: Logger.h:304
std::vector< std::vector< std::vector< const int8_t * > > > col_buffers_
Definition: ResultSet.h:957
std::vector< std::vector< int64_t > > consistent_frag_sizes_
Definition: ResultSet.h:959
std::vector< std::vector< std::vector< int64_t > > > frag_offsets_
Definition: ResultSet.h:958
std::pair< int64_t, int64_t > get_frag_id_and_local_idx(const std::vector< std::vector< T >> &frag_offsets, const size_t tab_or_col_idx, const int64_t global_idx)

+ Here is the call graph for this function:

+ Here is the caller graph for this function:

size_t ResultSet::getCurrentRowBufferIndex ( ) const

Definition at line 289 of file ResultSet.cpp.

289  {
290  if (crt_row_buff_idx_ == 0) {
291  throw std::runtime_error("current row buffer iteration index is undefined");
292  }
293  return crt_row_buff_idx_ - 1;
294 }
size_t crt_row_buff_idx_
Definition: ResultSet.h:940
Data_Namespace::DataMgr* ResultSet::getDataManager ( ) const
private
int8_t * ResultSet::getDeviceEstimatorBuffer ( ) const

Definition at line 686 of file ResultSet.cpp.

References CHECK, and GPU.

686  {
690 }
virtual int8_t * getMemoryPtr()=0
const ExecutorDeviceType device_type_
Definition: ResultSet.h:935
#define CHECK(condition)
Definition: Logger.h:291
Data_Namespace::AbstractBuffer * device_estimator_buffer_
Definition: ResultSet.h:962
int ResultSet::getDeviceId ( ) const

Definition at line 752 of file ResultSet.cpp.

752  {
753  return device_id_;
754 }
const int device_id_
Definition: ResultSet.h:936
ExecutorDeviceType ResultSet::getDeviceType ( ) const

Definition at line 248 of file ResultSet.cpp.

248  {
249  return device_type_;
250 }
const ExecutorDeviceType device_type_
Definition: ResultSet.h:935
int64_t ResultSet::getDistinctBufferRefFromBufferRowwise ( int8_t *  rowwise_target_ptr,
const TargetInfo target_info 
) const
private
template<typename ENTRY_TYPE , QueryDescriptionType QUERY_TYPE, bool COLUMNAR_FORMAT>
ENTRY_TYPE ResultSet::getEntryAt ( const size_t  row_idx,
const size_t  target_idx,
const size_t  slot_idx 
) const
template<typename ENTRY_TYPE , QueryDescriptionType QUERY_TYPE, bool COLUMNAR_FORMAT>
ENTRY_TYPE ResultSet::getEntryAt ( const size_t  row_idx,
const size_t  target_idx,
const size_t  slot_idx 
) const

Definition at line 1213 of file ResultSetIteration.cpp.

References GroupByBaselineHash, GroupByPerfectHash, and UNREACHABLE.

1215  {
1216  if constexpr (QUERY_TYPE == QueryDescriptionType::GroupByPerfectHash) { // NOLINT
1217  if constexpr (COLUMNAR_FORMAT) { // NOLINT
1218  return getColumnarPerfectHashEntryAt<ENTRY_TYPE>(row_idx, target_idx, slot_idx);
1219  } else {
1220  return getRowWisePerfectHashEntryAt<ENTRY_TYPE>(row_idx, target_idx, slot_idx);
1221  }
1222  } else if constexpr (QUERY_TYPE == QueryDescriptionType::GroupByBaselineHash) {
1223  if constexpr (COLUMNAR_FORMAT) { // NOLINT
1224  return getColumnarBaselineEntryAt<ENTRY_TYPE>(row_idx, target_idx, slot_idx);
1225  } else {
1226  return getRowWiseBaselineEntryAt<ENTRY_TYPE>(row_idx, target_idx, slot_idx);
1227  }
1228  } else {
1229  UNREACHABLE() << "Invalid query type is used";
1230  return 0;
1231  }
1232 }
#define UNREACHABLE()
Definition: Logger.h:337
const long ResultSet::getExecTime ( ) const
inline

Definition at line 499 of file ResultSet.h.

References query_exec_time_.

499 { return query_exec_time_; }
size_t query_exec_time_
Definition: ResultSet.h:983
const Executor* ResultSet::getExecutor ( ) const
inline

Definition at line 627 of file ResultSet.h.

References QueryMemoryDescriptor::getExecutor(), and query_mem_desc_.

627 { return query_mem_desc_.getExecutor(); }
QueryMemoryDescriptor query_mem_desc_
Definition: ResultSet.h:937
const Executor * getExecutor() const

+ Here is the call graph for this function:

std::string ResultSet::getExplanation ( ) const
inline

Definition at line 391 of file ResultSet.h.

References explanation_, and just_explain_.

391  {
392  if (just_explain_) {
393  return explanation_;
394  }
395  return {};
396  }
const bool just_explain_
Definition: ResultSet.h:972
std::string explanation_
Definition: ResultSet.h:971
GeoReturnType ResultSet::getGeoReturnType ( ) const
inline

Definition at line 548 of file ResultSet.h.

References geo_return_type_.

548 { return geo_return_type_; }
GeoReturnType geo_return_type_
Definition: ResultSet.h:978
int ResultSet::getGpuCount ( ) const
private
unsigned ResultSet::getGridSize ( ) const
inline

Definition at line 534 of file ResultSet.h.

References grid_size_.

534 { return grid_size_; }
unsigned grid_size_
Definition: ResultSet.h:948
int8_t * ResultSet::getHostEstimatorBuffer ( ) const

Definition at line 692 of file ResultSet.cpp.

692  {
693  return host_estimator_buffer_;
694 }
int8_t * host_estimator_buffer_
Definition: ResultSet.h:963
std::unordered_set<size_t> ResultSet::getInputTableKeys ( ) const
inline

Definition at line 505 of file ResultSet.h.

References input_table_keys_.

505 { return input_table_keys_; }
std::unordered_set< size_t > input_table_keys_
Definition: ResultSet.h:985
const std::vector<ColumnLazyFetchInfo>& ResultSet::getLazyFetchInfo ( ) const
inline

Definition at line 577 of file ResultSet.h.

References lazy_fetch_info_.

577  {
578  return lazy_fetch_info_;
579  }
std::vector< ColumnLazyFetchInfo > lazy_fetch_info_
Definition: ResultSet.h:956
size_t ResultSet::getLimit ( ) const

Definition at line 1397 of file ResultSet.cpp.

1397  {
1398  return keep_first_;
1399 }
size_t keep_first_
Definition: ResultSet.h:943
size_t ResultSet::getNDVEstimator ( ) const

Definition at line 33 of file CardinalityEstimator.cpp.

References bitmap_set_size(), CHECK, CHECK_LE, LOG, and logger::WARNING.

33  {
34  CHECK(dynamic_cast<const Analyzer::NDVEstimator*>(estimator_.get()));
36  auto bits_set = bitmap_set_size(host_estimator_buffer_, estimator_->getBufferSize());
37  if (bits_set == 0) {
38  // empty result set, return 1 for a groups buffer size of 1
39  return 1;
40  }
41  const auto total_bits = estimator_->getBufferSize() * 8;
42  CHECK_LE(bits_set, total_bits);
43  const auto unset_bits = total_bits - bits_set;
44  const auto ratio = static_cast<double>(unset_bits) / total_bits;
45  if (ratio == 0.) {
46  LOG(WARNING)
47  << "Failed to get a high quality cardinality estimation, falling back to "
48  "approximate group by buffer size guess.";
49  return 0;
50  }
51  return -static_cast<double>(total_bits) * log(ratio);
52 }
#define LOG(tag)
Definition: Logger.h:285
const std::shared_ptr< const Analyzer::Estimator > estimator_
Definition: ResultSet.h:961
#define CHECK_LE(x, y)
Definition: Logger.h:304
int8_t * host_estimator_buffer_
Definition: ResultSet.h:963
#define CHECK(condition)
Definition: Logger.h:291
size_t bitmap_set_size(const int8_t *bitmap, const size_t bitmap_byte_sz)
Definition: CountDistinct.h:37

+ Here is the call graph for this function:

std::vector< TargetValue > ResultSet::getNextRow ( const bool  translate_strings,
const bool  decimal_to_double 
) const

Definition at line 296 of file ResultSetIteration.cpp.

297  {
298  std::lock_guard<std::mutex> lock(row_iteration_mutex_);
299  if (!storage_ && !just_explain_) {
300  return {};
301  }
302  return getNextRowUnlocked(translate_strings, decimal_to_double);
303 }
std::mutex row_iteration_mutex_
Definition: ResultSet.h:975
double decimal_to_double(const SQLTypeInfo &otype, int64_t oval)
std::unique_ptr< ResultSetStorage > storage_
Definition: ResultSet.h:938
const bool just_explain_
Definition: ResultSet.h:972
std::vector< TargetValue > getNextRowUnlocked(const bool translate_strings, const bool decimal_to_double) const
std::vector< TargetValue > ResultSet::getNextRowImpl ( const bool  translate_strings,
const bool  decimal_to_double 
) const
private

Definition at line 318 of file ResultSetIteration.cpp.

References CHECK, and CHECK_EQ.

319  {
320  size_t entry_buff_idx = 0;
321  do {
323  return {};
324  }
325 
326  entry_buff_idx = advanceCursorToNextEntry();
327 
328  if (crt_row_buff_idx_ >= entryCount()) {
330  return {};
331  }
333  ++fetched_so_far_;
334 
335  } while (drop_first_ && fetched_so_far_ <= drop_first_);
336 
337  auto row = getRowAt(entry_buff_idx, translate_strings, decimal_to_double, false);
338  CHECK(!row.empty());
339 
340  return row;
341 }
#define CHECK_EQ(x, y)
Definition: Logger.h:301
double decimal_to_double(const SQLTypeInfo &otype, int64_t oval)
size_t keep_first_
Definition: ResultSet.h:943
size_t drop_first_
Definition: ResultSet.h:942
std::vector< TargetValue > getRowAt(const size_t index) const
size_t entryCount() const
Returns the number of entries the result set is allocated to hold.
#define CHECK(condition)
Definition: Logger.h:291
size_t fetched_so_far_
Definition: ResultSet.h:941
size_t crt_row_buff_idx_
Definition: ResultSet.h:940
size_t advanceCursorToNextEntry() const
std::vector< TargetValue > ResultSet::getNextRowUnlocked ( const bool  translate_strings,
const bool  decimal_to_double 
) const
private

Definition at line 305 of file ResultSetIteration.cpp.

307  {
308  if (just_explain_) {
309  if (fetched_so_far_) {
310  return {};
311  }
312  fetched_so_far_ = 1;
313  return {explanation_};
314  }
315  return getNextRowImpl(translate_strings, decimal_to_double);
316 }
double decimal_to_double(const SQLTypeInfo &otype, int64_t oval)
const bool just_explain_
Definition: ResultSet.h:972
std::string explanation_
Definition: ResultSet.h:971
std::vector< TargetValue > getNextRowImpl(const bool translate_strings, const bool decimal_to_double) const
size_t fetched_so_far_
Definition: ResultSet.h:941
size_t ResultSet::getNumColumnsLazyFetched ( ) const
inline

Definition at line 586 of file ResultSet.h.

References lazy_fetch_info_.

586  {
587  auto is_lazy = [](auto const& info) { return info.is_lazily_fetched; };
588  return std::count_if(lazy_fetch_info_.begin(), lazy_fetch_info_.end(), is_lazy);
589  }
std::vector< ColumnLazyFetchInfo > lazy_fetch_info_
Definition: ResultSet.h:956
OneIntegerColumnRow ResultSet::getOneColRow ( const size_t  index) const

Definition at line 234 of file ResultSetIteration.cpp.

References align_to_int64(), CHECK, get_key_bytes_rowwise(), and row_ptr_rowwise().

234  {
235  const auto storage_lookup_result = findStorage(global_entry_idx);
236  const auto storage = storage_lookup_result.storage_ptr;
237  const auto local_entry_idx = storage_lookup_result.fixedup_entry_idx;
238  if (storage->isEmptyEntry(local_entry_idx)) {
239  return {0, false};
240  }
241  const auto buff = storage->buff_;
242  CHECK(buff);
244  const auto keys_ptr = row_ptr_rowwise(buff, query_mem_desc_, local_entry_idx);
245  const auto key_bytes_with_padding =
247  const auto rowwise_target_ptr = keys_ptr + key_bytes_with_padding;
248  const auto tv = getTargetValueFromBufferRowwise(rowwise_target_ptr,
249  keys_ptr,
250  global_entry_idx,
251  targets_.front(),
252  0,
253  0,
254  false,
255  false,
256  false);
257  const auto scalar_tv = boost::get<ScalarTargetValue>(&tv);
258  CHECK(scalar_tv);
259  const auto ival_ptr = boost::get<int64_t>(scalar_tv);
260  CHECK(ival_ptr);
261  return {*ival_ptr, true};
262 }
QueryMemoryDescriptor query_mem_desc_
Definition: ResultSet.h:937
TargetValue getTargetValueFromBufferRowwise(int8_t *rowwise_target_ptr, int8_t *keys_ptr, const size_t entry_buff_idx, const TargetInfo &target_info, const size_t target_logical_idx, const size_t slot_idx, const bool translate_strings, const bool decimal_to_double, const bool fixup_count_distinct_pointers) const
const std::vector< TargetInfo > targets_
Definition: ResultSet.h:934
StorageLookupResult findStorage(const size_t entry_idx) const
Definition: ResultSet.cpp:939
T row_ptr_rowwise(T buff, const QueryMemoryDescriptor &query_mem_desc, const size_t entry_idx)
#define CHECK(condition)
Definition: Logger.h:291
size_t get_key_bytes_rowwise(const QueryMemoryDescriptor &query_mem_desc)
FORCE_INLINE HOST DEVICE T align_to_int64(T addr)

+ Here is the call graph for this function:

const int8_t ResultSet::getPaddedSlotWidthBytes ( const size_t  slot_idx) const
inline

Definition at line 566 of file ResultSet.h.

References QueryMemoryDescriptor::getPaddedSlotWidthBytes(), and query_mem_desc_.

566  {
567  return query_mem_desc_.getPaddedSlotWidthBytes(slot_idx);
568  }
QueryMemoryDescriptor query_mem_desc_
Definition: ResultSet.h:937
const int8_t getPaddedSlotWidthBytes(const size_t slot_idx) const

+ Here is the call graph for this function:

const Permutation & ResultSet::getPermutationBuffer ( ) const

Definition at line 862 of file ResultSet.cpp.

862  {
863  return permutation_;
864 }
Permutation permutation_
Definition: ResultSet.h:945
QueryDescriptionType ResultSet::getQueryDescriptionType ( ) const
inline

Definition at line 562 of file ResultSet.h.

References QueryMemoryDescriptor::getQueryDescriptionType(), and query_mem_desc_.

562  {
564  }
QueryMemoryDescriptor query_mem_desc_
Definition: ResultSet.h:937
QueryDescriptionType getQueryDescriptionType() const

+ Here is the call graph for this function:

const QueryMemoryDescriptor & ResultSet::getQueryMemDesc ( ) const

Definition at line 672 of file ResultSet.cpp.

References CHECK.

672  {
673  CHECK(storage_);
674  return storage_->query_mem_desc_;
675 }
std::unique_ptr< ResultSetStorage > storage_
Definition: ResultSet.h:938
#define CHECK(condition)
Definition: Logger.h:291
const QueryPlanHash ResultSet::getQueryPlanHash ( )
inline

Definition at line 503 of file ResultSet.h.

References query_plan_.

503 { return query_plan_; }
QueryPlanHash query_plan_
Definition: ResultSet.h:984
int64_t ResultSet::getQueueTime ( ) const

Definition at line 722 of file ResultSet.cpp.

int64_t ResultSet::getRenderTime ( ) const

Definition at line 727 of file ResultSet.cpp.

727  {
728  return timings_.render_time;
729 }
QueryExecutionTimings timings_
Definition: ResultSet.h:949
std::vector<TargetValue> ResultSet::getRowAt ( const size_t  index) const
TargetValue ResultSet::getRowAt ( const size_t  row_idx,
const size_t  col_idx,
const bool  translate_strings,
const bool  decimal_to_double = true 
) const
std::vector<TargetValue> ResultSet::getRowAt ( const size_t  index,
const bool  translate_strings,
const bool  decimal_to_double,
const bool  fixup_count_distinct_pointers,
const std::vector< bool > &  targets_to_skip = {} 
) const
private
std::vector< TargetValue > ResultSet::getRowAtNoTranslations ( const size_t  index,
const std::vector< bool > &  targets_to_skip = {} 
) const

Definition at line 273 of file ResultSetIteration.cpp.

275  {
276  if (logical_index >= entryCount()) {
277  return {};
278  }
279  const auto entry_idx =
280  permutation_.empty() ? logical_index : permutation_[logical_index];
281  return getRowAt(entry_idx, false, false, false, targets_to_skip);
282 }
Permutation permutation_
Definition: ResultSet.h:945
std::vector< TargetValue > getRowAt(const size_t index) const
size_t entryCount() const
Returns the number of entries the result set is allocated to hold.
std::shared_ptr<RowSetMemoryOwner> ResultSet::getRowSetMemOwner ( ) const
inline

Definition at line 447 of file ResultSet.h.

References row_set_mem_owner_.

447  {
448  return row_set_mem_owner_;
449  }
std::shared_ptr< RowSetMemoryOwner > row_set_mem_owner_
Definition: ResultSet.h:944
template<typename ENTRY_TYPE >
ENTRY_TYPE ResultSet::getRowWiseBaselineEntryAt ( const size_t  row_idx,
const size_t  target_idx,
const size_t  slot_idx 
) const
private
template<typename ENTRY_TYPE >
ENTRY_TYPE ResultSet::getRowWiseBaselineEntryAt ( const size_t  row_idx,
const size_t  target_idx,
const size_t  slot_idx 
) const

Directly accesses the result set's storage buffer for a particular data type (row-wise output, baseline hash group by)

NOTE: Currently, only used in direct columnarization

Definition at line 1321 of file ResultSetIteration.cpp.

References CHECK_NE, row_ptr_rowwise(), and storage_.

1323  {
1324  CHECK_NE(storage_->query_mem_desc_.targetGroupbyIndicesSize(), size_t(0));
1325  const auto key_width = storage_->query_mem_desc_.getEffectiveKeyWidth();
1326  auto keys_ptr = row_ptr_rowwise(
1327  storage_->getUnderlyingBuffer(), storage_->query_mem_desc_, row_idx);
1328  const auto column_offset =
1329  (storage_->query_mem_desc_.getTargetGroupbyIndex(target_idx) < 0)
1330  ? storage_->query_mem_desc_.getColOffInBytes(slot_idx)
1331  : storage_->query_mem_desc_.getTargetGroupbyIndex(target_idx) * key_width;
1332  const auto storage_buffer = keys_ptr + column_offset;
1333  return *reinterpret_cast<const ENTRY_TYPE*>(storage_buffer);
1334 }
std::unique_ptr< ResultSetStorage > storage_
Definition: ResultSet.h:938
#define CHECK_NE(x, y)
Definition: Logger.h:302
T row_ptr_rowwise(T buff, const QueryMemoryDescriptor &query_mem_desc, const size_t entry_idx)

+ Here is the call graph for this function:

template<typename ENTRY_TYPE >
ENTRY_TYPE ResultSet::getRowWisePerfectHashEntryAt ( const size_t  row_idx,
const size_t  target_idx,
const size_t  slot_idx 
) const
private
template<typename ENTRY_TYPE >
ENTRY_TYPE ResultSet::getRowWisePerfectHashEntryAt ( const size_t  row_idx,
const size_t  target_idx,
const size_t  slot_idx 
) const

Directly accesses the result set's storage buffer for a particular data type (row-wise output, perfect hash group by)

NOTE: Currently, only used in direct columnarization

Definition at line 1304 of file ResultSetIteration.cpp.

References storage_.

1306  {
1307  const size_t row_offset = storage_->query_mem_desc_.getRowSize() * row_idx;
1308  const size_t column_offset = storage_->query_mem_desc_.getColOffInBytes(slot_idx);
1309  const int8_t* storage_buffer =
1310  storage_->getUnderlyingBuffer() + row_offset + column_offset;
1311  return *reinterpret_cast<const ENTRY_TYPE*>(storage_buffer);
1312 }
std::unique_ptr< ResultSetStorage > storage_
Definition: ResultSet.h:938
std::tuple< std::vector< bool >, size_t > ResultSet::getSingleSlotTargetBitmap ( ) const

Definition at line 1502 of file ResultSet.cpp.

References anonymous_namespace{RelAlgExecutor.cpp}::is_agg(), and kAVG.

1502  {
1503  std::vector<bool> target_bitmap(targets_.size(), true);
1504  size_t num_single_slot_targets = 0;
1505  for (size_t target_idx = 0; target_idx < targets_.size(); target_idx++) {
1506  const auto& sql_type = targets_[target_idx].sql_type;
1507  if (targets_[target_idx].is_agg && targets_[target_idx].agg_kind == kAVG) {
1508  target_bitmap[target_idx] = false;
1509  } else if (sql_type.is_varlen()) {
1510  target_bitmap[target_idx] = false;
1511  } else {
1512  num_single_slot_targets++;
1513  }
1514  }
1515  return std::make_tuple(std::move(target_bitmap), num_single_slot_targets);
1516 }
bool is_agg(const Analyzer::Expr *expr)
const std::vector< TargetInfo > targets_
Definition: ResultSet.h:934
Definition: sqldefs.h:74

+ Here is the call graph for this function:

std::vector< size_t > ResultSet::getSlotIndicesForTargetIndices ( ) const

Definition at line 1546 of file ResultSet.cpp.

References advance_slot().

1546  {
1547  std::vector<size_t> slot_indices(targets_.size(), 0);
1548  size_t slot_index = 0;
1549  for (size_t target_idx = 0; target_idx < targets_.size(); target_idx++) {
1550  slot_indices[target_idx] = slot_index;
1551  slot_index = advance_slot(slot_index, targets_[target_idx], false);
1552  }
1553  return slot_indices;
1554 }
const std::vector< TargetInfo > targets_
Definition: ResultSet.h:934
size_t advance_slot(const size_t j, const TargetInfo &target_info, const bool separate_varlen_storage)

+ Here is the call graph for this function:

const ResultSetStorage * ResultSet::getStorage ( ) const

Definition at line 406 of file ResultSet.cpp.

406  {
407  return storage_.get();
408 }
std::unique_ptr< ResultSetStorage > storage_
Definition: ResultSet.h:938
std::pair< size_t, size_t > ResultSet::getStorageIndex ( const size_t  entry_idx) const
private

Returns a (storageIdx, entryIdx) pair, where storageIdx == 0 refers to storage_, and storageIdx >= 1 refers to appended_storage_[storageIdx - 1]; entryIdx is the local index into that storage object.

Definition at line 914 of file ResultSet.cpp.

References CHECK_NE, and UNREACHABLE.

Referenced by makeGeoTargetValue(), makeTargetValue(), and makeVarlenTargetValue().

914  {
915  size_t fixedup_entry_idx = entry_idx;
916  auto entry_count = storage_->query_mem_desc_.getEntryCount();
917  const bool is_rowwise_layout = !storage_->query_mem_desc_.didOutputColumnar();
918  if (fixedup_entry_idx < entry_count) {
919  return {0, fixedup_entry_idx};
920  }
921  fixedup_entry_idx -= entry_count;
922  for (size_t i = 0; i < appended_storage_.size(); ++i) {
923  const auto& desc = appended_storage_[i]->query_mem_desc_;
924  CHECK_NE(is_rowwise_layout, desc.didOutputColumnar());
925  entry_count = desc.getEntryCount();
926  if (fixedup_entry_idx < entry_count) {
927  return {i + 1, fixedup_entry_idx};
928  }
929  fixedup_entry_idx -= entry_count;
930  }
931  UNREACHABLE() << "entry_idx = " << entry_idx << ", query_mem_desc_.getEntryCount() = "
933  return {};
934 }
AppendedStorage appended_storage_
Definition: ResultSet.h:939
QueryMemoryDescriptor query_mem_desc_
Definition: ResultSet.h:937
#define UNREACHABLE()
Definition: Logger.h:337
std::unique_ptr< ResultSetStorage > storage_
Definition: ResultSet.h:938
#define CHECK_NE(x, y)
Definition: Logger.h:302

+ Here is the caller graph for this function:

std::string ResultSet::getString ( SQLTypeInfo const &  ti,
int64_t const  ival 
) const

Definition at line 1867 of file ResultSetIteration.cpp.

References StringDictionaryProxy::getString(), SQLTypeInfo::getStringDictKey(), and row_set_mem_owner_.

Referenced by anonymous_namespace{ResultSetIteration.cpp}::build_string_array_target_value(), isLessThan(), and makeStringTargetValue().

1867  {
1868  const auto& dict_key = ti.getStringDictKey();
1869  StringDictionaryProxy* sdp;
1870  if (dict_key.dict_id) {
1871  constexpr bool with_generation = false;
1872  sdp = dict_key.db_id > 0
1873  ? row_set_mem_owner_->getOrAddStringDictProxy(dict_key, with_generation)
1874  : row_set_mem_owner_->getStringDictProxy(
1875  dict_key); // unit tests bypass the catalog
1876  } else {
1877  sdp = row_set_mem_owner_->getLiteralStringDictProxy();
1878  }
1879  return sdp->getString(ival);
1880 }
std::string getString(int32_t string_id) const
std::shared_ptr< RowSetMemoryOwner > row_set_mem_owner_
Definition: ResultSet.h:944

+ Here is the call graph for this function:

+ Here is the caller graph for this function:

const std::vector< std::string > ResultSet::getStringDictionaryPayloadCopy ( const shared::StringDictKey dict_key) const

Definition at line 1401 of file ResultSet.cpp.

References CHECK.

1402  {
1403  const auto sdp =
1404  row_set_mem_owner_->getOrAddStringDictProxy(dict_key, /*with_generation=*/true);
1405  CHECK(sdp);
1406  return sdp->getDictionary()->copyStrings();
1407 }
std::shared_ptr< RowSetMemoryOwner > row_set_mem_owner_
Definition: ResultSet.h:944
#define CHECK(condition)
Definition: Logger.h:291
StringDictionaryProxy * ResultSet::getStringDictionaryProxy ( const shared::StringDictKey dict_key) const

Definition at line 423 of file ResultSet.cpp.

References shared::StringDictKey::db_id, shared::StringDictKey::dict_id, and dict_ref_t::literalsDictId.

424  {
425  constexpr bool with_generation = true;
426  return (dict_key.db_id > 0 || dict_key.dict_id == DictRef::literalsDictId)
427  ? row_set_mem_owner_->getOrAddStringDictProxy(dict_key, with_generation)
428  : row_set_mem_owner_->getStringDictProxy(dict_key);
429 }
std::shared_ptr< RowSetMemoryOwner > row_set_mem_owner_
Definition: ResultSet.h:944
static constexpr int32_t literalsDictId
Definition: DictRef.h:18
std::tuple< std::vector< bool >, size_t > ResultSet::getSupportedSingleSlotTargetBitmap ( ) const

This function returns a bitmap (together with its population count) in which set bits denote all supported single-column targets suitable for direct columnarization.

The final goal is to remove the need for such selection, but at the moment for any target that doesn't qualify for direct columnarization, we use the traditional result set's iteration to handle it (e.g., count distinct, approximate count distinct)

Definition at line 1526 of file ResultSet.cpp.

References CHECK, CHECK_GE, is_distinct_target(), kFLOAT, and kSAMPLE.

1527  {
1529  auto [single_slot_targets, num_single_slot_targets] = getSingleSlotTargetBitmap();
1530 
1531  for (size_t target_idx = 0; target_idx < single_slot_targets.size(); target_idx++) {
1532  const auto& target = targets_[target_idx];
1533  if (single_slot_targets[target_idx] &&
1534  (is_distinct_target(target) ||
1535  shared::is_any<kAPPROX_QUANTILE, kMODE>(target.agg_kind) ||
1536  (target.is_agg && target.agg_kind == kSAMPLE && target.sql_type == kFLOAT))) {
1537  single_slot_targets[target_idx] = false;
1538  num_single_slot_targets--;
1539  }
1540  }
1541  CHECK_GE(num_single_slot_targets, size_t(0));
1542  return std::make_tuple(std::move(single_slot_targets), num_single_slot_targets);
1543 }
#define CHECK_GE(x, y)
Definition: Logger.h:306
const std::vector< TargetInfo > targets_
Definition: ResultSet.h:934
std::tuple< std::vector< bool >, size_t > getSingleSlotTargetBitmap() const
Definition: ResultSet.cpp:1502
bool is_distinct_target(const TargetInfo &target_info)
Definition: TargetInfo.h:98
#define CHECK(condition)
Definition: Logger.h:291
bool isDirectColumnarConversionPossible() const
Definition: ResultSet.cpp:1465

+ Here is the call graph for this function:

ChunkStats ResultSet::getTableFunctionChunkStats ( const size_t  target_idx) const
const std::vector< TargetInfo > & ResultSet::getTargetInfos ( ) const

Definition at line 677 of file ResultSet.cpp.

677  {
678  return targets_;
679 }
const std::vector< TargetInfo > targets_
Definition: ResultSet.h:934
const std::vector< int64_t > & ResultSet::getTargetInitVals ( ) const

Definition at line 681 of file ResultSet.cpp.

References CHECK.

681  {
682  CHECK(storage_);
683  return storage_->target_init_vals_;
684 }
std::unique_ptr< ResultSetStorage > storage_
Definition: ResultSet.h:938
#define CHECK(condition)
Definition: Logger.h:291
std::vector<TargetMetaInfo> ResultSet::getTargetMetaInfo ( )
inline

Definition at line 517 of file ResultSet.h.

References target_meta_info_.

517 { return target_meta_info_; }
std::vector< TargetMetaInfo > target_meta_info_
Definition: ResultSet.h:986
TargetValue ResultSet::getTargetValueFromBufferColwise ( const int8_t *  col_ptr,
const int8_t *  keys_ptr,
const QueryMemoryDescriptor query_mem_desc,
const size_t  local_entry_idx,
const size_t  global_entry_idx,
const TargetInfo target_info,
const size_t  target_logical_idx,
const size_t  slot_idx,
const bool  translate_strings,
const bool  decimal_to_double 
) const
private

Definition at line 2028 of file ResultSetIteration.cpp.

References advance_to_next_columnar_target_buff(), TargetInfo::agg_kind, CHECK, CHECK_GE, anonymous_namespace{ResultSetIteration.cpp}::columnar_elem_ptr(), QueryMemoryDescriptor::didOutputColumnar(), QueryMemoryDescriptor::getEffectiveKeyWidth(), QueryMemoryDescriptor::getEntryCount(), QueryMemoryDescriptor::getPaddedSlotWidthBytes(), QueryMemoryDescriptor::getTargetGroupbyIndex(), TargetInfo::is_agg, SQLTypeInfo::is_geometry(), is_real_str_or_array(), kAVG, anonymous_namespace{ResultSetIteration.cpp}::make_avg_target_value(), makeGeoTargetValue(), makeTargetValue(), makeVarlenTargetValue(), query_mem_desc_, TargetInfo::sql_type, and QueryMemoryDescriptor::targetGroupbyIndicesSize().

2038  {
2040  const auto col1_ptr = col_ptr;
2041  const auto compact_sz1 = query_mem_desc.getPaddedSlotWidthBytes(slot_idx);
2042  const auto next_col_ptr =
2043  advance_to_next_columnar_target_buff(col1_ptr, query_mem_desc, slot_idx);
2044  const auto col2_ptr = ((target_info.is_agg && target_info.agg_kind == kAVG) ||
2045  is_real_str_or_array(target_info))
2046  ? next_col_ptr
2047  : nullptr;
2048  const auto compact_sz2 = ((target_info.is_agg && target_info.agg_kind == kAVG) ||
2049  is_real_str_or_array(target_info))
2050  ? query_mem_desc.getPaddedSlotWidthBytes(slot_idx + 1)
2051  : 0;
2052 
2053  // TODO(Saman): add required logics for count distinct
2054  // geospatial target values:
2055  if (target_info.sql_type.is_geometry()) {
2056  return makeGeoTargetValue(
2057  col1_ptr, slot_idx, target_info, target_logical_idx, global_entry_idx);
2058  }
2059 
2060  const auto ptr1 = columnar_elem_ptr(local_entry_idx, col1_ptr, compact_sz1);
2061  if (target_info.agg_kind == kAVG || is_real_str_or_array(target_info)) {
2062  CHECK(col2_ptr);
2063  CHECK(compact_sz2);
2064  const auto ptr2 = columnar_elem_ptr(local_entry_idx, col2_ptr, compact_sz2);
2065  return target_info.agg_kind == kAVG
2066  ? make_avg_target_value(ptr1, compact_sz1, ptr2, compact_sz2, target_info)
2067  : makeVarlenTargetValue(ptr1,
2068  compact_sz1,
2069  ptr2,
2070  compact_sz2,
2071  target_info,
2072  target_logical_idx,
2073  translate_strings,
2074  global_entry_idx);
2075  }
2077  query_mem_desc_.getTargetGroupbyIndex(target_logical_idx) < 0) {
2078  return makeTargetValue(ptr1,
2079  compact_sz1,
2080  target_info,
2081  target_logical_idx,
2082  translate_strings,
2084  global_entry_idx);
2085  }
2086  const auto key_width = query_mem_desc_.getEffectiveKeyWidth();
2087  const auto key_idx = query_mem_desc_.getTargetGroupbyIndex(target_logical_idx);
2088  CHECK_GE(key_idx, 0);
2089  auto key_col_ptr = keys_ptr + key_idx * query_mem_desc_.getEntryCount() * key_width;
2090  return makeTargetValue(columnar_elem_ptr(local_entry_idx, key_col_ptr, key_width),
2091  key_width,
2092  target_info,
2093  target_logical_idx,
2094  translate_strings,
2096  global_entry_idx);
2097 }
double decimal_to_double(const SQLTypeInfo &otype, int64_t oval)
int64_t getTargetGroupbyIndex(const size_t target_idx) const
T advance_to_next_columnar_target_buff(T target_ptr, const QueryMemoryDescriptor &query_mem_desc, const size_t target_slot_idx)
SQLTypeInfo sql_type
Definition: TargetInfo.h:52
QueryMemoryDescriptor query_mem_desc_
Definition: ResultSet.h:937
#define CHECK_GE(x, y)
Definition: Logger.h:306
size_t getEffectiveKeyWidth() const
TargetValue makeGeoTargetValue(const int8_t *geo_target_ptr, const size_t slot_idx, const TargetInfo &target_info, const size_t target_logical_idx, const size_t entry_buff_idx) const
TargetValue make_avg_target_value(const int8_t *ptr1, const int8_t compact_sz1, const int8_t *ptr2, const int8_t compact_sz2, const TargetInfo &target_info)
bool is_agg
Definition: TargetInfo.h:50
size_t targetGroupbyIndicesSize() const
const int8_t getPaddedSlotWidthBytes(const size_t slot_idx) const
SQLAgg agg_kind
Definition: TargetInfo.h:51
bool is_real_str_or_array(const TargetInfo &target_info)
TargetValue makeTargetValue(const int8_t *ptr, const int8_t compact_sz, const TargetInfo &target_info, const size_t target_logical_idx, const bool translate_strings, const bool decimal_to_double, const size_t entry_buff_idx) const
#define CHECK(condition)
Definition: Logger.h:291
bool is_geometry() const
Definition: sqltypes.h:592
Definition: sqldefs.h:74
const int8_t * columnar_elem_ptr(const size_t entry_idx, const int8_t *col1_ptr, const int8_t compact_sz1)
TargetValue makeVarlenTargetValue(const int8_t *ptr1, const int8_t compact_sz1, const int8_t *ptr2, const int8_t compact_sz2, const TargetInfo &target_info, const size_t target_logical_idx, const bool translate_strings, const size_t entry_buff_idx) const

+ Here is the call graph for this function:

TargetValue ResultSet::getTargetValueFromBufferRowwise ( int8_t *  rowwise_target_ptr,
int8_t *  keys_ptr,
const size_t  entry_buff_idx,
const TargetInfo target_info,
const size_t  target_logical_idx,
const size_t  slot_idx,
const bool  translate_strings,
const bool  decimal_to_double,
const bool  fixup_count_distinct_pointers 
) const
private

Definition at line 2101 of file ResultSetIteration.cpp.

References TargetInfo::agg_kind, CHECK, QueryMemoryDescriptor::count_distinct_descriptors_, SQLTypeInfo::get_compression(), QueryMemoryDescriptor::getEffectiveKeyWidth(), QueryMemoryDescriptor::getLogicalSlotWidthBytes(), QueryMemoryDescriptor::getPaddedSlotWidthBytes(), QueryMemoryDescriptor::getTargetGroupbyIndex(), QueryMemoryDescriptor::hasKeylessHash(), TargetInfo::is_agg, SQLTypeInfo::is_array(), is_distinct_target(), SQLTypeInfo::is_geometry(), is_real_str_or_array(), SQLTypeInfo::is_string(), QueryMemoryDescriptor::isSingleColumnGroupByWithPerfectHash(), kAVG, kENCODING_NONE, anonymous_namespace{ResultSetIteration.cpp}::make_avg_target_value(), makeGeoTargetValue(), makeTargetValue(), makeVarlenTargetValue(), query_mem_desc_, row_set_mem_owner_, separate_varlen_storage_valid_, TargetInfo::sql_type, storage_, QueryMemoryDescriptor::targetGroupbyIndicesSize(), and UNLIKELY.

2110  {
2111  if (UNLIKELY(fixup_count_distinct_pointers)) {
2112  if (is_distinct_target(target_info)) {
2113  auto count_distinct_ptr_ptr = reinterpret_cast<int64_t*>(rowwise_target_ptr);
2114  const auto remote_ptr = *count_distinct_ptr_ptr;
2115  if (remote_ptr) {
2116  const auto ptr = storage_->mappedPtr(remote_ptr);
2117  if (ptr) {
2118  *count_distinct_ptr_ptr = ptr;
2119  } else {
2120  // need to create a zero filled buffer for this remote_ptr
2121  const auto& count_distinct_desc =
2122  query_mem_desc_.count_distinct_descriptors_[target_logical_idx];
2123  const auto bitmap_byte_sz = count_distinct_desc.sub_bitmap_count == 1
2124  ? count_distinct_desc.bitmapSizeBytes()
2125  : count_distinct_desc.bitmapPaddedSizeBytes();
2126  auto count_distinct_buffer = row_set_mem_owner_->allocateCountDistinctBuffer(
2127  bitmap_byte_sz, /*thread_idx=*/0);
2128  *count_distinct_ptr_ptr = reinterpret_cast<int64_t>(count_distinct_buffer);
2129  }
2130  }
2131  }
2132  return int64_t(0);
2133  }
2134  if (target_info.sql_type.is_geometry()) {
2135  return makeGeoTargetValue(
2136  rowwise_target_ptr, slot_idx, target_info, target_logical_idx, entry_buff_idx);
2137  }
2138 
2139  auto ptr1 = rowwise_target_ptr;
2140  int8_t compact_sz1 = query_mem_desc_.getPaddedSlotWidthBytes(slot_idx);
2142  !query_mem_desc_.hasKeylessHash() && !target_info.is_agg) {
2143  // Single column perfect hash group by can utilize one slot for both the key and the
2144  // target value if both values fit in 8 bytes. Use the target value actual size for
2145  // this case. If they don't, the target value should be 8 bytes, so we can still use
2146  // the actual size rather than the compact size.
2147  compact_sz1 = query_mem_desc_.getLogicalSlotWidthBytes(slot_idx);
2148  }
2149 
2150  // logic for deciding width of column
2151  if (target_info.agg_kind == kAVG || is_real_str_or_array(target_info)) {
2152  const auto ptr2 =
2153  rowwise_target_ptr + query_mem_desc_.getPaddedSlotWidthBytes(slot_idx);
2154  int8_t compact_sz2 = 0;
2155  // Skip reading the second slot if we have a none encoded string and are using
2156  // the none encoded strings buffer attached to ResultSetStorage
2158  (target_info.sql_type.is_array() ||
2159  (target_info.sql_type.is_string() &&
2160  target_info.sql_type.get_compression() == kENCODING_NONE)))) {
2161  compact_sz2 = query_mem_desc_.getPaddedSlotWidthBytes(slot_idx + 1);
2162  }
2163  if (separate_varlen_storage_valid_ && target_info.is_agg) {
2164  compact_sz2 = 8; // TODO(adb): is there a better way to do this?
2165  }
2166  CHECK(ptr2);
2167  return target_info.agg_kind == kAVG
2168  ? make_avg_target_value(ptr1, compact_sz1, ptr2, compact_sz2, target_info)
2169  : makeVarlenTargetValue(ptr1,
2170  compact_sz1,
2171  ptr2,
2172  compact_sz2,
2173  target_info,
2174  target_logical_idx,
2175  translate_strings,
2176  entry_buff_idx);
2177  }
2179  query_mem_desc_.getTargetGroupbyIndex(target_logical_idx) < 0) {
2180  return makeTargetValue(ptr1,
2181  compact_sz1,
2182  target_info,
2183  target_logical_idx,
2184  translate_strings,
2186  entry_buff_idx);
2187  }
2188  const auto key_width = query_mem_desc_.getEffectiveKeyWidth();
2189  ptr1 = keys_ptr + query_mem_desc_.getTargetGroupbyIndex(target_logical_idx) * key_width;
2190  return makeTargetValue(ptr1,
2191  key_width,
2192  target_info,
2193  target_logical_idx,
2194  translate_strings,
2196  entry_buff_idx);
2197 }
double decimal_to_double(const SQLTypeInfo &otype, int64_t oval)
int64_t getTargetGroupbyIndex(const size_t target_idx) const
SQLTypeInfo sql_type
Definition: TargetInfo.h:52
QueryMemoryDescriptor query_mem_desc_
Definition: ResultSet.h:937
std::unique_ptr< ResultSetStorage > storage_
Definition: ResultSet.h:938
size_t getEffectiveKeyWidth() const
TargetValue makeGeoTargetValue(const int8_t *geo_target_ptr, const size_t slot_idx, const TargetInfo &target_info, const size_t target_logical_idx, const size_t entry_buff_idx) const
TargetValue make_avg_target_value(const int8_t *ptr1, const int8_t compact_sz1, const int8_t *ptr2, const int8_t compact_sz2, const TargetInfo &target_info)
std::shared_ptr< RowSetMemoryOwner > row_set_mem_owner_
Definition: ResultSet.h:944
bool is_agg
Definition: TargetInfo.h:50
size_t targetGroupbyIndicesSize() const
CountDistinctDescriptors count_distinct_descriptors_
bool is_distinct_target(const TargetInfo &target_info)
Definition: TargetInfo.h:98
const int8_t getPaddedSlotWidthBytes(const size_t slot_idx) const
SQLAgg agg_kind
Definition: TargetInfo.h:51
#define UNLIKELY(x)
Definition: likely.h:25
bool is_real_str_or_array(const TargetInfo &target_info)
bool isSingleColumnGroupByWithPerfectHash() const
HOST DEVICE EncodingType get_compression() const
Definition: sqltypes.h:389
TargetValue makeTargetValue(const int8_t *ptr, const int8_t compact_sz, const TargetInfo &target_info, const size_t target_logical_idx, const bool translate_strings, const bool decimal_to_double, const size_t entry_buff_idx) const
#define CHECK(condition)
Definition: Logger.h:291
bool is_geometry() const
Definition: sqltypes.h:592
bool separate_varlen_storage_valid_
Definition: ResultSet.h:970
bool is_string() const
Definition: sqltypes.h:580
const int8_t getLogicalSlotWidthBytes(const size_t slot_idx) const
Definition: sqldefs.h:74
bool is_array() const
Definition: sqltypes.h:588
TargetValue makeVarlenTargetValue(const int8_t *ptr1, const int8_t compact_sz1, const int8_t *ptr2, const int8_t compact_sz2, const TargetInfo &target_info, const size_t target_logical_idx, const bool translate_strings, const size_t entry_buff_idx) const

+ Here is the call graph for this function:

const std::pair< std::vector< int32_t >, std::vector< std::string > > ResultSet::getUniqueStringsForDictEncodedTargetCol ( const size_t  col_idx) const

Definition at line 1410 of file ResultSet.cpp.

References CHECK, and inline_fixed_encoding_null_val().

// Collects the distinct dictionary-encoded string ids appearing in column
// `col_idx` (nulls skipped), then resolves them through the column's string
// dictionary proxy. Returns {ids, strings}, index-aligned.
1410  {
1411  const auto col_type_info = getColType(col_idx);
1412  std::unordered_set<int32_t> unique_string_ids_set;
1413  const size_t num_entries = entryCount();
// Skip every target except `col_idx` when materializing rows below.
1414  std::vector<bool> targets_to_skip(colCount(), true);
1415  targets_to_skip[col_idx] = false;
1416  CHECK(col_type_info.is_dict_encoded_type()); // Array<Text> or Text
// For array columns the null sentinel is that of the element type.
1417  const int64_t null_val = inline_fixed_encoding_null_val(
1418  col_type_info.is_array() ? col_type_info.get_elem_type() : col_type_info);
1419 
1420  for (size_t row_idx = 0; row_idx < num_entries; ++row_idx) {
// No translation: scalar values are raw dictionary ids (int64_t).
1421  const auto result_row = getRowAtNoTranslations(row_idx, targets_to_skip);
1422  if (!result_row.empty()) {
1423  if (const auto scalar_col_val =
1424  boost::get<ScalarTargetValue>(&result_row[col_idx])) {
1425  const int32_t string_id =
1426  static_cast<int32_t>(boost::get<int64_t>(*scalar_col_val));
1427  if (string_id != null_val) {
1428  unique_string_ids_set.emplace(string_id);
1429  }
1430  } else if (const auto array_col_val =
1431  boost::get<ArrayTargetValue>(&result_row[col_idx])) {
// ArrayTargetValue is optional-like; dereference test filters null arrays.
1432  if (*array_col_val) {
1433  for (const ScalarTargetValue& scalar : array_col_val->value()) {
1434  const int32_t string_id = static_cast<int32_t>(boost::get<int64_t>(scalar));
1435  if (string_id != null_val) {
1436  unique_string_ids_set.emplace(string_id);
1437  }
1438  }
1439  }
1440  }
1441  }
1442  }
1443 
// Flatten the set into a vector (order is the set's iteration order).
1444  const size_t num_unique_strings = unique_string_ids_set.size();
1445  std::vector<int32_t> unique_string_ids(num_unique_strings);
1446  size_t string_idx{0};
1447  for (const auto unique_string_id : unique_string_ids_set) {
1448  unique_string_ids[string_idx++] = unique_string_id;
1449  }
1450 
1451  const auto sdp = row_set_mem_owner_->getOrAddStringDictProxy(
1452  col_type_info.getStringDictKey(), /*with_generation=*/true);
1453  CHECK(sdp);
1454 
1455  return std::make_pair(unique_string_ids, sdp->getStrings(unique_string_ids));
1456 }
size_t colCount() const
Definition: ResultSet.cpp:410
std::vector< TargetValue > getRowAtNoTranslations(const size_t index, const std::vector< bool > &targets_to_skip={}) const
std::shared_ptr< RowSetMemoryOwner > row_set_mem_owner_
Definition: ResultSet.h:944
SQLTypeInfo getColType(const size_t col_idx) const
Definition: ResultSet.cpp:414
size_t entryCount() const
Returns the number of entries the result set is allocated to hold.
#define CHECK(condition)
Definition: Logger.h:291
int64_t inline_fixed_encoding_null_val(const SQL_TYPE_INFO &ti)
boost::variant< int64_t, double, float, NullableString > ScalarTargetValue
Definition: TargetValue.h:180

+ Here is the call graph for this function:

InternalTargetValue ResultSet::getVarlenOrderEntry ( const int64_t  str_ptr,
const size_t  str_len 
) const
private

Definition at line 627 of file ResultSetIteration.cpp.

References CHECK, CPU, device_id_, device_type_, QueryMemoryDescriptor::getExecutor(), getQueryEngineCudaStreamForDevice(), GPU, query_mem_desc_, and row_set_mem_owner_.

// Materializes a varlen (string) value for ordering: copies it to host memory
// if it lives on the GPU, then interns it in the row-set memory owner.
// NOTE(doc): lines 631 and 642 were collapsed by the doc generator; per the
// cross-references they branch on device_type_ (GPU vs CPU) — confirm in
// ResultSetIteration.cpp:627.
628  {
629  char* host_str_ptr{nullptr};
630  std::vector<int8_t> cpu_buffer;
// GPU path (condition collapsed): copy str_len bytes device -> host.
632  cpu_buffer.resize(str_len);
633  const auto executor = query_mem_desc_.getExecutor();
634  CHECK(executor);
635  auto data_mgr = executor->getDataMgr();
636  auto allocator = std::make_unique<CudaAllocator>(
637  data_mgr, device_id_, getQueryEngineCudaStreamForDevice(device_id_));
638  allocator->copyFromDevice(
639  &cpu_buffer[0], reinterpret_cast<int8_t*>(str_ptr), str_len);
640  host_str_ptr = reinterpret_cast<char*>(&cpu_buffer[0]);
641  } else {
// CPU path: str_ptr already addresses host memory.
643  host_str_ptr = reinterpret_cast<char*>(str_ptr);
644  }
645  std::string str(host_str_ptr, str_len);
// addString returns an owner-managed string; wrap its address for comparison.
646  return InternalTargetValue(row_set_mem_owner_->addString(str));
647 }
QueryMemoryDescriptor query_mem_desc_
Definition: ResultSet.h:937
std::shared_ptr< RowSetMemoryOwner > row_set_mem_owner_
Definition: ResultSet.h:944
CUstream getQueryEngineCudaStreamForDevice(int device_num)
Definition: QueryEngine.cpp:7
const ExecutorDeviceType device_type_
Definition: ResultSet.h:935
#define CHECK(condition)
Definition: Logger.h:291
const Executor * getExecutor() const
const int device_id_
Definition: ResultSet.h:936

+ Here is the call graph for this function:

const VarlenOutputInfo * ResultSet::getVarlenOutputInfo ( const size_t  entry_idx) const
private

Definition at line 1161 of file ResultSetIteration.cpp.

References CHECK, and findStorage().

Referenced by makeGeoTargetValue().

// Resolves the storage fragment owning `entry_idx` and forwards to its
// varlen-output metadata. The storage pointer is CHECKed non-null.
1161  {
1162  auto storage_lookup_result = findStorage(entry_idx);
1163  CHECK(storage_lookup_result.storage_ptr);
1164  return storage_lookup_result.storage_ptr->getVarlenOutputInfo();
1165 }
StorageLookupResult findStorage(const size_t entry_idx) const
Definition: ResultSet.cpp:939
#define CHECK(condition)
Definition: Logger.h:291

+ Here is the call graph for this function:

+ Here is the caller graph for this function:

const bool ResultSet::hasValidBuffer ( ) const
inline

Definition at line 525 of file ResultSet.h.

References storage_.

525  {
526  if (storage_) {
527  return true;
528  }
529  return false;
530  }
std::unique_ptr< ResultSetStorage > storage_
Definition: ResultSet.h:938
void ResultSet::holdChunkIterators ( const std::shared_ptr< std::list< ChunkIter >>  chunk_iters)
inline

Definition at line 440 of file ResultSet.h.

References chunk_iters_.

440  {
441  chunk_iters_.push_back(chunk_iters);
442  }
std::vector< std::shared_ptr< std::list< ChunkIter > > > chunk_iters_
Definition: ResultSet.h:952
void ResultSet::holdChunks ( const std::list< std::shared_ptr< Chunk_NS::Chunk >> &  chunks)
inline

Definition at line 437 of file ResultSet.h.

References chunks_.

437  {
438  chunks_ = chunks;
439  }
std::list< std::shared_ptr< Chunk_NS::Chunk > > chunks_
Definition: ResultSet.h:951
void ResultSet::holdLiterals ( std::vector< int8_t > &  literal_buff)
inline

Definition at line 443 of file ResultSet.h.

References literal_buffers_.

443  {
444  literal_buffers_.push_back(std::move(literal_buff));
445  }
std::vector< std::vector< int8_t > > literal_buffers_
Definition: ResultSet.h:955
void ResultSet::initializeStorage ( ) const

Definition at line 1043 of file ResultSetReduction.cpp.

// Initializes the owned storage buffer, columnar- or row-wise.
// NOTE(doc): line 1044 (the branch condition) was collapsed by the doc
// generator — presumably it tests query_mem_desc_ for columnar output;
// confirm in ResultSetReduction.cpp:1043.
1043  {
1045  storage_->initializeColWise();
1046  } else {
1047  storage_->initializeRowWise();
1048  }
1049 }
QueryMemoryDescriptor query_mem_desc_
Definition: ResultSet.h:937
std::unique_ptr< ResultSetStorage > storage_
Definition: ResultSet.h:938
PermutationView ResultSet::initPermutationBuffer ( PermutationView  permutation,
PermutationIdx const  begin,
PermutationIdx const  end 
) const
private

Definition at line 846 of file ResultSet.cpp.

References CHECK, DEBUG_TIMER, and VectorView< T >::push_back().

// Appends to `permutation` the indices in [begin, end) whose storage entry is
// non-empty, and returns the (possibly grown) view. Empty entries are thereby
// excluded from any subsequent sort.
848  {
849  auto timer = DEBUG_TIMER(__func__);
850  for (PermutationIdx i = begin; i < end; ++i) {
// Map the global entry index to its owning storage and local offset.
851  const auto storage_lookup_result = findStorage(i);
852  const auto lhs_storage = storage_lookup_result.storage_ptr;
853  const auto off = storage_lookup_result.fixedup_entry_idx;
854  CHECK(lhs_storage);
855  if (!lhs_storage->isEmptyEntry(off)) {
856  permutation.push_back(i);
857  }
858  }
859  return permutation;
860 }
DEVICE void push_back(T const &value)
Definition: VectorView.h:73
StorageLookupResult findStorage(const size_t entry_idx) const
Definition: ResultSet.cpp:939
uint32_t PermutationIdx
Definition: ResultSet.h:152
#define CHECK(condition)
Definition: Logger.h:291
#define DEBUG_TIMER(name)
Definition: Logger.h:411

+ Here is the call graph for this function:

void ResultSet::initStatus ( )
inline

Definition at line 470 of file ResultSet.h.

References clearPermutation(), crt_row_buff_idx_, drop_first_, fetched_so_far_, invalidateCachedRowCount(), keep_first_, setGeoReturnType(), and WktString.

// Resets iteration state so the result set can be reused from the beginning.
// NOTE(doc): lines 475-477 were collapsed by the doc generator; per the
// cross-references they reset the geo return type (WktString), invalidate the
// cached row count, and clear the permutation buffer — confirm in ResultSet.h:470.
470  {
471  // TODO(yoonmin): determine what additional state must be reset to make
472  // the status of the result set completely clean for reuse.
473  crt_row_buff_idx_ = 0;
474  fetched_so_far_ = 0;
478  drop_first_ = 0;
479  keep_first_ = 0;
480  }
void setGeoReturnType(const GeoReturnType val)
Definition: ResultSet.h:549
size_t keep_first_
Definition: ResultSet.h:943
void invalidateCachedRowCount() const
Definition: ResultSet.cpp:605
size_t drop_first_
Definition: ResultSet.h:942
size_t fetched_so_far_
Definition: ResultSet.h:941
size_t crt_row_buff_idx_
Definition: ResultSet.h:940
void clearPermutation()
Definition: ResultSet.h:464

+ Here is the call graph for this function:

void ResultSet::invalidateCachedRowCount ( ) const

Definition at line 605 of file ResultSet.cpp.

References uninitialized_cached_row_count.

Referenced by initStatus().

// Resets the memoized row count so the next rowCount() call recomputes it.
// NOTE(doc): line 606 was collapsed by the doc generator; per the
// cross-references it stores uninitialized_cached_row_count into the atomic
// cached_row_count_ — confirm in ResultSet.cpp:605.
605  {
607 }
std::atomic< int64_t > cached_row_count_
Definition: ResultSet.h:974
constexpr int64_t uninitialized_cached_row_count
Definition: ResultSet.cpp:52

+ Here is the caller graph for this function:

void ResultSet::invalidateResultSetChunks ( )
inline

Definition at line 482 of file ResultSet.h.

References chunk_iters_, and chunks_.

482  {
483  if (!chunks_.empty()) {
484  chunks_.clear();
485  }
486  if (!chunk_iters_.empty()) {
487  chunk_iters_.clear();
488  }
489  };
std::vector< std::shared_ptr< std::list< ChunkIter > > > chunk_iters_
Definition: ResultSet.h:952
std::list< std::shared_ptr< Chunk_NS::Chunk > > chunks_
Definition: ResultSet.h:951
const bool ResultSet::isCached ( ) const
inline

Definition at line 495 of file ResultSet.h.

References cached_.

// True iff the cached_ flag has been set for this result set.
495 { return cached_; }
bool cached_
Definition: ResultSet.h:981
bool ResultSet::isDirectColumnarConversionPossible ( ) const

Determines if it is possible to directly form a ColumnarResults class from this result set, bypassing the default columnarization.

NOTE: If there exists a permutation vector (i.e., in some ORDER BY queries), it becomes equivalent to the row-wise columnarization.

Definition at line 1465 of file ResultSet.cpp.

References CHECK, g_enable_direct_columnarization, GroupByBaselineHash, GroupByPerfectHash, Projection, and TableFunction.

Referenced by copyColumnIntoBuffer().

// Decides whether a ColumnarResults can be formed directly from this result
// set; a non-empty permutation_ (ORDER BY) always disqualifies the fast path.
// NOTE(doc): lines 1466 and 1470-1483 were collapsed by the doc generator;
// per the cross-references they test g_enable_direct_columnarization and
// compare the query description type against Projection / TableFunction /
// GroupByPerfectHash / GroupByBaselineHash — confirm in ResultSet.cpp:1465.
1465  {
1467  return false;
1468  } else if (query_mem_desc_.didOutputColumnar()) {
1469  return permutation_.empty() && (query_mem_desc_.getQueryDescriptionType() ==
1477  } else {
1480  return permutation_.empty() && (query_mem_desc_.getQueryDescriptionType() ==
1484  }
1485 }
Permutation permutation_
Definition: ResultSet.h:945
bool g_enable_direct_columnarization
Definition: Execute.cpp:122
QueryMemoryDescriptor query_mem_desc_
Definition: ResultSet.h:937
QueryDescriptionType getQueryDescriptionType() const
#define CHECK(condition)
Definition: Logger.h:291

+ Here is the caller graph for this function:

bool ResultSet::isEmpty ( ) const

Returns a boolean signifying whether there are valid entries in the result set.

Note a result set can be logically empty even if the value returned by ResultSet::entryCount() is > 0, whether due to a SQL LIMIT/OFFSET applied or because the result set representation is inherently sparse (i.e. baseline hash group by).

Internally this function is just implemented as ResultSet::rowCount() == 0, which caches its value so the row count will only be computed once per finalized result set.

Definition at line 649 of file ResultSet.cpp.

// True when the result set has no valid rows; implemented via rowCount()
// so emptiness and row-count logic cannot diverge.
649  {
650  // To simplify this function and de-dup logic with ResultSet::rowCount()
651  // (mismatches between the two were causing bugs), we modified this function
652  // to simply fetch rowCount(). The potential downside of this approach is that
653  // in some cases more work will need to be done, as we can't just stop at the first row.
654  // Mitigating that for most cases is the following:
655  // 1) rowCount() is cached, so the logic for actually computing row counts will run only
656  // once
657  // per result set.
658  // 2) If the cache is empty (cached_row_count_ == -1), rowCount() will use parallel
659  // methods if deemed appropriate, which in many cases could be faster for a sparse
660  // large result set than single-threaded iteration from the beginning
661  // 3) Often where isEmpty() is needed, rowCount() is also needed. Since the first call
662  // to rowCount()
663  // will be cached, there is no extra overhead in these cases
664 
665  return rowCount() == size_t(0);
666 }
size_t rowCount(const bool force_parallel=false) const
Returns the number of valid entries in the result set (i.e that will be returned from the SQL query o...
Definition: ResultSet.cpp:593
const bool ResultSet::isEstimator ( ) const
inline

Definition at line 491 of file ResultSet.h.

References estimator_.

// NOTE(review): returns true when estimator_ is NOT set, which is the
// opposite of what the name suggests — confirm the intended polarity
// against callers before relying on it.
491 { return !estimator_; }
const std::shared_ptr< const Analyzer::Estimator > estimator_
Definition: ResultSet.h:961
bool ResultSet::isExplain ( ) const

Definition at line 740 of file ResultSet.cpp.

// True for EXPLAIN-only result sets (just_explain_ is set by the
// explanation-string constructor — see the ResultSet(const std::string&) ctor).
740  {
741  return just_explain_;
742 }
const bool just_explain_
Definition: ResultSet.h:972
bool ResultSet::isGeoColOnGpu ( const size_t  col_idx) const

Definition at line 1479 of file ResultSetIteration.cpp.

References CHECK_LT, device_type_, GPU, IS_GEO, lazy_fetch_info_, separate_varlen_storage_valid_, targets_, and to_string().

// Reports whether the geo column at `col_idx` still resides on the GPU
// (i.e. makeGeoTargetValue would need a device-to-host fetch). Throws if the
// target is not a geo column.
1479  {
1480  // This should match the logic in makeGeoTargetValue which ultimately calls
1481  // fetch_data_from_gpu when the geo column is on the device.
1482  // TODO(croot): somehow find a way to refactor this and makeGeoTargetValue to use a
1483  // utility function that handles this logic in one place
1484  CHECK_LT(col_idx, targets_.size());
1485  if (!IS_GEO(targets_[col_idx].sql_type.get_type())) {
1486  throw std::runtime_error("Column target at index " + std::to_string(col_idx) +
1487  " is not a geo column. It is of type " +
1488  targets_[col_idx].sql_type.get_type_name() + ".");
1489  }
1490 
// Separately stored varlen data for non-aggregate targets is host-resident.
1491  const auto& target_info = targets_[col_idx];
1492  if (separate_varlen_storage_valid_ && !target_info.is_agg) {
1493  return false;
1494  }
1495 
// Lazily fetched columns are materialized on the host on demand.
1496  if (!lazy_fetch_info_.empty()) {
1497  CHECK_LT(col_idx, lazy_fetch_info_.size());
1498  if (lazy_fetch_info_[col_idx].is_lazily_fetched) {
1499  return false;
1500  }
1501  }
1502 
// NOTE(doc): line 1503 (the final return) was collapsed by the doc generator;
// per the cross-references it presumably compares device_type_ against GPU —
// confirm in ResultSetIteration.cpp:1479.
1504 }
std::string to_string(char const *&&v)
const std::vector< TargetInfo > targets_
Definition: ResultSet.h:934
#define CHECK_LT(x, y)
Definition: Logger.h:303
const ExecutorDeviceType device_type_
Definition: ResultSet.h:935
std::vector< ColumnLazyFetchInfo > lazy_fetch_info_
Definition: ResultSet.h:956
bool separate_varlen_storage_valid_
Definition: ResultSet.h:970
#define IS_GEO(T)
Definition: sqltypes.h:300

+ Here is the call graph for this function:

bool ResultSet::isLessThan ( SQLTypeInfo const &  ti,
int64_t const  lhs,
int64_t const  rhs 
) const

Definition at line 1111 of file ResultSetIteration.cpp.

References shared::bit_cast(), CHECK_EQ, SQLTypeInfo::get_compression(), getString(), SQLTypeInfo::is_any(), SQLTypeInfo::is_string(), kDOUBLE, kENCODING_DICT, and kFLOAT.

1113  {
1114  if (ti.is_string()) {
1115  CHECK_EQ(kENCODING_DICT, ti.get_compression());
1116  return getString(ti, lhs) < getString(ti, rhs);
1117  } else {
1118  return ti.is_any<kDOUBLE>()
1119  ? shared::bit_cast<double>(lhs) < shared::bit_cast<double>(rhs)
1120  : ti.is_any<kFLOAT>()
1121  ? shared::bit_cast<float>(lhs) < shared::bit_cast<float>(rhs)
1122  : lhs < rhs;
1123  }
1124 }
#define CHECK_EQ(x, y)
Definition: Logger.h:301
TO bit_cast(FROM &&from)
Definition: misc.h:298
std::string getString(SQLTypeInfo const &, int64_t const ival) const

+ Here is the call graph for this function:

bool ResultSet::isNull ( const SQLTypeInfo ti,
const InternalTargetValue val,
const bool  float_argument_input 
)
staticprivate

Definition at line 2344 of file ResultSetIteration.cpp.

References CHECK, SQLTypeInfo::get_notnull(), InternalTargetValue::i1, InternalTargetValue::i2, InternalTargetValue::isInt(), InternalTargetValue::isNull(), InternalTargetValue::isPair(), InternalTargetValue::isStr(), and null_val_bit_pattern().

// Decides whether `val` encodes SQL NULL for type `ti`, honoring the
// float-as-argument calling convention when matching bit patterns.
2346  {
// NOT NULL columns can never hold a null value.
2347  if (ti.get_notnull()) {
2348  return false;
2349  }
// Integer payloads: compare against the type's null bit pattern.
2350  if (val.isInt()) {
2351  return val.i1 == null_val_bit_pattern(ti, float_argument_input);
2352  }
// Pair payloads: a zero second component marks null — presumably the
// count slot of an AVG pair; TODO confirm against InternalTargetValue.
2353  if (val.isPair()) {
2354  return !val.i2;
2355  }
// String payloads: a zero (null) pointer marks null.
2356  if (val.isStr()) {
2357  return !val.i1;
2358  }
// Only the explicit null representation remains.
2359  CHECK(val.isNull());
2360  return true;
2361 }
bool isPair() const
Definition: TargetValue.h:65
bool isStr() const
Definition: TargetValue.h:69
int64_t null_val_bit_pattern(const SQLTypeInfo &ti, const bool float_argument_input)
bool isNull() const
Definition: TargetValue.h:67
bool isInt() const
Definition: TargetValue.h:63
#define CHECK(condition)
Definition: Logger.h:291
HOST DEVICE bool get_notnull() const
Definition: sqltypes.h:388

+ Here is the call graph for this function:

bool ResultSet::isNullIval ( SQLTypeInfo const &  ti,
bool const  translate_strings,
int64_t const  ival 
)
static

Definition at line 1126 of file ResultSetIteration.cpp.

References inline_int_null_val(), SQLTypeInfo::is_any(), SQLTypeInfo::is_string(), kDOUBLE, kFLOAT, NULL_DOUBLE, NULL_FLOAT, and NULL_INT.

Referenced by makeTargetValue().

1128  {
1129  return ti.is_any<kDOUBLE>() ? shared::bit_cast<double>(ival) == NULL_DOUBLE
1130  : ti.is_any<kFLOAT>() ? shared::bit_cast<float>(ival) == NULL_FLOAT
1131  : ti.is_string() ? translate_strings ? ival == NULL_INT : ival == 0
1132  : ival == inline_int_null_val(ti);
1133 }
#define NULL_DOUBLE
#define NULL_FLOAT
#define NULL_INT
int64_t inline_int_null_val(const SQL_TYPE_INFO &ti)

+ Here is the call graph for this function:

+ Here is the caller graph for this function:

const bool ResultSet::isPermutationBufferEmpty ( ) const
inline

Definition at line 452 of file ResultSet.h.

References permutation_.

// True when no row permutation (produced for sorted results) is present.
452 { return permutation_.empty(); };
Permutation permutation_
Definition: ResultSet.h:945
bool ResultSet::isRowAtEmpty ( const size_t  index) const

Definition at line 284 of file ResultSetIteration.cpp.

// True when the row at `logical_index` is an empty (invalid) entry; indices
// past the allocated entry count are treated as empty.
284  {
285  if (logical_index >= entryCount()) {
286  return true;
287  }
// Map the logical index through the sort permutation, when one exists.
288  const auto entry_idx =
289  permutation_.empty() ? logical_index : permutation_[logical_index];
290  const auto storage_lookup_result = findStorage(entry_idx);
291  const auto storage = storage_lookup_result.storage_ptr;
292  const auto local_entry_idx = storage_lookup_result.fixedup_entry_idx;
// NOTE(review): storage_ptr is dereferenced without a CHECK here, unlike
// sibling callers of findStorage — confirm findStorage guarantees non-null.
293  return storage->isEmptyEntry(local_entry_idx);
294 }
Permutation permutation_
Definition: ResultSet.h:945
StorageLookupResult findStorage(const size_t entry_idx) const
Definition: ResultSet.cpp:939
size_t entryCount() const
Returns the number of entries the result set is allocated to hold.
bool ResultSet::isTruncated ( ) const

Definition at line 736 of file ResultSet.cpp.

736  {
737  return keep_first_ + drop_first_;
738 }
size_t keep_first_
Definition: ResultSet.h:943
size_t drop_first_
Definition: ResultSet.h:942
bool ResultSet::isValidationOnlyRes ( ) const

Definition at line 748 of file ResultSet.cpp.

// True for result sets produced solely to validate a query
// (for_validation_only_ flag).
748  {
749  return for_validation_only_;
750 }
bool for_validation_only_
Definition: ResultSet.h:973
bool ResultSet::isZeroCopyColumnarConversionPossible ( size_t  column_idx) const

Definition at line 1487 of file ResultSet.cpp.

References Projection, and TableFunction.

// Zero-copy columnar conversion needs: a single (non-appended) storage
// buffer, and the requested column not lazily fetched.
// NOTE(doc): lines 1488-1491 were collapsed by the doc generator; per the
// cross-references they additionally require columnar output and a
// Projection / TableFunction query description type — confirm in
// ResultSet.cpp:1487.
1487  {
1492  appended_storage_.empty() && storage_ &&
1493  (lazy_fetch_info_.empty() || !lazy_fetch_info_[column_idx].is_lazily_fetched);
1494 }
AppendedStorage appended_storage_
Definition: ResultSet.h:939
QueryMemoryDescriptor query_mem_desc_
Definition: ResultSet.h:937
std::unique_ptr< ResultSetStorage > storage_
Definition: ResultSet.h:938
QueryDescriptionType getQueryDescriptionType() const
std::vector< ColumnLazyFetchInfo > lazy_fetch_info_
Definition: ResultSet.h:956
void ResultSet::keepFirstN ( const size_t  n)

Definition at line 54 of file ResultSet.cpp.

References anonymous_namespace{Utm.h}::n.

// Applies a LIMIT of n rows to iteration over this result set.
// NOTE(doc): line 55 was collapsed by the doc generator; per the
// cross-references it presumably calls invalidateCachedRowCount() so the
// memoized row count reflects the new limit — confirm in ResultSet.cpp:54.
54  {
56  keep_first_ = n;
57 }
size_t keep_first_
Definition: ResultSet.h:943
void invalidateCachedRowCount() const
Definition: ResultSet.cpp:605
constexpr double n
Definition: Utm.h:38
int64_t ResultSet::lazyReadInt ( const int64_t  ival,
const size_t  target_logical_idx,
const StorageLookupResult storage_lookup_result 
) const
private

Definition at line 649 of file ResultSetIteration.cpp.

References CHECK, CHECK_LT, ChunkIter_get_nth(), col_buffers_, ResultSet::StorageLookupResult::fixedup_entry_idx, getColumnFrag(), VarlenDatum::is_null, kENCODING_NONE, result_set::lazy_decode(), lazy_fetch_info_, VarlenDatum::length, VarlenDatum::pointer, row_set_mem_owner_, ResultSet::StorageLookupResult::storage_idx, and targets_.

// Resolves a lazily-fetched integer slot: when the target column is lazily
// fetched, decodes the real value from the fragment buffers (interning
// none-encoded strings in the row-set memory owner); otherwise returns `ival`
// unchanged.
651  {
652  if (!lazy_fetch_info_.empty()) {
653  CHECK_LT(target_logical_idx, lazy_fetch_info_.size());
654  const auto& col_lazy_fetch = lazy_fetch_info_[target_logical_idx];
655  if (col_lazy_fetch.is_lazily_fetched) {
656  CHECK_LT(static_cast<size_t>(storage_lookup_result.storage_idx),
657  col_buffers_.size());
// getColumnFrag may rewrite ival_copy into a fragment-local index.
658  int64_t ival_copy = ival;
659  auto& frag_col_buffers =
660  getColumnFrag(static_cast<size_t>(storage_lookup_result.storage_idx),
661  target_logical_idx,
662  ival_copy);
663  auto& frag_col_buffer = frag_col_buffers[col_lazy_fetch.local_col_id];
664  CHECK_LT(target_logical_idx, targets_.size());
665  const TargetInfo& target_info = targets_[target_logical_idx];
666  CHECK(!target_info.is_agg);
// None-encoded strings: pull the varlen datum via the chunk iterator.
// NOTE(doc): line 671 (the ChunkIter_get_nth( call opener) was collapsed by
// the doc generator — confirm in ResultSetIteration.cpp:649.
667  if (target_info.sql_type.is_string() &&
668  target_info.sql_type.get_compression() == kENCODING_NONE) {
669  VarlenDatum vd;
670  bool is_end{false};
672  reinterpret_cast<ChunkIter*>(const_cast<int8_t*>(frag_col_buffer)),
673  storage_lookup_result.fixedup_entry_idx,
674  false,
675  &vd,
676  &is_end);
677  CHECK(!is_end);
// Null strings are represented as 0 (a null owner-string pointer).
678  if (vd.is_null) {
679  return 0;
680  }
681  std::string fetched_str(reinterpret_cast<char*>(vd.pointer), vd.length);
682  return reinterpret_cast<int64_t>(row_set_mem_owner_->addString(fetched_str));
683  }
// All other lazily-fetched types decode through the shared helper.
684  return result_set::lazy_decode(col_lazy_fetch, frag_col_buffer, ival_copy);
685  }
686  }
// Not lazily fetched: the slot already holds the final value.
687  return ival;
688 }
bool is_null
Definition: Datum.h:55
DEVICE void ChunkIter_get_nth(ChunkIter *it, int n, bool uncompress, VarlenDatum *result, bool *is_end)
Definition: ChunkIter.cpp:182
int8_t * pointer
Definition: Datum.h:54
const std::vector< TargetInfo > targets_
Definition: ResultSet.h:934
std::shared_ptr< RowSetMemoryOwner > row_set_mem_owner_
Definition: ResultSet.h:944
int64_t lazy_decode(const ColumnLazyFetchInfo &col_lazy_fetch, const int8_t *byte_stream, const int64_t pos)
#define CHECK_LT(x, y)
Definition: Logger.h:303
std::vector< std::vector< std::vector< const int8_t * > > > col_buffers_
Definition: ResultSet.h:957
#define CHECK(condition)
Definition: Logger.h:291
std::vector< ColumnLazyFetchInfo > lazy_fetch_info_
Definition: ResultSet.h:956
const std::vector< const int8_t * > & getColumnFrag(const size_t storge_idx, const size_t col_logical_idx, int64_t &global_idx) const
size_t length
Definition: Datum.h:53

+ Here is the call graph for this function:

TargetValue ResultSet::makeGeoTargetValue ( const int8_t *  geo_target_ptr,
const size_t  slot_idx,
const TargetInfo target_info,
const size_t  target_logical_idx,
const size_t  entry_buff_idx 
) const
private

Definition at line 1510 of file ResultSetIteration.cpp.

References advance_to_next_columnar_target_buff(), CHECK, CHECK_EQ, CHECK_LT, col_buffers_, device_id_, device_type_, QueryMemoryDescriptor::didOutputColumnar(), findStorage(), geo_return_type_, SQLTypeInfo::get_compression(), SQLTypeInfo::get_type(), SQLTypeInfo::get_type_name(), getColumnFrag(), QueryMemoryDescriptor::getExecutor(), QueryMemoryDescriptor::getPaddedColWidthForRange(), QueryMemoryDescriptor::getPaddedSlotWidthBytes(), getStorageIndex(), getVarlenOutputInfo(), GPU, TargetInfo::is_agg, SQLTypeInfo::is_geometry(), ColumnLazyFetchInfo::is_lazily_fetched, kENCODING_GEOINT, kLINESTRING, kMULTILINESTRING, kMULTIPOINT, kMULTIPOLYGON, kPOINT, kPOLYGON, lazy_fetch_info_, ColumnLazyFetchInfo::local_col_id, query_mem_desc_, read_int_from_buff(), separate_varlen_storage_valid_, serialized_varlen_buffer_, QueryMemoryDescriptor::slotIsVarlenOutput(), TargetInfo::sql_type, and UNREACHABLE.

Referenced by getTargetValueFromBufferColwise(), and getTargetValueFromBufferRowwise().

1514  {
1515  CHECK(target_info.sql_type.is_geometry());
1516 
1517  auto getNextTargetBufferRowWise = [&](const size_t slot_idx, const size_t range) {
1518  return geo_target_ptr + query_mem_desc_.getPaddedColWidthForRange(slot_idx, range);
1519  };
1520 
1521  auto getNextTargetBufferColWise = [&](const size_t slot_idx, const size_t range) {
1522  const auto storage_info = findStorage(entry_buff_idx);
1523  auto crt_geo_col_ptr = geo_target_ptr;
1524  for (size_t i = slot_idx; i < slot_idx + range; i++) {
1525  crt_geo_col_ptr = advance_to_next_columnar_target_buff(
1526  crt_geo_col_ptr, storage_info.storage_ptr->query_mem_desc_, i);
1527  }
1528  // adjusting the column pointer to represent a pointer to the geo target value
1529  return crt_geo_col_ptr +
1530  storage_info.fixedup_entry_idx *
1531  storage_info.storage_ptr->query_mem_desc_.getPaddedSlotWidthBytes(
1532  slot_idx + range);
1533  };
1534 
1535  auto getNextTargetBuffer = [&](const size_t slot_idx, const size_t range) {
1536  return query_mem_desc_.didOutputColumnar()
1537  ? getNextTargetBufferColWise(slot_idx, range)
1538  : getNextTargetBufferRowWise(slot_idx, range);
1539  };
1540 
1541  auto getCoordsDataPtr = [&](const int8_t* geo_target_ptr) {
1542  return read_int_from_buff(getNextTargetBuffer(slot_idx, 0),
1543  query_mem_desc_.getPaddedSlotWidthBytes(slot_idx));
1544  };
1545 
1546  auto getCoordsLength = [&](const int8_t* geo_target_ptr) {
1547  return read_int_from_buff(getNextTargetBuffer(slot_idx, 1),
1548  query_mem_desc_.getPaddedSlotWidthBytes(slot_idx + 1));
1549  };
1550 
1551  auto getRingSizesPtr = [&](const int8_t* geo_target_ptr) {
1552  return read_int_from_buff(getNextTargetBuffer(slot_idx, 2),
1553  query_mem_desc_.getPaddedSlotWidthBytes(slot_idx + 2));
1554  };
1555 
1556  auto getRingSizesLength = [&](const int8_t* geo_target_ptr) {
1557  return read_int_from_buff(getNextTargetBuffer(slot_idx, 3),
1558  query_mem_desc_.getPaddedSlotWidthBytes(slot_idx + 3));
1559  };
1560 
1561  auto getPolyRingsPtr = [&](const int8_t* geo_target_ptr) {
1562  return read_int_from_buff(getNextTargetBuffer(slot_idx, 4),
1563  query_mem_desc_.getPaddedSlotWidthBytes(slot_idx + 4));
1564  };
1565 
1566  auto getPolyRingsLength = [&](const int8_t* geo_target_ptr) {
1567  return read_int_from_buff(getNextTargetBuffer(slot_idx, 5),
1568  query_mem_desc_.getPaddedSlotWidthBytes(slot_idx + 5));
1569  };
1570 
1571  auto getFragColBuffers = [&]() -> decltype(auto) {
1572  const auto storage_idx = getStorageIndex(entry_buff_idx);
1573  CHECK_LT(storage_idx.first, col_buffers_.size());
1574  auto global_idx = getCoordsDataPtr(geo_target_ptr);
1575  return getColumnFrag(storage_idx.first, target_logical_idx, global_idx);
1576  };
1577 
1578  const bool is_gpu_fetch = device_type_ == ExecutorDeviceType::GPU;
1579 
1580  auto getDataMgr = [&]() {
1581  auto executor = query_mem_desc_.getExecutor();
1582  CHECK(executor);
1583  return executor->getDataMgr();
1584  };
1585 
1586  auto getSeparateVarlenStorage = [&]() -> decltype(auto) {
1587  const auto storage_idx = getStorageIndex(entry_buff_idx);
1588  CHECK_LT(storage_idx.first, serialized_varlen_buffer_.size());
1589  const auto& varlen_buffer = serialized_varlen_buffer_[storage_idx.first];
1590  return varlen_buffer;
1591  };
1592 
1593  if (separate_varlen_storage_valid_ && getCoordsDataPtr(geo_target_ptr) < 0) {
1594  CHECK_EQ(-1, getCoordsDataPtr(geo_target_ptr));
1595  return TargetValue(nullptr);
1596  }
1597 
1598  const ColumnLazyFetchInfo* col_lazy_fetch = nullptr;
1599  if (!lazy_fetch_info_.empty()) {
1600  CHECK_LT(target_logical_idx, lazy_fetch_info_.size());
1601  col_lazy_fetch = &lazy_fetch_info_[target_logical_idx];
1602  }
1603 
1604  switch (target_info.sql_type.get_type()) {
1605  case kPOINT: {
1606  if (query_mem_desc_.slotIsVarlenOutput(slot_idx)) {
1607  auto varlen_output_info = getVarlenOutputInfo(entry_buff_idx);
1608  CHECK(varlen_output_info);
1609  auto geo_data_ptr = read_int_from_buff(
1610  geo_target_ptr, query_mem_desc_.getPaddedSlotWidthBytes(slot_idx));
1611  auto cpu_data_ptr =
1612  reinterpret_cast<int64_t>(varlen_output_info->computeCpuOffset(geo_data_ptr));
1613  return GeoTargetValueBuilder<kPOINT, GeoQueryOutputFetchHandler>::build(
1614  target_info.sql_type,
1615  geo_return_type_,
1616  /*data_mgr=*/nullptr,
1617  /*is_gpu_fetch=*/false,
1618  device_id_,
1619  cpu_data_ptr,
1620  target_info.sql_type.get_compression() == kENCODING_GEOINT ? 8 : 16);
1621  } else if (separate_varlen_storage_valid_ && !target_info.is_agg) {
1622  const auto& varlen_buffer = getSeparateVarlenStorage();
1623  CHECK_LT(static_cast<size_t>(getCoordsDataPtr(geo_target_ptr)),
1624  varlen_buffer.size());
1625 
1626  return GeoTargetValueBuilder<kPOINT, GeoQueryOutputFetchHandler>::build(
1627  target_info.sql_type,
1628  geo_return_type_,
1629  nullptr,
1630  false,
1631  device_id_,
1632  reinterpret_cast<int64_t>(
1633  varlen_buffer[getCoordsDataPtr(geo_target_ptr)].data()),
1634  static_cast<int64_t>(varlen_buffer[getCoordsDataPtr(geo_target_ptr)].size()));
1635  } else if (col_lazy_fetch && col_lazy_fetch->is_lazily_fetched) {
1636  const auto& frag_col_buffers = getFragColBuffers();
1637  return GeoTargetValueBuilder<kPOINT, GeoLazyFetchHandler>::build(
1638  target_info.sql_type,
1639  geo_return_type_,
1640  frag_col_buffers[col_lazy_fetch->local_col_id],
1641  getCoordsDataPtr(geo_target_ptr));
1642  } else {
1643  return GeoTargetValueBuilder<kPOINT, GeoQueryOutputFetchHandler>::build(
1644  target_info.sql_type,
1645  geo_return_type_,
1646  is_gpu_fetch ? getDataMgr() : nullptr,
1647  is_gpu_fetch,
1648  device_id_,
1649  getCoordsDataPtr(geo_target_ptr),
1650  getCoordsLength(geo_target_ptr));
1651  }
1652  break;
1653  }
1654  case kMULTIPOINT: {
1655  if (separate_varlen_storage_valid_ && !target_info.is_agg) {
1656  const auto& varlen_buffer = getSeparateVarlenStorage();
1657  CHECK_LT(static_cast<size_t>(getCoordsDataPtr(geo_target_ptr)),
1658  varlen_buffer.size());
1659 
1660  return GeoTargetValueBuilder<kMULTIPOINT, GeoQueryOutputFetchHandler>::build(
1661  target_info.sql_type,
1662  geo_return_type_,
1663  nullptr,
1664  false,
1665  device_id_,
1666  reinterpret_cast<int64_t>(
1667  varlen_buffer[getCoordsDataPtr(geo_target_ptr)].data()),
1668  static_cast<int64_t>(varlen_buffer[getCoordsDataPtr(geo_target_ptr)].size()));
1669  } else if (col_lazy_fetch && col_lazy_fetch->is_lazily_fetched) {
1670  const auto& frag_col_buffers = getFragColBuffers();
1671  return GeoTargetValueBuilder<kMULTIPOINT, GeoLazyFetchHandler>::build(
1672  target_info.sql_type,
1673  geo_return_type_,
1674  frag_col_buffers[col_lazy_fetch->local_col_id],
1675  getCoordsDataPtr(geo_target_ptr));
1676  } else {
1677  return GeoTargetValueBuilder<kMULTIPOINT, GeoQueryOutputFetchHandler>::build(
1678  target_info.sql_type,
1679  geo_return_type_,
1680  is_gpu_fetch ? getDataMgr() : nullptr,
1681  is_gpu_fetch,
1682  device_id_,
1683  getCoordsDataPtr(geo_target_ptr),
1684  getCoordsLength(geo_target_ptr));
1685  }
1686  break;
1687  }
1688  case kLINESTRING: {
1689  if (separate_varlen_storage_valid_ && !target_info.is_agg) {
1690  const auto& varlen_buffer = getSeparateVarlenStorage();
1691  CHECK_LT(static_cast<size_t>(getCoordsDataPtr(geo_target_ptr)),
1692  varlen_buffer.size());
1693 
1694  return GeoTargetValueBuilder<kLINESTRING, GeoQueryOutputFetchHandler>::build(
1695  target_info.sql_type,
1696  geo_return_type_,
1697  nullptr,
1698  false,
1699  device_id_,
1700  reinterpret_cast<int64_t>(
1701  varlen_buffer[getCoordsDataPtr(geo_target_ptr)].data()),
1702  static_cast<int64_t>(varlen_buffer[getCoordsDataPtr(geo_target_ptr)].size()));
1703  } else if (col_lazy_fetch && col_lazy_fetch->is_lazily_fetched) {
1704  const auto& frag_col_buffers = getFragColBuffers();
1705  return GeoTargetValueBuilder<kLINESTRING, GeoLazyFetchHandler>::build(
1706  target_info.sql_type,
1707  geo_return_type_,
1708  frag_col_buffers[col_lazy_fetch->local_col_id],
1709  getCoordsDataPtr(geo_target_ptr));
1710  } else {
1711  return GeoTargetValueBuilder<kLINESTRING, GeoQueryOutputFetchHandler>::build(
1712  target_info.sql_type,
1713  geo_return_type_,
1714  is_gpu_fetch ? getDataMgr() : nullptr,
1715  is_gpu_fetch,
1716  device_id_,
1717  getCoordsDataPtr(geo_target_ptr),
1718  getCoordsLength(geo_target_ptr));
1719  }
1720  break;
1721  }
1722  case kMULTILINESTRING: {
1723  if (separate_varlen_storage_valid_ && !target_info.is_agg) {
1724  const auto& varlen_buffer = getSeparateVarlenStorage();
1725  CHECK_LT(static_cast<size_t>(getCoordsDataPtr(geo_target_ptr) + 1),
1726  varlen_buffer.size());
1727 
1728  return GeoTargetValueBuilder<kMULTILINESTRING, GeoQueryOutputFetchHandler>::build(
1729  target_info.sql_type,
1730  geo_return_type_,
1731  nullptr,
1732  false,
1733  device_id_,
1734  reinterpret_cast<int64_t>(
1735  varlen_buffer[getCoordsDataPtr(geo_target_ptr)].data()),
1736  static_cast<int64_t>(varlen_buffer[getCoordsDataPtr(geo_target_ptr)].size()),
1737  reinterpret_cast<int64_t>(
1738  varlen_buffer[getCoordsDataPtr(geo_target_ptr) + 1].data()),
1739  static_cast<int64_t>(
1740  varlen_buffer[getCoordsDataPtr(geo_target_ptr) + 1].size()));
1741  } else if (col_lazy_fetch && col_lazy_fetch->is_lazily_fetched) {
1742  const auto& frag_col_buffers = getFragColBuffers();
1743 
1744  return GeoTargetValueBuilder<kMULTILINESTRING, GeoLazyFetchHandler>::build(
1745  target_info.sql_type,
1746  geo_return_type_,
1747  frag_col_buffers[col_lazy_fetch->local_col_id],
1748  getCoordsDataPtr(geo_target_ptr),
1749  frag_col_buffers[col_lazy_fetch->local_col_id + 1],
1750  getCoordsDataPtr(geo_target_ptr));
1751  } else {
1752  return GeoTargetValueBuilder<kMULTILINESTRING, GeoQueryOutputFetchHandler>::build(
1753  target_info.sql_type,
1754  geo_return_type_,
1755  is_gpu_fetch ? getDataMgr() : nullptr,
1756  is_gpu_fetch,
1757  device_id_,
1758  getCoordsDataPtr(geo_target_ptr),
1759  getCoordsLength(geo_target_ptr),
1760  getRingSizesPtr(geo_target_ptr),
1761  getRingSizesLength(geo_target_ptr) * 4);
1762  }
1763  break;
1764  }
1765  case kPOLYGON: {
1766  if (separate_varlen_storage_valid_ && !target_info.is_agg) {
1767  const auto& varlen_buffer = getSeparateVarlenStorage();
1768  CHECK_LT(static_cast<size_t>(getCoordsDataPtr(geo_target_ptr) + 1),
1769  varlen_buffer.size());
1770 
1771  return GeoTargetValueBuilder<kPOLYGON, GeoQueryOutputFetchHandler>::build(
1772  target_info.sql_type,
1773  geo_return_type_,
1774  nullptr,
1775  false,
1776  device_id_,
1777  reinterpret_cast<int64_t>(
1778  varlen_buffer[getCoordsDataPtr(geo_target_ptr)].data()),
1779  static_cast<int64_t>(varlen_buffer[getCoordsDataPtr(geo_target_ptr)].size()),
1780  reinterpret_cast<int64_t>(
1781  varlen_buffer[getCoordsDataPtr(geo_target_ptr) + 1].data()),
1782  static_cast<int64_t>(
1783  varlen_buffer[getCoordsDataPtr(geo_target_ptr) + 1].size()));
1784  } else if (col_lazy_fetch && col_lazy_fetch->is_lazily_fetched) {
1785  const auto& frag_col_buffers = getFragColBuffers();
1786 
1787  return GeoTargetValueBuilder<kPOLYGON, GeoLazyFetchHandler>::build(
1788  target_info.sql_type,
1789  geo_return_type_,
1790  frag_col_buffers[col_lazy_fetch->local_col_id],
1791  getCoordsDataPtr(geo_target_ptr),
1792  frag_col_buffers[col_lazy_fetch->local_col_id + 1],
1793  getCoordsDataPtr(geo_target_ptr));
1794  } else {
1795  return GeoTargetValueBuilder<kPOLYGON, GeoQueryOutputFetchHandler>::build(
1796  target_info.sql_type,
1797  geo_return_type_,
1798  is_gpu_fetch ? getDataMgr() : nullptr,
1799  is_gpu_fetch,
1800  device_id_,
1801  getCoordsDataPtr(geo_target_ptr),
1802  getCoordsLength(geo_target_ptr),
1803  getRingSizesPtr(geo_target_ptr),
1804  getRingSizesLength(geo_target_ptr) * 4);
1805  }
1806  break;
1807  }
1808  case kMULTIPOLYGON: {
1809  if (separate_varlen_storage_valid_ && !target_info.is_agg) {
1810  const auto& varlen_buffer = getSeparateVarlenStorage();
1811  CHECK_LT(static_cast<size_t>(getCoordsDataPtr(geo_target_ptr) + 2),
1812  varlen_buffer.size());
1813 
1814  return GeoTargetValueBuilder<kMULTIPOLYGON, GeoQueryOutputFetchHandler>::build(
1815  target_info.sql_type,
1816  geo_return_type_,
1817  nullptr,
1818  false,
1819  device_id_,
1820  reinterpret_cast<int64_t>(
1821  varlen_buffer[getCoordsDataPtr(geo_target_ptr)].data()),
1822  static_cast<int64_t>(varlen_buffer[getCoordsDataPtr(geo_target_ptr)].size()),
1823  reinterpret_cast<int64_t>(
1824  varlen_buffer[getCoordsDataPtr(geo_target_ptr) + 1].data()),
1825  static_cast<int64_t>(
1826  varlen_buffer[getCoordsDataPtr(geo_target_ptr) + 1].size()),
1827  reinterpret_cast<int64_t>(
1828  varlen_buffer[getCoordsDataPtr(geo_target_ptr) + 2].data()),
1829  static_cast<int64_t>(
1830  varlen_buffer[getCoordsDataPtr(geo_target_ptr) + 2].size()));
1831  } else if (col_lazy_fetch && col_lazy_fetch->is_lazily_fetched) {
1832  const auto& frag_col_buffers = getFragColBuffers();
1833 
1834  return GeoTargetValueBuilder<kMULTIPOLYGON, GeoLazyFetchHandler>::build(
1835  target_info.sql_type,
1836  geo_return_type_,
1837  frag_col_buffers[col_lazy_fetch->local_col_id],
1838  getCoordsDataPtr(geo_target_ptr),
1839  frag_col_buffers[col_lazy_fetch->local_col_id + 1],
1840  getCoordsDataPtr(geo_target_ptr),
1841  frag_col_buffers[col_lazy_fetch->local_col_id + 2],
1842  getCoordsDataPtr(geo_target_ptr));
1843  } else {
1844  return GeoTargetValueBuilder<kMULTIPOLYGON, GeoQueryOutputFetchHandler>::build(
1845  target_info.sql_type,
1846  geo_return_type_,
1847  is_gpu_fetch ? getDataMgr() : nullptr,
1848  is_gpu_fetch,
1849  device_id_,
1850  getCoordsDataPtr(geo_target_ptr),
1851  getCoordsLength(geo_target_ptr),
1852  getRingSizesPtr(geo_target_ptr),
1853  getRingSizesLength(geo_target_ptr) * 4,
1854  getPolyRingsPtr(geo_target_ptr),
1855  getPolyRingsLength(geo_target_ptr) * 4);
1856  }
1857  break;
1858  }
1859  default:
1860  throw std::runtime_error("Unknown Geometry type encountered: " +
1861  target_info.sql_type.get_type_name());
1862  }
1863  UNREACHABLE();
1864  return TargetValue(nullptr);
1865 }
#define CHECK_EQ(x, y)
Definition: Logger.h:301
bool slotIsVarlenOutput(const size_t slot_idx) const
std::pair< size_t, size_t > getStorageIndex(const size_t entry_idx) const
Definition: ResultSet.cpp:914
GeoReturnType geo_return_type_
Definition: ResultSet.h:978
T advance_to_next_columnar_target_buff(T target_ptr, const QueryMemoryDescriptor &query_mem_desc, const size_t target_slot_idx)
SQLTypeInfo sql_type
Definition: TargetInfo.h:52
QueryMemoryDescriptor query_mem_desc_
Definition: ResultSet.h:937
#define UNREACHABLE()
Definition: Logger.h:337
int64_t read_int_from_buff(const int8_t *ptr, const int8_t compact_sz)
std::vector< SerializedVarlenBufferStorage > serialized_varlen_buffer_
Definition: ResultSet.h:969
HOST DEVICE SQLTypes get_type() const
Definition: sqltypes.h:381
bool is_agg
Definition: TargetInfo.h:50
size_t getPaddedColWidthForRange(const size_t offset, const size_t range) const
StorageLookupResult findStorage(const size_t entry_idx) const
Definition: ResultSet.cpp:939
const int8_t getPaddedSlotWidthBytes(const size_t slot_idx) const
const VarlenOutputInfo * getVarlenOutputInfo(const size_t entry_idx) const
#define CHECK_LT(x, y)
Definition: Logger.h:303
HOST DEVICE EncodingType get_compression() const
Definition: sqltypes.h:389
std::vector< std::vector< std::vector< const int8_t * > > > col_buffers_
Definition: ResultSet.h:957
std::string get_type_name() const
Definition: sqltypes.h:507
const bool is_lazily_fetched
const ExecutorDeviceType device_type_
Definition: ResultSet.h:935
#define CHECK(condition)
Definition: Logger.h:291
bool is_geometry() const
Definition: sqltypes.h:592
std::vector< ColumnLazyFetchInfo > lazy_fetch_info_
Definition: ResultSet.h:956
bool separate_varlen_storage_valid_
Definition: ResultSet.h:970
boost::variant< ScalarTargetValue, ArrayTargetValue, GeoTargetValue, GeoTargetValuePtr > TargetValue
Definition: TargetValue.h:195
const std::vector< const int8_t * > & getColumnFrag(const size_t storge_idx, const size_t col_logical_idx, int64_t &global_idx) const
const Executor * getExecutor() const
const int device_id_
Definition: ResultSet.h:936

+ Here is the call graph for this function:

+ Here is the caller graph for this function:

ScalarTargetValue ResultSet::makeStringTargetValue ( SQLTypeInfo const &  chosen_type,
bool const  translate_strings,
int64_t const  ival 
) const
private

Definition at line 1882 of file ResultSetIteration.cpp.

References getString(), and NULL_INT.

Referenced by convertToScalarTargetValue(), and makeTargetValue().

1884  {
1885  if (translate_strings) {
1886  if (static_cast<int32_t>(ival) == NULL_INT) { // TODO(alex): this isn't nice, fix it
1887  return NullableString(nullptr);
1888  } else {
1889  return NullableString(getString(chosen_type, ival));
1890  }
1891  } else {
1892  return static_cast<int64_t>(static_cast<int32_t>(ival));
1893  }
1894 }
#define NULL_INT
boost::variant< std::string, void * > NullableString
Definition: TargetValue.h:179
std::string getString(SQLTypeInfo const &, int64_t const ival) const

+ Here is the call graph for this function:

+ Here is the caller graph for this function:

TargetValue ResultSet::makeTargetValue ( const int8_t *  ptr,
const int8_t  compact_sz,
const TargetInfo target_info,
const size_t  target_logical_idx,
const bool  translate_strings,
const bool  decimal_to_double,
const size_t  entry_buff_idx 
) const
private

Definition at line 1897 of file ResultSetIteration.cpp.

References TargetInfo::agg_kind, calculateQuantile(), CHECK, CHECK_EQ, CHECK_GE, CHECK_LT, col_buffers_, convertToScalarTargetValue(), count_distinct_set_size(), decimal_to_int_type(), exp_to_scale(), QueryMemoryDescriptor::forceFourByteFloat(), get_compact_type(), getColumnFrag(), QueryMemoryDescriptor::getCountDistinctDescriptor(), getStorageIndex(), inline_int_null_val(), anonymous_namespace{ResultSetIteration.cpp}::int_resize_cast(), TargetInfo::is_agg, SQLTypeInfo::is_date_in_days(), is_distinct_target(), QueryMemoryDescriptor::isLogicalSizedColumnsAllowed(), isNullIval(), kAPPROX_QUANTILE, kAVG, kBIGINT, kENCODING_DICT, kFLOAT, kMAX, kMIN, kMODE, kSINGLE_VALUE, kSUM, kSUM_IF, result_set::lazy_decode(), lazy_fetch_info_, makeStringTargetValue(), NULL_DOUBLE, nullScalarTargetValue(), query_mem_desc_, read_int_from_buff(), and TargetInfo::sql_type.

Referenced by getTargetValueFromBufferColwise(), and getTargetValueFromBufferRowwise().

1903  {
1904  auto actual_compact_sz = compact_sz;
1905  const auto& type_info = target_info.sql_type;
1906  if (type_info.get_type() == kFLOAT && !query_mem_desc_.forceFourByteFloat()) {
1907  if (query_mem_desc_.isLogicalSizedColumnsAllowed()) {
1908  actual_compact_sz = sizeof(float);
1909  } else {
1910  actual_compact_sz = sizeof(double);
1911  }
1912  if (target_info.is_agg &&
1913  (target_info.agg_kind == kAVG || target_info.agg_kind == kSUM ||
1914  target_info.agg_kind == kSUM_IF || target_info.agg_kind == kMIN ||
1915  target_info.agg_kind == kMAX || target_info.agg_kind == kSINGLE_VALUE)) {
1916  // The above listed aggregates use two floats in a single 8-byte slot. Set the
1917  // padded size to 4 bytes to properly read each value.
1918  actual_compact_sz = sizeof(float);
1919  }
1920  }
1921  if (get_compact_type(target_info).is_date_in_days()) {
1922  // Dates encoded in days are converted to 8 byte values on read.
1923  actual_compact_sz = sizeof(int64_t);
1924  }
1925 
1926  // String dictionary keys are read as 32-bit values regardless of encoding
1927  if (type_info.is_string() && type_info.get_compression() == kENCODING_DICT &&
1928  type_info.getStringDictKey().dict_id) {
1929  actual_compact_sz = sizeof(int32_t);
1930  }
1931 
1932  auto ival = read_int_from_buff(ptr, actual_compact_sz);
1933  const auto& chosen_type = get_compact_type(target_info);
1934  if (!lazy_fetch_info_.empty()) {
1935  CHECK_LT(target_logical_idx, lazy_fetch_info_.size());
1936  const auto& col_lazy_fetch = lazy_fetch_info_[target_logical_idx];
1937  if (col_lazy_fetch.is_lazily_fetched) {
1938  CHECK_GE(ival, 0);
1939  const auto storage_idx = getStorageIndex(entry_buff_idx);
1940  CHECK_LT(storage_idx.first, col_buffers_.size());
1941  auto& frag_col_buffers = getColumnFrag(storage_idx.first, target_logical_idx, ival);
1942  CHECK_LT(size_t(col_lazy_fetch.local_col_id), frag_col_buffers.size());
1943  ival = result_set::lazy_decode(
1944  col_lazy_fetch, frag_col_buffers[col_lazy_fetch.local_col_id], ival);
1945  if (chosen_type.is_fp()) {
1946  const auto dval = *reinterpret_cast<const double*>(may_alias_ptr(&ival));
1947  if (chosen_type.get_type() == kFLOAT) {
1948  return ScalarTargetValue(static_cast<float>(dval));
1949  } else {
1950  return ScalarTargetValue(dval);
1951  }
1952  }
1953  }
1954  }
1955  if (target_info.agg_kind == kMODE) {
1956  if (!isNullIval(chosen_type, translate_strings, ival)) {
1957  auto const* const* const agg_mode = reinterpret_cast<AggMode const* const*>(ptr);
1958  if (std::optional<int64_t> const mode = (*agg_mode)->mode()) {
1959  return convertToScalarTargetValue(chosen_type, translate_strings, *mode);
1960  }
1961  }
1962  return nullScalarTargetValue(chosen_type, translate_strings);
1963  }
1964  if (chosen_type.is_fp()) {
1965  if (target_info.agg_kind == kAPPROX_QUANTILE) {
1966  return *reinterpret_cast<double const*>(ptr) == NULL_DOUBLE
1967  ? NULL_DOUBLE // sql_validate / just_validate
1968  : calculateQuantile(*reinterpret_cast<quantile::TDigest* const*>(ptr));
1969  }
1970  switch (actual_compact_sz) {
1971  case 8: {
1972  const auto dval = *reinterpret_cast<const double*>(ptr);
1973  return chosen_type.get_type() == kFLOAT
1974  ? ScalarTargetValue(static_cast<const float>(dval))
1975  : ScalarTargetValue(dval);
1976  }
1977  case 4: {
1978  CHECK_EQ(kFLOAT, chosen_type.get_type());
1979  return *reinterpret_cast<const float*>(ptr);
1980  }
1981  default:
1982  CHECK(false);
1983  }
1984  }
1985  if (chosen_type.is_integer() || chosen_type.is_boolean() || chosen_type.is_time() ||
1986  chosen_type.is_timeinterval()) {
1987  if (is_distinct_target(target_info)) {
1988  return TargetValue(count_distinct_set_size(
1989  ival, query_mem_desc_.getCountDistinctDescriptor(target_logical_idx)));
1990  }
1991  // TODO(alex): remove int_resize_cast, make read_int_from_buff return the
1992  // right type instead
1993  if (inline_int_null_val(chosen_type) ==
1994  int_resize_cast(ival, chosen_type.get_logical_size())) {
1995  return inline_int_null_val(type_info);
1996  }
1997  return ival;
1998  }
1999  if (chosen_type.is_string() && chosen_type.get_compression() == kENCODING_DICT) {
2000  return makeStringTargetValue(chosen_type, translate_strings, ival);
2001  }
2002  if (chosen_type.is_decimal()) {
2003  if (decimal_to_double) {
2004  if (target_info.is_agg &&
2005  (target_info.agg_kind == kAVG || target_info.agg_kind == kSUM ||
2006  target_info.agg_kind == kSUM_IF || target_info.agg_kind == kMIN ||
2007  target_info.agg_kind == kMAX) &&
2008  ival == inline_int_null_val(SQLTypeInfo(kBIGINT, false))) {
2009  return NULL_DOUBLE;
2010  }
2011  if (!chosen_type.get_notnull() &&
2012  ival ==
2013  inline_int_null_val(SQLTypeInfo(decimal_to_int_type(chosen_type), false))) {
2014  return NULL_DOUBLE;
2015  }
2016  return static_cast<double>(ival) / exp_to_scale(chosen_type.get_scale());
2017  }
2018  return ival;
2019  }
2020  CHECK(false);
2021  return TargetValue(int64_t(0));
2022 }
#define CHECK_EQ(x, y)
Definition: Logger.h:301
std::pair< size_t, size_t > getStorageIndex(const size_t entry_idx) const
Definition: ResultSet.cpp:914
#define NULL_DOUBLE
double decimal_to_double(const SQLTypeInfo &otype, int64_t oval)
static ScalarTargetValue nullScalarTargetValue(SQLTypeInfo const &, bool const translate_strings)
bool isLogicalSizedColumnsAllowed() const
SQLTypeInfo sql_type
Definition: TargetInfo.h:52
QueryMemoryDescriptor query_mem_desc_
Definition: ResultSet.h:937
#define CHECK_GE(x, y)
Definition: Logger.h:306
int64_t read_int_from_buff(const int8_t *ptr, const int8_t compact_sz)
static bool isNullIval(SQLTypeInfo const &, bool const translate_strings, int64_t const ival)
ScalarTargetValue makeStringTargetValue(SQLTypeInfo const &chosen_type, bool const translate_strings, int64_t const ival) const
Definition: sqldefs.h:75
const SQLTypeInfo get_compact_type(const TargetInfo &target)
bool is_agg
Definition: TargetInfo.h:50
int64_t count_distinct_set_size(const int64_t set_handle, const CountDistinctDescriptor &count_distinct_desc)
Definition: CountDistinct.h:75
int64_t lazy_decode(const ColumnLazyFetchInfo &col_lazy_fetch, const int8_t *byte_stream, const int64_t pos)
ScalarTargetValue convertToScalarTargetValue(SQLTypeInfo const &, bool const translate_strings, int64_t const val) const
Definition: sqldefs.h:77
bool is_distinct_target(const TargetInfo &target_info)
Definition: TargetInfo.h:98
static double calculateQuantile(quantile::TDigest *const t_digest)
Definition: ResultSet.cpp:1035
SQLAgg agg_kind
Definition: TargetInfo.h:51
SQLTypes decimal_to_int_type(const SQLTypeInfo &ti)
Definition: Datum.cpp:559
const CountDistinctDescriptor & getCountDistinctDescriptor(const size_t idx) const
#define CHECK_LT(x, y)
Definition: Logger.h:303
bool is_date_in_days() const
Definition: sqltypes.h:988
int64_t int_resize_cast(const int64_t ival, const size_t sz)
std::vector< std::vector< std::vector< const int8_t * > > > col_buffers_
Definition: ResultSet.h:957
#define CHECK(condition)
Definition: Logger.h:291
std::vector< ColumnLazyFetchInfo > lazy_fetch_info_
Definition: ResultSet.h:956
uint64_t exp_to_scale(const unsigned exp)
int64_t inline_int_null_val(const SQL_TYPE_INFO &ti)
boost::variant< ScalarTargetValue, ArrayTargetValue, GeoTargetValue, GeoTargetValuePtr > TargetValue
Definition: TargetValue.h:195
Definition: sqldefs.h:76
Definition: sqldefs.h:74
Definition: sqldefs.h:83
const std::vector< const int8_t * > & getColumnFrag(const size_t storge_idx, const size_t col_logical_idx, int64_t &global_idx) const
boost::variant< int64_t, double, float, NullableString > ScalarTargetValue
Definition: TargetValue.h:180

+ Here is the call graph for this function:

+ Here is the caller graph for this function:

TargetValue ResultSet::makeVarlenTargetValue ( const int8_t *  ptr1,
const int8_t  compact_sz1,
const int8_t *  ptr2,
const int8_t  compact_sz2,
const TargetInfo target_info,
const size_t  target_logical_idx,
const bool  translate_strings,
const size_t  entry_buff_idx 
) const
private

Definition at line 1358 of file ResultSetIteration.cpp.

References anonymous_namespace{ResultSetIteration.cpp}::build_array_target_value(), CHECK, CHECK_EQ, CHECK_GE, CHECK_GT, CHECK_LT, ChunkIter_get_nth(), col_buffers_, device_id_, device_type_, SQLTypeInfo::get_array_context_logical_size(), SQLTypeInfo::get_compression(), SQLTypeInfo::get_elem_type(), SQLTypeInfo::get_type(), getColumnFrag(), QueryMemoryDescriptor::getExecutor(), getQueryEngineCudaStreamForDevice(), getStorageIndex(), GPU, TargetInfo::is_agg, SQLTypeInfo::is_array(), VarlenDatum::is_null, SQLTypeInfo::is_string(), FlatBufferManager::isFlatBuffer(), kARRAY, kENCODING_NONE, lazy_fetch_info_, VarlenDatum::length, run_benchmark_import::optional, VarlenDatum::pointer, query_mem_desc_, read_int_from_buff(), row_set_mem_owner_, separate_varlen_storage_valid_, serialized_varlen_buffer_, TargetInfo::sql_type, and VarlenArray_get_nth().

Referenced by getTargetValueFromBufferColwise(), and getTargetValueFromBufferRowwise().

1365  {
1366  auto varlen_ptr = read_int_from_buff(ptr1, compact_sz1);
1367  if (separate_varlen_storage_valid_ && !target_info.is_agg) {
1368  if (varlen_ptr < 0) {
1369  CHECK_EQ(-1, varlen_ptr);
1370  if (target_info.sql_type.get_type() == kARRAY) {
1371  return ArrayTargetValue(boost::optional<std::vector<ScalarTargetValue>>{});
1372  }
1373  return TargetValue(nullptr);
1374  }
1375  const auto storage_idx = getStorageIndex(entry_buff_idx);
1376  if (target_info.sql_type.is_string()) {
1377  CHECK(target_info.sql_type.get_compression() == kENCODING_NONE);
1378  CHECK_LT(storage_idx.first, serialized_varlen_buffer_.size());
1379  const auto& varlen_buffer_for_storage =
1380  serialized_varlen_buffer_[storage_idx.first];
1381  CHECK_LT(static_cast<size_t>(varlen_ptr), varlen_buffer_for_storage.size());
1382  return varlen_buffer_for_storage[varlen_ptr];
1383  } else if (target_info.sql_type.get_type() == kARRAY) {
1384  CHECK_LT(storage_idx.first, serialized_varlen_buffer_.size());
1385  const auto& varlen_buffer = serialized_varlen_buffer_[storage_idx.first];
1386  CHECK_LT(static_cast<size_t>(varlen_ptr), varlen_buffer.size());
1387 
1388  return build_array_target_value(
1389  target_info.sql_type,
1390  reinterpret_cast<const int8_t*>(varlen_buffer[varlen_ptr].data()),
1391  varlen_buffer[varlen_ptr].size(),
1392  translate_strings,
1393  row_set_mem_owner_);
1394  } else {
1395  CHECK(false);
1396  }
1397  }
1398  if (!lazy_fetch_info_.empty()) {
1399  CHECK_LT(target_logical_idx, lazy_fetch_info_.size());
1400  const auto& col_lazy_fetch = lazy_fetch_info_[target_logical_idx];
1401  if (col_lazy_fetch.is_lazily_fetched) {
1402  const auto storage_idx = getStorageIndex(entry_buff_idx);
1403  CHECK_LT(storage_idx.first, col_buffers_.size());
1404  auto& frag_col_buffers =
1405  getColumnFrag(storage_idx.first, target_logical_idx, varlen_ptr);
1406  bool is_end{false};
1407  auto col_buf = const_cast<int8_t*>(frag_col_buffers[col_lazy_fetch.local_col_id]);
1408  if (target_info.sql_type.is_string()) {
1409  VarlenDatum vd;
1410  ChunkIter_get_nth(
1411  reinterpret_cast<ChunkIter*>(col_buf), varlen_ptr, false, &vd, &is_end);
1412  CHECK(!is_end);
1413  if (vd.is_null) {
1414  return TargetValue(nullptr);
1415  }
1416  CHECK(vd.pointer);
1417  CHECK_GT(vd.length, 0u);
1418  std::string fetched_str(reinterpret_cast<char*>(vd.pointer), vd.length);
1419  return fetched_str;
1420  } else {
1421  CHECK(target_info.sql_type.is_array());
1422  ArrayDatum ad;
1423  if (FlatBufferManager::isFlatBuffer(col_buf)) {
1424  VarlenArray_get_nth(col_buf, varlen_ptr, &ad, &is_end);
1425  } else {
1426  ChunkIter_get_nth(
1427  reinterpret_cast<ChunkIter*>(col_buf), varlen_ptr, &ad, &is_end);
1428  }
1429  CHECK(!is_end);
1430  if (ad.is_null) {
1431  return ArrayTargetValue(boost::optional<std::vector<ScalarTargetValue>>{});
1432  }
1433  CHECK_GE(ad.length, 0u);
1434  if (ad.length > 0) {
1435  CHECK(ad.pointer);
1436  }
1437  return build_array_target_value(target_info.sql_type,
1438  ad.pointer,
1439  ad.length,
1440  translate_strings,
1441  row_set_mem_owner_);
1442  }
1443  }
1444  }
1445  if (!varlen_ptr) {
1446  if (target_info.sql_type.is_array()) {
1447  return ArrayTargetValue(boost::optional<std::vector<ScalarTargetValue>>{});
1448  }
1449  return TargetValue(nullptr);
1450  }
1451  auto length = read_int_from_buff(ptr2, compact_sz2);
1452  if (target_info.sql_type.is_array()) {
1453  const auto& elem_ti = target_info.sql_type.get_elem_type();
1454  length *= elem_ti.get_array_context_logical_size();
1455  }
1456  std::vector<int8_t> cpu_buffer;
1457  if (varlen_ptr && device_type_ == ExecutorDeviceType::GPU) {
1458  cpu_buffer.resize(length);
1459  const auto executor = query_mem_desc_.getExecutor();
1460  CHECK(executor);
1461  auto data_mgr = executor->getDataMgr();
1462  auto allocator = std::make_unique<CudaAllocator>(
1463  data_mgr, device_id_, getQueryEngineCudaStreamForDevice(device_id_));
1464 
1465  allocator->copyFromDevice(
1466  &cpu_buffer[0], reinterpret_cast<int8_t*>(varlen_ptr), length);
1467  varlen_ptr = reinterpret_cast<int64_t>(&cpu_buffer[0]);
1468  }
1469  if (target_info.sql_type.is_array()) {
1470  return build_array_target_value(target_info.sql_type,
1471  reinterpret_cast<const int8_t*>(varlen_ptr),
1472  length,
1473  translate_strings,
1474  row_set_mem_owner_);
1475  }
1476  return std::string(reinterpret_cast<char*>(varlen_ptr), length);
1477 }
#define CHECK_EQ(x, y)
Definition: Logger.h:301
std::pair< size_t, size_t > getStorageIndex(const size_t entry_idx) const
Definition: ResultSet.cpp:914
bool is_null
Definition: Datum.h:55
SQLTypeInfo sql_type
Definition: TargetInfo.h:52
QueryMemoryDescriptor query_mem_desc_
Definition: ResultSet.h:937
#define CHECK_GE(x, y)
Definition: Logger.h:306
int64_t read_int_from_buff(const int8_t *ptr, const int8_t compact_sz)
std::vector< SerializedVarlenBufferStorage > serialized_varlen_buffer_
Definition: ResultSet.h:969
HOST DEVICE SQLTypes get_type() const
Definition: sqltypes.h:381
#define CHECK_GT(x, y)
Definition: Logger.h:305
DEVICE void ChunkIter_get_nth(ChunkIter *it, int n, bool uncompress, VarlenDatum *result, bool *is_end)
Definition: ChunkIter.cpp:182
TargetValue build_array_target_value(const int8_t *buff, const size_t buff_sz, std::shared_ptr< RowSetMemoryOwner > row_set_mem_owner)
int8_t * pointer
Definition: Datum.h:54
std::conditional_t< is_cuda_compiler(), DeviceArrayDatum, HostArrayDatum > ArrayDatum
Definition: sqltypes.h:219
std::shared_ptr< RowSetMemoryOwner > row_set_mem_owner_
Definition: ResultSet.h:944
bool is_agg
Definition: TargetInfo.h:50
boost::optional< std::vector< ScalarTargetValue >> ArrayTargetValue
Definition: TargetValue.h:181
#define CHECK_LT(x, y)
Definition: Logger.h:303
HOST DEVICE EncodingType get_compression() const
Definition: sqltypes.h:389
int get_array_context_logical_size() const
Definition: sqltypes.h:678
std::vector< std::vector< std::vector< const int8_t * > > > col_buffers_
Definition: ResultSet.h:957
CUstream getQueryEngineCudaStreamForDevice(int device_num)
Definition: QueryEngine.cpp:7
const ExecutorDeviceType device_type_
Definition: ResultSet.h:935
#define CHECK(condition)
Definition: Logger.h:291
std::vector< ColumnLazyFetchInfo > lazy_fetch_info_
Definition: ResultSet.h:956
bool separate_varlen_storage_valid_
Definition: ResultSet.h:970
boost::variant< ScalarTargetValue, ArrayTargetValue, GeoTargetValue, GeoTargetValuePtr > TargetValue
Definition: TargetValue.h:195
bool is_string() const
Definition: sqltypes.h:580
HOST static DEVICE bool isFlatBuffer(const void *buffer)
Definition: FlatBuffer.h:186
SQLTypeInfo get_elem_type() const
Definition: sqltypes.h:963
bool is_array() const
Definition: sqltypes.h:588
const std::vector< const int8_t * > & getColumnFrag(const size_t storge_idx, const size_t col_logical_idx, int64_t &global_idx) const
const Executor * getExecutor() const
DEVICE void VarlenArray_get_nth(int8_t *buf, int n, ArrayDatum *result, bool *is_end)
Definition: sqltypes.h:1503
size_t length
Definition: Datum.h:53
const int device_id_
Definition: ResultSet.h:936

+ Here is the call graph for this function:

+ Here is the caller graph for this function:

void ResultSet::moveToBegin ( ) const

Definition at line 731 of file ResultSet.cpp.

731  {
732  crt_row_buff_idx_ = 0;
733  fetched_so_far_ = 0;
734 }
size_t fetched_so_far_
Definition: ResultSet.h:941
size_t crt_row_buff_idx_
Definition: ResultSet.h:940
ScalarTargetValue ResultSet::nullScalarTargetValue ( SQLTypeInfo const &  ti,
bool const  translate_strings 
)
static

Definition at line 1101 of file ResultSetIteration.cpp.

References inline_int_null_val(), SQLTypeInfo::is_any(), SQLTypeInfo::is_string(), kDOUBLE, kFLOAT, NULL_DOUBLE, NULL_FLOAT, and NULL_INT.

Referenced by makeTargetValue().

1102  {
1103  return ti.is_any<kDOUBLE>() ? ScalarTargetValue(NULL_DOUBLE)
1105  : ti.is_string() ? translate_strings
1106  ? ScalarTargetValue(NullableString(nullptr))
1107  : ScalarTargetValue(static_cast<int64_t>(NULL_INT))
1109 }
#define NULL_DOUBLE
#define NULL_FLOAT
bool is_any(T &&value)
Definition: misc.h:258
#define NULL_INT
boost::variant< std::string, void * > NullableString
Definition: TargetValue.h:179
int64_t inline_int_null_val(const SQL_TYPE_INFO &ti)
boost::variant< int64_t, double, float, NullableString > ScalarTargetValue
Definition: TargetValue.h:180

+ Here is the call graph for this function:

+ Here is the caller graph for this function:

size_t ResultSet::parallelRowCount ( ) const
private

Definition at line 629 of file ResultSet.cpp.

References anonymous_namespace{ResultSet.cpp}::get_truncated_row_count(), threading_serial::parallel_reduce(), and logger::thread_local_ids().

629  {
630  using namespace threading;
631  auto execute_parallel_row_count =
632  [this, parent_thread_local_ids = logger::thread_local_ids()](
633  const blocked_range<size_t>& r, size_t row_count) {
634  logger::LocalIdsScopeGuard lisg = parent_thread_local_ids.setNewThreadId();
635  for (size_t i = r.begin(); i < r.end(); ++i) {
636  if (!isRowAtEmpty(i)) {
637  ++row_count;
638  }
639  }
640  return row_count;
641  };
642  const auto row_count = parallel_reduce(blocked_range<size_t>(0, entryCount()),
643  size_t(0),
644  execute_parallel_row_count,
645  std::plus<int>());
646  return get_truncated_row_count(row_count, getLimit(), drop_first_);
647 }
size_t getLimit() const
Definition: ResultSet.cpp:1397
size_t get_truncated_row_count(size_t total_row_count, size_t limit, size_t offset)
Definition: ResultSet.cpp:539
size_t drop_first_
Definition: ResultSet.h:942
Value parallel_reduce(const blocked_range< Int > &range, const Value &identity, const RealBody &real_body, const Reduction &reduction, const Partitioner &p=Partitioner())
Parallel iteration with reduction.
bool isRowAtEmpty(const size_t index) const
size_t entryCount() const
Returns the number of entries the result set is allocated to hold.
ThreadLocalIds thread_local_ids()
Definition: Logger.cpp:874

+ Here is the call graph for this function:

void ResultSet::parallelTop ( const std::list< Analyzer::OrderEntry > &  order_entries,
const size_t  top_n,
const Executor executor 
)
private

Definition at line 866 of file ResultSet.cpp.

References gpu_enabled::copy(), cpu_threads(), DEBUG_TIMER, threading_std::task_group::run(), logger::thread_local_ids(), and threading_std::task_group::wait().

868  {
869  auto timer = DEBUG_TIMER(__func__);
870  const size_t nthreads = cpu_threads();
871 
872  // Split permutation_ into nthreads subranges and top-sort in-place.
874  std::vector<PermutationView> permutation_views(nthreads);
875  threading::task_group top_sort_threads;
876  for (auto interval : makeIntervals<PermutationIdx>(0, permutation_.size(), nthreads)) {
877  top_sort_threads.run([this,
878  &order_entries,
879  &permutation_views,
880  top_n,
881  executor,
882  parent_thread_local_ids = logger::thread_local_ids(),
883  interval] {
884  logger::LocalIdsScopeGuard lisg = parent_thread_local_ids.setNewThreadId();
885  PermutationView pv(permutation_.data() + interval.begin, 0, interval.size());
886  pv = initPermutationBuffer(pv, interval.begin, interval.end);
887  const auto compare = createComparator(order_entries, pv, executor, true);
888  permutation_views[interval.index] = topPermutation(pv, top_n, compare);
889  });
890  }
891  top_sort_threads.wait();
892 
893  // In case you are considering implementing a parallel reduction, note that the
894  // ResultSetComparator constructor is O(N) in order to materialize some of the aggregate
895  // columns as necessary to perform a comparison. This cost is why reduction is chosen to
896  // be serial instead; only one more Comparator is needed below.
897 
898  // Left-copy disjoint top-sorted subranges into one contiguous range.
899  // ++++....+++.....+++++... -> ++++++++++++............
900  auto end = permutation_.begin() + permutation_views.front().size();
901  for (size_t i = 1; i < nthreads; ++i) {
902  std::copy(permutation_views[i].begin(), permutation_views[i].end(), end);
903  end += permutation_views[i].size();
904  }
905 
906  // Top sort final range.
907  PermutationView pv(permutation_.data(), end - permutation_.begin());
908  const auto compare = createComparator(order_entries, pv, executor, false);
909  pv = topPermutation(pv, top_n, compare);
910  permutation_.resize(pv.size());
911  permutation_.shrink_to_fit();
912 }
Permutation permutation_
Definition: ResultSet.h:945
PermutationView initPermutationBuffer(PermutationView permutation, PermutationIdx const begin, PermutationIdx const end) const
Definition: ResultSet.cpp:846
QueryMemoryDescriptor query_mem_desc_
Definition: ResultSet.h:937
DEVICE auto copy(ARGS &&...args)
Definition: gpu_enabled.h:51
Comparator createComparator(const std::list< Analyzer::OrderEntry > &order_entries, const PermutationView permutation, const Executor *executor, const bool single_threaded)
Definition: ResultSet.h:869
static PermutationView topPermutation(PermutationView, const size_t n, const Comparator &)
Definition: ResultSet.cpp:1303
#define DEBUG_TIMER(name)
Definition: Logger.h:411
int cpu_threads()
Definition: thread_count.h:25
ThreadLocalIds thread_local_ids()
Definition: Logger.cpp:874

+ Here is the call graph for this function:

void ResultSet::radixSortOnCpu ( const std::list< Analyzer::OrderEntry > &  order_entries) const
private

Definition at line 1357 of file ResultSet.cpp.

References apply_permutation_cpu(), CHECK, CHECK_EQ, DEBUG_TIMER, and sort_groups_cpu().

1358  {
1359  auto timer = DEBUG_TIMER(__func__);
1361  std::vector<int64_t> tmp_buff(query_mem_desc_.getEntryCount());
1362  std::vector<int32_t> idx_buff(query_mem_desc_.getEntryCount());
1363  CHECK_EQ(size_t(1), order_entries.size());
1364  auto buffer_ptr = storage_->getUnderlyingBuffer();
1365  for (const auto& order_entry : order_entries) {
1366  const auto target_idx = order_entry.tle_no - 1;
1367  const auto sortkey_val_buff = reinterpret_cast<int64_t*>(
1368  buffer_ptr + query_mem_desc_.getColOffInBytes(target_idx));
1369  const auto chosen_bytes = query_mem_desc_.getPaddedSlotWidthBytes(target_idx);
1370  sort_groups_cpu(sortkey_val_buff,
1371  &idx_buff[0],
1373  order_entry.is_desc,
1374  chosen_bytes);
1375  apply_permutation_cpu(reinterpret_cast<int64_t*>(buffer_ptr),
1376  &idx_buff[0],
1378  &tmp_buff[0],
1379  sizeof(int64_t));
1380  for (size_t target_idx = 0; target_idx < query_mem_desc_.getSlotCount();
1381  ++target_idx) {
1382  if (static_cast<int>(target_idx) == order_entry.tle_no - 1) {
1383  continue;
1384  }
1385  const auto chosen_bytes = query_mem_desc_.getPaddedSlotWidthBytes(target_idx);
1386  const auto satellite_val_buff = reinterpret_cast<int64_t*>(
1387  buffer_ptr + query_mem_desc_.getColOffInBytes(target_idx));
1388  apply_permutation_cpu(satellite_val_buff,
1389  &idx_buff[0],
1391  &tmp_buff[0],
1392  chosen_bytes);
1393  }
1394  }
1395 }
#define CHECK_EQ(x, y)
Definition: Logger.h:301
void sort_groups_cpu(int64_t *val_buff, int32_t *idx_buff, const uint64_t entry_count, const bool desc, const uint32_t chosen_bytes)
Definition: InPlaceSort.cpp:27
QueryMemoryDescriptor query_mem_desc_
Definition: ResultSet.h:937
std::unique_ptr< ResultSetStorage > storage_
Definition: ResultSet.h:938
void apply_permutation_cpu(int64_t *val_buff, int32_t *idx_buff, const uint64_t entry_count, int64_t *tmp_buff, const uint32_t chosen_bytes)
Definition: InPlaceSort.cpp:46
const int8_t getPaddedSlotWidthBytes(const size_t slot_idx) const
#define CHECK(condition)
Definition: Logger.h:291
#define DEBUG_TIMER(name)
Definition: Logger.h:411
size_t getColOffInBytes(const size_t col_idx) const

+ Here is the call graph for this function:

void ResultSet::radixSortOnGpu ( const std::list< Analyzer::OrderEntry > &  order_entries) const
private

Definition at line 1317 of file ResultSet.cpp.

References CHECK_GT, copy_group_by_buffers_from_gpu(), create_dev_group_by_buffers(), DEBUG_TIMER, Catalog_Namespace::SysCatalog::getDataMgr(), getQueryEngineCudaStreamForDevice(), GPU, inplace_sort_gpu(), Catalog_Namespace::SysCatalog::instance(), and KernelPerFragment.

1318  {
1319  auto timer = DEBUG_TIMER(__func__);
1321  const int device_id{0};
1322  auto allocator = std::make_unique<CudaAllocator>(
1323  data_mgr, device_id, getQueryEngineCudaStreamForDevice(device_id));
1324  CHECK_GT(block_size_, 0);
1325  CHECK_GT(grid_size_, 0);
1326  std::vector<int64_t*> group_by_buffers(block_size_);
1327  group_by_buffers[0] = reinterpret_cast<int64_t*>(storage_->getUnderlyingBuffer());
1328  auto dev_group_by_buffers =
1329  create_dev_group_by_buffers(allocator.get(),
1330  group_by_buffers,
1332  block_size_,
1333  grid_size_,
1334  device_id,
1336  /*num_input_rows=*/-1,
1337  /*prepend_index_buffer=*/true,
1338  /*always_init_group_by_on_host=*/true,
1339  /*use_bump_allocator=*/false,
1340  /*has_varlen_output=*/false,
1341  /*insitu_allocator*=*/nullptr);
1343  order_entries, query_mem_desc_, dev_group_by_buffers, data_mgr, device_id);
1345  *allocator,
1346  group_by_buffers,
1347  query_mem_desc_.getBufferSizeBytes(ExecutorDeviceType::GPU),
1348  dev_group_by_buffers.data,
1350  block_size_,
1351  grid_size_,
1352  device_id,
1353  /*use_bump_allocator=*/false,
1354  /*has_varlen_output=*/false);
1355 }
GpuGroupByBuffers create_dev_group_by_buffers(DeviceAllocator *device_allocator, const std::vector< int64_t * > &group_by_buffers, const QueryMemoryDescriptor &query_mem_desc, const unsigned block_size_x, const unsigned grid_size_x, const int device_id, const ExecutorDispatchMode dispatch_mode, const int64_t num_input_rows, const bool prepend_index_buffer, const bool always_init_group_by_on_host, const bool use_bump_allocator, const bool has_varlen_output, Allocator *insitu_allocator)
Definition: GpuMemUtils.cpp:70
QueryMemoryDescriptor query_mem_desc_
Definition: ResultSet.h:937
std::unique_ptr< ResultSetStorage > storage_
Definition: ResultSet.h:938
void inplace_sort_gpu(const std::list< Analyzer::OrderEntry > &order_entries, const QueryMemoryDescriptor &query_mem_desc, const GpuGroupByBuffers &group_by_buffers, Data_Namespace::DataMgr *data_mgr, const int device_id)
#define CHECK_GT(x, y)
Definition: Logger.h:305
unsigned block_size_
Definition: ResultSet.h:947
Data_Namespace::DataMgr & getDataMgr() const
Definition: SysCatalog.h:234
static SysCatalog & instance()
Definition: SysCatalog.h:343
unsigned grid_size_
Definition: ResultSet.h:948
CUstream getQueryEngineCudaStreamForDevice(int device_num)
Definition: QueryEngine.cpp:7
#define DEBUG_TIMER(name)
Definition: Logger.h:411
void copy_group_by_buffers_from_gpu(DeviceAllocator &device_allocator, const std::vector< int64_t * > &group_by_buffers, const size_t groups_buffer_size, const int8_t *group_by_dev_buffers_mem, const QueryMemoryDescriptor &query_mem_desc, const unsigned block_size_x, const unsigned grid_size_x, const int device_id, const bool prepend_index_buffer, const bool has_varlen_output)

+ Here is the call graph for this function:

size_t ResultSet::rowCount ( const bool  force_parallel = false) const

Returns the number of valid entries in the result set (i.e that will be returned from the SQL query or inputted into the next query step)

Note that this can be less than or equal to the value returned by ResultSet::getEntries(), whether due to a SQL LIMIT/OFFSET applied or because the result set representation is inherently sparse (i.e. baseline hash group by).

Internally this function references/sets a cached value (cached_row_count_) so that the cost of computing the result is only paid once per result set.

If the actual row count is not cached and needs to be computed, in some cases that can be O(1) (i.e. if limits and offsets are present, or for the output of a table function). For projections, we use a binary search, so it is O(log n), otherwise it is O(n) (with n being ResultSet::entryCount()), which will be run in parallel if the entry count >= the default of 20000 or if force_parallel is set to true

Note that we currently do not invalidate the cache if the result set is changed (i.e appended to), so this function should only be called after the result set is finalized.

Parameters
force_parallelForces the row count to be computed in parallel if the row count cannot be otherwise be computed from metadata or via a binary search (otherwise parallel search is automatically used for result sets with entryCount() >= 20000)

Definition at line 593 of file ResultSet.cpp.

References CHECK_GE, and uninitialized_cached_row_count.

593  {
594  // cached_row_count_ is atomic, so fetch it into a local variable first
595  // to avoid repeat fetches
596  const int64_t cached_row_count = cached_row_count_;
597  if (cached_row_count != uninitialized_cached_row_count) {
598  CHECK_GE(cached_row_count, 0);
599  return cached_row_count;
600  }
601  setCachedRowCount(rowCountImpl(force_parallel));
602  return cached_row_count_;
603 }
#define CHECK_GE(x, y)
Definition: Logger.h:306
std::atomic< int64_t > cached_row_count_
Definition: ResultSet.h:974
size_t rowCountImpl(const bool force_parallel) const
Definition: ResultSet.cpp:555
constexpr int64_t uninitialized_cached_row_count
Definition: ResultSet.cpp:52
void setCachedRowCount(const size_t row_count) const
Definition: ResultSet.cpp:609
size_t ResultSet::rowCountImpl ( const bool  force_parallel) const
private

Definition at line 555 of file ResultSet.cpp.

References CHECK, anonymous_namespace{ResultSet.cpp}::get_truncated_row_count(), Projection, and TableFunction.

555  {
556  if (just_explain_) {
557  return 1;
558  }
560  return entryCount();
561  }
562  if (!permutation_.empty()) {
563  // keep_first_ corresponds to SQL LIMIT
564  // drop_first_ corresponds to SQL OFFSET
566  }
567  if (!storage_) {
568  return 0;
569  }
570  CHECK(permutation_.empty());
572  return binSearchRowCount();
573  }
574 
575  constexpr size_t auto_parallel_row_count_threshold{20000UL};
576  if (force_parallel || entryCount() >= auto_parallel_row_count_threshold) {
577  return parallelRowCount();
578  }
579  std::lock_guard<std::mutex> lock(row_iteration_mutex_);
580  moveToBegin();
581  size_t row_count{0};
582  while (true) {
583  auto crt_row = getNextRowUnlocked(false, false);
584  if (crt_row.empty()) {
585  break;
586  }
587  ++row_count;
588  }
589  moveToBegin();
590  return row_count;
591 }
std::mutex row_iteration_mutex_
Definition: ResultSet.h:975
Permutation permutation_
Definition: ResultSet.h:945
void moveToBegin() const
Definition: ResultSet.cpp:731
QueryMemoryDescriptor query_mem_desc_
Definition: ResultSet.h:937
std::unique_ptr< ResultSetStorage > storage_
Definition: ResultSet.h:938
size_t keep_first_
Definition: ResultSet.h:943
const bool just_explain_
Definition: ResultSet.h:972
size_t get_truncated_row_count(size_t total_row_count, size_t limit, size_t offset)
Definition: ResultSet.cpp:539
size_t parallelRowCount() const
Definition: ResultSet.cpp:629
size_t drop_first_
Definition: ResultSet.h:942
QueryDescriptionType getQueryDescriptionType() const
size_t entryCount() const
Returns the number of entries the result set is allocated to hold.
#define CHECK(condition)
Definition: Logger.h:291
std::vector< TargetValue > getNextRowUnlocked(const bool translate_strings, const bool decimal_to_double) const
size_t binSearchRowCount() const
Definition: ResultSet.cpp:616

+ Here is the call graph for this function:

ResultSetRowIterator ResultSet::rowIterator ( size_t  from_logical_index,
bool  translate_strings,
bool  decimal_to_double 
) const
inline

Definition at line 201 of file ResultSet.h.

Referenced by rowIterator().

203  {
204  ResultSetRowIterator rowIterator(this, translate_strings, decimal_to_double);
205 
206  // move to first logical position
207  ++rowIterator;
208 
209  for (size_t index = 0; index < from_logical_index; index++) {
210  ++rowIterator;
211  }
212 
213  return rowIterator;
214  }
double decimal_to_double(const SQLTypeInfo &otype, int64_t oval)
ResultSetRowIterator rowIterator(size_t from_logical_index, bool translate_strings, bool decimal_to_double) const
Definition: ResultSet.h:201

+ Here is the caller graph for this function:

ResultSetRowIterator ResultSet::rowIterator ( bool  translate_strings,
bool  decimal_to_double 
) const
inline

Definition at line 216 of file ResultSet.h.

References rowIterator().

217  {
218  return rowIterator(0, translate_strings, decimal_to_double);
219  }
double decimal_to_double(const SQLTypeInfo &otype, int64_t oval)
ResultSetRowIterator rowIterator(size_t from_logical_index, bool translate_strings, bool decimal_to_double) const
Definition: ResultSet.h:201

+ Here is the call graph for this function:

void ResultSet::serialize ( TSerializedRows &  serialized_rows) const
void ResultSet::serializeCountDistinctColumns ( TSerializedRows &  ) const
private
void ResultSet::serializeProjection ( TSerializedRows &  serialized_rows) const
private
void ResultSet::serializeVarlenAggColumn ( int8_t *  buf,
std::vector< std::string > &  varlen_buffer 
) const
private
void ResultSet::setCached ( bool  val)
inline

Definition at line 493 of file ResultSet.h.

References cached_.

493 { cached_ = val; }
bool cached_
Definition: ResultSet.h:981
void ResultSet::setCachedRowCount ( const size_t  row_count) const

Definition at line 609 of file ResultSet.cpp.

References CHECK, and uninitialized_cached_row_count.

609  {
610  const int64_t signed_row_count = static_cast<int64_t>(row_count);
611  const int64_t old_cached_row_count = cached_row_count_.exchange(signed_row_count);
612  CHECK(old_cached_row_count == uninitialized_cached_row_count ||
613  old_cached_row_count == signed_row_count);
614 }
std::atomic< int64_t > cached_row_count_
Definition: ResultSet.h:974