OmniSciDB  a987f07e93
 All Classes Namespaces Files Functions Variables Typedefs Enumerations Enumerator Friends Macros Groups Pages
ResultSet Class Reference

#include <ResultSet.h>

+ Collaboration diagram for ResultSet:

Classes

struct  ColumnWiseTargetAccessor
 
struct  QueryExecutionTimings
 
struct  ResultSetComparator
 
struct  RowIterationState
 
struct  RowWiseTargetAccessor
 
struct  StorageLookupResult
 
struct  TargetOffsets
 
struct  VarlenTargetPtrPair
 

Public Types

enum  GeoReturnType { GeoReturnType::GeoTargetValue, GeoReturnType::WktString, GeoReturnType::GeoTargetValuePtr, GeoReturnType::GeoTargetValueGpuPtr }
 

Public Member Functions

 ResultSet (const std::vector< TargetInfo > &targets, const ExecutorDeviceType device_type, const QueryMemoryDescriptor &query_mem_desc, const std::shared_ptr< RowSetMemoryOwner > row_set_mem_owner, const Catalog_Namespace::Catalog *catalog, const unsigned block_size, const unsigned grid_size)
 
 ResultSet (const std::vector< TargetInfo > &targets, const std::vector< ColumnLazyFetchInfo > &lazy_fetch_info, const std::vector< std::vector< const int8_t * >> &col_buffers, const std::vector< std::vector< int64_t >> &frag_offsets, const std::vector< int64_t > &consistent_frag_sizes, const ExecutorDeviceType device_type, const int device_id, const QueryMemoryDescriptor &query_mem_desc, const std::shared_ptr< RowSetMemoryOwner > row_set_mem_owner, const Catalog_Namespace::Catalog *catalog, const unsigned block_size, const unsigned grid_size)
 
 ResultSet (const std::shared_ptr< const Analyzer::Estimator >, const ExecutorDeviceType device_type, const int device_id, Data_Namespace::DataMgr *data_mgr)
 
 ResultSet (const std::string &explanation)
 
 ResultSet (int64_t queue_time_ms, int64_t render_time_ms, const std::shared_ptr< RowSetMemoryOwner > row_set_mem_owner)
 
 ~ResultSet ()
 
std::string toString () const
 
std::string summaryToString () const
 
ResultSetRowIterator rowIterator (size_t from_logical_index, bool translate_strings, bool decimal_to_double) const
 
ResultSetRowIterator rowIterator (bool translate_strings, bool decimal_to_double) const
 
ExecutorDeviceType getDeviceType () const
 
const ResultSetStorage * allocateStorage () const
 
const ResultSetStorage * allocateStorage (int8_t *, const std::vector< int64_t > &, std::shared_ptr< VarlenOutputInfo >=nullptr) const
 
const ResultSetStorage * allocateStorage (const std::vector< int64_t > &) const
 
void updateStorageEntryCount (const size_t new_entry_count)
 
std::vector< TargetValue > getNextRow (const bool translate_strings, const bool decimal_to_double) const
 
size_t getCurrentRowBufferIndex () const
 
std::vector< TargetValue > getRowAt (const size_t index) const
 
TargetValue getRowAt (const size_t row_idx, const size_t col_idx, const bool translate_strings, const bool decimal_to_double=true) const
 
OneIntegerColumnRow getOneColRow (const size_t index) const
 
std::vector< TargetValue > getRowAtNoTranslations (const size_t index, const std::vector< bool > &targets_to_skip={}) const
 
bool isRowAtEmpty (const size_t index) const
 
void sort (const std::list< Analyzer::OrderEntry > &order_entries, size_t top_n, const Executor *executor)
 
void keepFirstN (const size_t n)
 
void dropFirstN (const size_t n)
 
void append (ResultSet &that)
 
const ResultSetStorage * getStorage () const
 
size_t colCount () const
 
SQLTypeInfo getColType (const size_t col_idx) const
 
size_t rowCount (const bool force_parallel=false) const
 Returns the number of valid entries in the result set (i.e that will be returned from the SQL query or inputted into the next query step) More...
 
void invalidateCachedRowCount () const
 
void setCachedRowCount (const size_t row_count) const
 
bool isEmpty () const
 Returns a boolean signifying whether there are valid entries in the result set. More...
 
size_t entryCount () const
 Returns the number of entries the result set is allocated to hold. More...
 
size_t getBufferSizeBytes (const ExecutorDeviceType device_type) const
 
bool definitelyHasNoRows () const
 
const QueryMemoryDescriptor & getQueryMemDesc () const
 
const std::vector< TargetInfo > & getTargetInfos () const
 
const std::vector< int64_t > & getTargetInitVals () const
 
int8_t * getDeviceEstimatorBuffer () const
 
int8_t * getHostEstimatorBuffer () const
 
void syncEstimatorBuffer () const
 
size_t getNDVEstimator () const
 
void setQueueTime (const int64_t queue_time)
 
void setKernelQueueTime (const int64_t kernel_queue_time)
 
void addCompilationQueueTime (const int64_t compilation_queue_time)
 
int64_t getQueueTime () const
 
int64_t getRenderTime () const
 
void moveToBegin () const
 
bool isTruncated () const
 
bool isExplain () const
 
void setValidationOnlyRes ()
 
bool isValidationOnlyRes () const
 
std::string getExplanation () const
 
bool isGeoColOnGpu (const size_t col_idx) const
 
int getDeviceId () const
 
std::string getString (SQLTypeInfo const &, int64_t const ival) const
 
ScalarTargetValue convertToScalarTargetValue (SQLTypeInfo const &, bool const translate_strings, int64_t const val) const
 
bool isLessThan (SQLTypeInfo const &, int64_t const lhs, int64_t const rhs) const
 
void fillOneEntry (const std::vector< int64_t > &entry)
 
void initializeStorage () const
 
void holdChunks (const std::list< std::shared_ptr< Chunk_NS::Chunk >> &chunks)
 
void holdChunkIterators (const std::shared_ptr< std::list< ChunkIter >> chunk_iters)
 
void holdLiterals (std::vector< int8_t > &literal_buff)
 
std::shared_ptr
< RowSetMemoryOwner > 
getRowSetMemOwner () const
 
const Permutation & getPermutationBuffer () const
 
const bool isPermutationBufferEmpty () const
 
void serialize (TSerializedRows &serialized_rows) const
 
size_t getLimit () const
 
ResultSetPtr copy ()
 
void clearPermutation ()
 
void initStatus ()
 
void invalidateResultSetChunks ()
 
const bool isEstimator () const
 
void setCached (bool val)
 
const bool isCached () const
 
void setExecTime (const long exec_time)
 
const long getExecTime () const
 
void setQueryPlanHash (const QueryPlanHash query_plan)
 
const QueryPlanHash getQueryPlanHash ()
 
std::unordered_set< size_t > getInputTableKeys () const
 
void setInputTableKeys (std::unordered_set< size_t > &&intput_table_keys)
 
void setTargetMetaInfo (const std::vector< TargetMetaInfo > &target_meta_info)
 
std::vector< TargetMetaInfo > getTargetMetaInfo ()
 
std::optional< bool > canUseSpeculativeTopNSort () const
 
void setUseSpeculativeTopNSort (bool value)
 
const bool hasValidBuffer () const
 
unsigned getBlockSize () const
 
unsigned getGridSize () const
 
GeoReturnType getGeoReturnType () const
 
void setGeoReturnType (const GeoReturnType val)
 
void copyColumnIntoBuffer (const size_t column_idx, int8_t *output_buffer, const size_t output_buffer_size) const
 
bool isDirectColumnarConversionPossible () const
 
bool didOutputColumnar () const
 
bool isZeroCopyColumnarConversionPossible (size_t column_idx) const
 
const int8_t * getColumnarBuffer (size_t column_idx) const
 
QueryDescriptionType getQueryDescriptionType () const
 
const int8_t getPaddedSlotWidthBytes (const size_t slot_idx) const
 
std::tuple< std::vector< bool >
, size_t > 
getSingleSlotTargetBitmap () const
 
std::tuple< std::vector< bool >
, size_t > 
getSupportedSingleSlotTargetBitmap () const
 
std::vector< size_t > getSlotIndicesForTargetIndices () const
 
const std::vector
< ColumnLazyFetchInfo > & 
getLazyFetchInfo () const
 
bool areAnyColumnsLazyFetched () const
 
size_t getNumColumnsLazyFetched () const
 
void setSeparateVarlenStorageValid (const bool val)
 
const std::vector< std::string > getStringDictionaryPayloadCopy (const int dict_id) const
 
const std::pair< std::vector
< int32_t >, std::vector
< std::string > > 
getUniqueStringsForDictEncodedTargetCol (const size_t col_idx) const
 
StringDictionaryProxy * getStringDictionaryProxy (int const dict_id) const
 
template<typename ENTRY_TYPE , QueryDescriptionType QUERY_TYPE, bool COLUMNAR_FORMAT>
ENTRY_TYPE getEntryAt (const size_t row_idx, const size_t target_idx, const size_t slot_idx) const
 
ChunkStats getTableFunctionChunkStats (const size_t target_idx) const
 
void translateDictEncodedColumns (std::vector< TargetInfo > const &, size_t const start_idx)
 
void eachCellInColumn (RowIterationState &, CellCallback const &)
 
const Executor * getExecutor () const
 
template<typename ENTRY_TYPE , QueryDescriptionType QUERY_TYPE, bool COLUMNAR_FORMAT>
ENTRY_TYPE getEntryAt (const size_t row_idx, const size_t target_idx, const size_t slot_idx) const
 
template<typename ENTRY_TYPE >
ENTRY_TYPE getColumnarPerfectHashEntryAt (const size_t row_idx, const size_t target_idx, const size_t slot_idx) const
 
template<typename ENTRY_TYPE >
ENTRY_TYPE getRowWisePerfectHashEntryAt (const size_t row_idx, const size_t target_idx, const size_t slot_idx) const
 
template<typename ENTRY_TYPE >
ENTRY_TYPE getRowWiseBaselineEntryAt (const size_t row_idx, const size_t target_idx, const size_t slot_idx) const
 
template<typename ENTRY_TYPE >
ENTRY_TYPE getColumnarBaselineEntryAt (const size_t row_idx, const size_t target_idx, const size_t slot_idx) const
 

Static Public Member Functions

static QueryMemoryDescriptor fixupQueryMemoryDescriptor (const QueryMemoryDescriptor &)
 
static bool isNullIval (SQLTypeInfo const &, bool const translate_strings, int64_t const ival)
 
static ScalarTargetValue nullScalarTargetValue (SQLTypeInfo const &, bool const translate_strings)
 
static std::unique_ptr< ResultSet > unserialize (const TSerializedRows &serialized_rows, const Executor *)
 
static double calculateQuantile (quantile::TDigest *const t_digest)
 

Public Attributes

friend ResultSetBuilder
 

Private Types

using ApproxQuantileBuffers = std::vector< std::vector< double >>
 
using ModeBuffers = std::vector< std::vector< int64_t >>
 
using SerializedVarlenBufferStorage = std::vector< std::string >
 

Private Member Functions

void advanceCursorToNextEntry (ResultSetRowIterator &iter) const
 
std::vector< TargetValue > getNextRowImpl (const bool translate_strings, const bool decimal_to_double) const
 
std::vector< TargetValue > getNextRowUnlocked (const bool translate_strings, const bool decimal_to_double) const
 
std::vector< TargetValue > getRowAt (const size_t index, const bool translate_strings, const bool decimal_to_double, const bool fixup_count_distinct_pointers, const std::vector< bool > &targets_to_skip={}) const
 
template<typename ENTRY_TYPE >
ENTRY_TYPE getColumnarPerfectHashEntryAt (const size_t row_idx, const size_t target_idx, const size_t slot_idx) const
 
template<typename ENTRY_TYPE >
ENTRY_TYPE getRowWisePerfectHashEntryAt (const size_t row_idx, const size_t target_idx, const size_t slot_idx) const
 
template<typename ENTRY_TYPE >
ENTRY_TYPE getRowWiseBaselineEntryAt (const size_t row_idx, const size_t target_idx, const size_t slot_idx) const
 
template<typename ENTRY_TYPE >
ENTRY_TYPE getColumnarBaselineEntryAt (const size_t row_idx, const size_t target_idx, const size_t slot_idx) const
 
size_t binSearchRowCount () const
 
size_t parallelRowCount () const
 
size_t advanceCursorToNextEntry () const
 
void radixSortOnGpu (const std::list< Analyzer::OrderEntry > &order_entries) const
 
void radixSortOnCpu (const std::list< Analyzer::OrderEntry > &order_entries) const
 
TargetValue getTargetValueFromBufferRowwise (int8_t *rowwise_target_ptr, int8_t *keys_ptr, const size_t entry_buff_idx, const TargetInfo &target_info, const size_t target_logical_idx, const size_t slot_idx, const bool translate_strings, const bool decimal_to_double, const bool fixup_count_distinct_pointers) const
 
TargetValue getTargetValueFromBufferColwise (const int8_t *col_ptr, const int8_t *keys_ptr, const QueryMemoryDescriptor &query_mem_desc, const size_t local_entry_idx, const size_t global_entry_idx, const TargetInfo &target_info, const size_t target_logical_idx, const size_t slot_idx, const bool translate_strings, const bool decimal_to_double) const
 
TargetValue makeTargetValue (const int8_t *ptr, const int8_t compact_sz, const TargetInfo &target_info, const size_t target_logical_idx, const bool translate_strings, const bool decimal_to_double, const size_t entry_buff_idx) const
 
ScalarTargetValue makeStringTargetValue (SQLTypeInfo const &chosen_type, bool const translate_strings, int64_t const ival) const
 
TargetValue makeVarlenTargetValue (const int8_t *ptr1, const int8_t compact_sz1, const int8_t *ptr2, const int8_t compact_sz2, const TargetInfo &target_info, const size_t target_logical_idx, const bool translate_strings, const size_t entry_buff_idx) const
 
TargetValue makeGeoTargetValue (const int8_t *geo_target_ptr, const size_t slot_idx, const TargetInfo &target_info, const size_t target_logical_idx, const size_t entry_buff_idx) const
 
InternalTargetValue getVarlenOrderEntry (const int64_t str_ptr, const size_t str_len) const
 
int64_t lazyReadInt (const int64_t ival, const size_t target_logical_idx, const StorageLookupResult &storage_lookup_result) const
 
std::pair< size_t, size_t > getStorageIndex (const size_t entry_idx) const
 
const std::vector< const
int8_t * > & 
getColumnFrag (const size_t storge_idx, const size_t col_logical_idx, int64_t &global_idx) const
 
const VarlenOutputInfo * getVarlenOutputInfo (const size_t entry_idx) const
 
StorageLookupResult findStorage (const size_t entry_idx) const
 
Comparator createComparator (const std::list< Analyzer::OrderEntry > &order_entries, const PermutationView permutation, const Executor *executor, const bool single_threaded)
 
PermutationView initPermutationBuffer (PermutationView permutation, PermutationIdx const begin, PermutationIdx const end) const
 
void parallelTop (const std::list< Analyzer::OrderEntry > &order_entries, const size_t top_n, const Executor *executor)
 
void baselineSort (const std::list< Analyzer::OrderEntry > &order_entries, const size_t top_n, const Executor *executor)
 
void doBaselineSort (const ExecutorDeviceType device_type, const std::list< Analyzer::OrderEntry > &order_entries, const size_t top_n, const Executor *executor)
 
bool canUseFastBaselineSort (const std::list< Analyzer::OrderEntry > &order_entries, const size_t top_n)
 
size_t rowCountImpl (const bool force_parallel) const
 
Data_Namespace::DataMgr * getDataManager () const
 
int getGpuCount () const
 
void serializeProjection (TSerializedRows &serialized_rows) const
 
void serializeVarlenAggColumn (int8_t *buf, std::vector< std::string > &varlen_bufer) const
 
void serializeCountDistinctColumns (TSerializedRows &) const
 
void unserializeCountDistinctColumns (const TSerializedRows &)
 
void fixupCountDistinctPointers ()
 
void create_active_buffer_set (CountDistinctSet &count_distinct_active_buffer_set) const
 
int64_t getDistinctBufferRefFromBufferRowwise (int8_t *rowwise_target_ptr, const TargetInfo &target_info) const
 

Static Private Member Functions

static bool isNull (const SQLTypeInfo &ti, const InternalTargetValue &val, const bool float_argument_input)
 
static PermutationView topPermutation (PermutationView, const size_t n, const Comparator &)
 

Private Attributes

const std::vector< TargetInfo > targets_
 
const ExecutorDeviceType device_type_
 
const int device_id_
 
QueryMemoryDescriptor query_mem_desc_
 
std::unique_ptr< ResultSetStorage > storage_
 
AppendedStorage appended_storage_
 
size_t crt_row_buff_idx_
 
size_t fetched_so_far_
 
size_t drop_first_
 
size_t keep_first_
 
std::shared_ptr
< RowSetMemoryOwner > 
row_set_mem_owner_
 
Permutation permutation_
 
const Catalog_Namespace::Catalog * catalog_
 
unsigned block_size_ {0}
 
unsigned grid_size_ {0}
 
QueryExecutionTimings timings_
 
std::list< std::shared_ptr
< Chunk_NS::Chunk > > 
chunks_
 
std::vector< std::shared_ptr
< std::list< ChunkIter > > > 
chunk_iters_
 
std::vector< std::vector
< int8_t > > 
literal_buffers_
 
std::vector< ColumnLazyFetchInfo > lazy_fetch_info_
 
std::vector< std::vector
< std::vector< const int8_t * > > > 
col_buffers_
 
std::vector< std::vector
< std::vector< int64_t > > > 
frag_offsets_
 
std::vector< std::vector
< int64_t > > 
consistent_frag_sizes_
 
const std::shared_ptr< const
Analyzer::Estimator > 
estimator_
 
Data_Namespace::AbstractBuffer * device_estimator_buffer_ {nullptr}
 
int8_t * host_estimator_buffer_ {nullptr}
 
Data_Namespace::DataMgr * data_mgr_
 
std::vector
< SerializedVarlenBufferStorage > 
serialized_varlen_buffer_
 
bool separate_varlen_storage_valid_
 
std::string explanation_
 
const bool just_explain_
 
bool for_validation_only_
 
std::atomic< int64_t > cached_row_count_
 
std::mutex row_iteration_mutex_
 
GeoReturnType geo_return_type_
 
bool cached_
 
size_t query_exec_time_
 
QueryPlanHash query_plan_
 
std::unordered_set< size_t > input_table_keys_
 
std::vector< TargetMetaInfo > target_meta_info_
 
std::optional< bool > can_use_speculative_top_n_sort
 

Friends

class ResultSetManager
 
class ResultSetRowIterator
 
class ColumnarResults
 

Detailed Description

Definition at line 157 of file ResultSet.h.

Member Typedef Documentation

using ResultSet::ApproxQuantileBuffers = std::vector<std::vector<double>>
private

Definition at line 821 of file ResultSet.h.

using ResultSet::ModeBuffers = std::vector<std::vector<int64_t>>
private

Definition at line 822 of file ResultSet.h.

using ResultSet::SerializedVarlenBufferStorage = std::vector<std::string>
private

Definition at line 968 of file ResultSet.h.

Member Enumeration Documentation

Geo return type options when accessing geo columns from a result set.

Enumerator
GeoTargetValue 

Copies the geo data into a struct of vectors - coords are uncompressed

WktString 

Returns the geo data as a WKT string

GeoTargetValuePtr 

Returns only the pointers of the underlying buffers for the geo data.

GeoTargetValueGpuPtr 

If geo data is currently on a device, keep the data on the device and return the device ptrs

Definition at line 541 of file ResultSet.h.

541  {
544  WktString,
547  GeoTargetValueGpuPtr
549  };
boost::optional< boost::variant< GeoPointTargetValue, GeoMultiPointTargetValue, GeoLineStringTargetValue, GeoMultiLineStringTargetValue, GeoPolyTargetValue, GeoMultiPolyTargetValue >> GeoTargetValue
Definition: TargetValue.h:187
boost::variant< GeoPointTargetValuePtr, GeoMultiPointTargetValuePtr, GeoLineStringTargetValuePtr, GeoMultiLineStringTargetValuePtr, GeoPolyTargetValuePtr, GeoMultiPolyTargetValuePtr > GeoTargetValuePtr
Definition: TargetValue.h:193

Constructor & Destructor Documentation

ResultSet::ResultSet ( const std::vector< TargetInfo > &  targets,
const ExecutorDeviceType  device_type,
const QueryMemoryDescriptor query_mem_desc,
const std::shared_ptr< RowSetMemoryOwner row_set_mem_owner,
const Catalog_Namespace::Catalog catalog,
const unsigned  block_size,
const unsigned  grid_size 
)

Definition at line 64 of file ResultSet.cpp.

71  : targets_(targets)
72  , device_type_(device_type)
73  , device_id_(-1)
74  , query_mem_desc_(query_mem_desc)
76  , fetched_so_far_(0)
77  , drop_first_(0)
78  , keep_first_(0)
79  , row_set_mem_owner_(row_set_mem_owner)
80  , catalog_(catalog)
81  , block_size_(block_size)
82  , grid_size_(grid_size)
83  , data_mgr_(nullptr)
85  , just_explain_(false)
86  , for_validation_only_(false)
89  , cached_(false)
90  , query_exec_time_(0)
92  , can_use_speculative_top_n_sort(std::nullopt) {}
bool for_validation_only_
Definition: ResultSet.h:974
GeoReturnType geo_return_type_
Definition: ResultSet.h:979
constexpr QueryPlanHash EMPTY_HASHED_PLAN_DAG_KEY
std::optional< bool > can_use_speculative_top_n_sort
Definition: ResultSet.h:992
const Catalog_Namespace::Catalog * catalog_
Definition: ResultSet.h:947
QueryMemoryDescriptor query_mem_desc_
Definition: ResultSet.h:937
size_t query_exec_time_
Definition: ResultSet.h:984
size_t keep_first_
Definition: ResultSet.h:943
const bool just_explain_
Definition: ResultSet.h:973
unsigned block_size_
Definition: ResultSet.h:948
std::atomic< int64_t > cached_row_count_
Definition: ResultSet.h:975
const std::vector< TargetInfo > targets_
Definition: ResultSet.h:934
std::shared_ptr< RowSetMemoryOwner > row_set_mem_owner_
Definition: ResultSet.h:944
size_t drop_first_
Definition: ResultSet.h:942
bool cached_
Definition: ResultSet.h:982
unsigned grid_size_
Definition: ResultSet.h:949
Data_Namespace::DataMgr * data_mgr_
Definition: ResultSet.h:965
const ExecutorDeviceType device_type_
Definition: ResultSet.h:935
size_t fetched_so_far_
Definition: ResultSet.h:941
size_t crt_row_buff_idx_
Definition: ResultSet.h:940
QueryPlanHash query_plan_
Definition: ResultSet.h:985
bool separate_varlen_storage_valid_
Definition: ResultSet.h:971
constexpr int64_t uninitialized_cached_row_count
Definition: ResultSet.cpp:52
const int device_id_
Definition: ResultSet.h:936
ResultSet::ResultSet ( const std::vector< TargetInfo > &  targets,
const std::vector< ColumnLazyFetchInfo > &  lazy_fetch_info,
const std::vector< std::vector< const int8_t * >> &  col_buffers,
const std::vector< std::vector< int64_t >> &  frag_offsets,
const std::vector< int64_t > &  consistent_frag_sizes,
const ExecutorDeviceType  device_type,
const int  device_id,
const QueryMemoryDescriptor query_mem_desc,
const std::shared_ptr< RowSetMemoryOwner row_set_mem_owner,
const Catalog_Namespace::Catalog catalog,
const unsigned  block_size,
const unsigned  grid_size 
)

Definition at line 94 of file ResultSet.cpp.

106  : targets_(targets)
107  , device_type_(device_type)
108  , device_id_(device_id)
109  , query_mem_desc_(query_mem_desc)
110  , crt_row_buff_idx_(0)
111  , fetched_so_far_(0)
112  , drop_first_(0)
113  , keep_first_(0)
114  , row_set_mem_owner_(row_set_mem_owner)
115  , catalog_(catalog)
116  , block_size_(block_size)
117  , grid_size_(grid_size)
118  , lazy_fetch_info_(lazy_fetch_info)
119  , col_buffers_{col_buffers}
120  , frag_offsets_{frag_offsets}
121  , consistent_frag_sizes_{consistent_frag_sizes}
122  , data_mgr_(nullptr)
124  , just_explain_(false)
125  , for_validation_only_(false)
128  , cached_(false)
129  , query_exec_time_(0)
131  , can_use_speculative_top_n_sort(std::nullopt) {}
bool for_validation_only_
Definition: ResultSet.h:974
GeoReturnType geo_return_type_
Definition: ResultSet.h:979
constexpr QueryPlanHash EMPTY_HASHED_PLAN_DAG_KEY
std::optional< bool > can_use_speculative_top_n_sort
Definition: ResultSet.h:992
const Catalog_Namespace::Catalog * catalog_
Definition: ResultSet.h:947
QueryMemoryDescriptor query_mem_desc_
Definition: ResultSet.h:937
size_t query_exec_time_
Definition: ResultSet.h:984
size_t keep_first_
Definition: ResultSet.h:943
const bool just_explain_
Definition: ResultSet.h:973
unsigned block_size_
Definition: ResultSet.h:948
std::atomic< int64_t > cached_row_count_
Definition: ResultSet.h:975
const std::vector< TargetInfo > targets_
Definition: ResultSet.h:934
std::shared_ptr< RowSetMemoryOwner > row_set_mem_owner_
Definition: ResultSet.h:944
size_t drop_first_
Definition: ResultSet.h:942
bool cached_
Definition: ResultSet.h:982
unsigned grid_size_
Definition: ResultSet.h:949
Data_Namespace::DataMgr * data_mgr_
Definition: ResultSet.h:965
std::vector< std::vector< std::vector< const int8_t * > > > col_buffers_
Definition: ResultSet.h:958
std::vector< std::vector< int64_t > > consistent_frag_sizes_
Definition: ResultSet.h:960
const ExecutorDeviceType device_type_
Definition: ResultSet.h:935
size_t fetched_so_far_
Definition: ResultSet.h:941
std::vector< ColumnLazyFetchInfo > lazy_fetch_info_
Definition: ResultSet.h:957
size_t crt_row_buff_idx_
Definition: ResultSet.h:940
QueryPlanHash query_plan_
Definition: ResultSet.h:985
std::vector< std::vector< std::vector< int64_t > > > frag_offsets_
Definition: ResultSet.h:959
bool separate_varlen_storage_valid_
Definition: ResultSet.h:971
constexpr int64_t uninitialized_cached_row_count
Definition: ResultSet.cpp:52
const int device_id_
Definition: ResultSet.h:936
ResultSet::ResultSet ( const std::shared_ptr< const Analyzer::Estimator > ,
const ExecutorDeviceType  device_type,
const int  device_id,
Data_Namespace::DataMgr data_mgr 
)
ResultSet::ResultSet ( const std::string &  explanation)

Definition at line 165 of file ResultSet.cpp.

References CPU.

167  , device_id_(-1)
168  , fetched_so_far_(0)
170  , explanation_(explanation)
171  , just_explain_(true)
172  , for_validation_only_(false)
175  , cached_(false)
176  , query_exec_time_(0)
178  , can_use_speculative_top_n_sort(std::nullopt) {}
bool for_validation_only_
Definition: ResultSet.h:974
GeoReturnType geo_return_type_
Definition: ResultSet.h:979
constexpr QueryPlanHash EMPTY_HASHED_PLAN_DAG_KEY
std::optional< bool > can_use_speculative_top_n_sort
Definition: ResultSet.h:992
size_t query_exec_time_
Definition: ResultSet.h:984
const bool just_explain_
Definition: ResultSet.h:973
std::atomic< int64_t > cached_row_count_
Definition: ResultSet.h:975
bool cached_
Definition: ResultSet.h:982
std::string explanation_
Definition: ResultSet.h:972
const ExecutorDeviceType device_type_
Definition: ResultSet.h:935
size_t fetched_so_far_
Definition: ResultSet.h:941
QueryPlanHash query_plan_
Definition: ResultSet.h:985
bool separate_varlen_storage_valid_
Definition: ResultSet.h:971
constexpr int64_t uninitialized_cached_row_count
Definition: ResultSet.cpp:52
const int device_id_
Definition: ResultSet.h:936
ResultSet::ResultSet ( int64_t  queue_time_ms,
int64_t  render_time_ms,
const std::shared_ptr< RowSetMemoryOwner row_set_mem_owner 
)

Definition at line 180 of file ResultSet.cpp.

References CPU.

184  , device_id_(-1)
185  , fetched_so_far_(0)
186  , row_set_mem_owner_(row_set_mem_owner)
187  , timings_(QueryExecutionTimings{queue_time_ms, render_time_ms, 0, 0})
189  , just_explain_(true)
190  , for_validation_only_(false)
193  , cached_(false)
194  , query_exec_time_(0)
196  , can_use_speculative_top_n_sort(std::nullopt) {}
bool for_validation_only_
Definition: ResultSet.h:974
GeoReturnType geo_return_type_
Definition: ResultSet.h:979
constexpr QueryPlanHash EMPTY_HASHED_PLAN_DAG_KEY
std::optional< bool > can_use_speculative_top_n_sort
Definition: ResultSet.h:992
size_t query_exec_time_
Definition: ResultSet.h:984
const bool just_explain_
Definition: ResultSet.h:973
std::atomic< int64_t > cached_row_count_
Definition: ResultSet.h:975
std::shared_ptr< RowSetMemoryOwner > row_set_mem_owner_
Definition: ResultSet.h:944
bool cached_
Definition: ResultSet.h:982
QueryExecutionTimings timings_
Definition: ResultSet.h:950
const ExecutorDeviceType device_type_
Definition: ResultSet.h:935
size_t fetched_so_far_
Definition: ResultSet.h:941
QueryPlanHash query_plan_
Definition: ResultSet.h:985
bool separate_varlen_storage_valid_
Definition: ResultSet.h:971
constexpr int64_t uninitialized_cached_row_count
Definition: ResultSet.cpp:52
const int device_id_
Definition: ResultSet.h:936
ResultSet::~ResultSet ( )

Definition at line 198 of file ResultSet.cpp.

References CHECK, CPU, and data_mgr_().

198  {
199  if (storage_) {
200  if (!storage_->buff_is_provided_) {
201  CHECK(storage_->getUnderlyingBuffer());
202  free(storage_->getUnderlyingBuffer());
203  }
204  }
205  for (auto& storage : appended_storage_) {
206  if (storage && !storage->buff_is_provided_) {
207  free(storage->getUnderlyingBuffer());
208  }
209  }
213  }
215  CHECK(data_mgr_);
217  }
218 }
AppendedStorage appended_storage_
Definition: ResultSet.h:939
std::unique_ptr< ResultSetStorage > storage_
Definition: ResultSet.h:938
Data_Namespace::DataMgr * data_mgr_
Definition: ResultSet.h:965
int8_t * host_estimator_buffer_
Definition: ResultSet.h:964
const ExecutorDeviceType device_type_
Definition: ResultSet.h:935
#define CHECK(condition)
Definition: Logger.h:289
void free(AbstractBuffer *buffer)
Definition: DataMgr.cpp:525
Data_Namespace::AbstractBuffer * device_estimator_buffer_
Definition: ResultSet.h:963

+ Here is the call graph for this function:

Member Function Documentation

void ResultSet::addCompilationQueueTime ( const int64_t  compilation_queue_time)

Definition at line 720 of file ResultSet.cpp.

720  {
721  timings_.compilation_queue_time += compilation_queue_time;
722 }
QueryExecutionTimings timings_
Definition: ResultSet.h:950
void ResultSet::advanceCursorToNextEntry ( ResultSetRowIterator iter) const
private
size_t ResultSet::advanceCursorToNextEntry ( ) const
private
const ResultSetStorage* ResultSet::allocateStorage ( ) const
const ResultSetStorage* ResultSet::allocateStorage ( int8_t *  ,
const std::vector< int64_t > &  ,
std::shared_ptr< VarlenOutputInfo = nullptr 
) const
const ResultSetStorage* ResultSet::allocateStorage ( const std::vector< int64_t > &  ) const
void ResultSet::append ( ResultSet that)

Definition at line 301 of file ResultSet.cpp.

References CHECK.

301  {
303  if (!that.storage_) {
304  return;
305  }
306  appended_storage_.push_back(std::move(that.storage_));
309  appended_storage_.back()->query_mem_desc_.getEntryCount());
310  chunks_.insert(chunks_.end(), that.chunks_.begin(), that.chunks_.end());
311  col_buffers_.insert(
312  col_buffers_.end(), that.col_buffers_.begin(), that.col_buffers_.end());
313  frag_offsets_.insert(
314  frag_offsets_.end(), that.frag_offsets_.begin(), that.frag_offsets_.end());
316  that.consistent_frag_sizes_.begin(),
317  that.consistent_frag_sizes_.end());
318  chunk_iters_.insert(
319  chunk_iters_.end(), that.chunk_iters_.begin(), that.chunk_iters_.end());
321  CHECK(that.separate_varlen_storage_valid_);
323  that.serialized_varlen_buffer_.begin(),
324  that.serialized_varlen_buffer_.end());
325  }
326  for (auto& buff : that.literal_buffers_) {
327  literal_buffers_.push_back(std::move(buff));
328  }
329 }
void setEntryCount(const size_t val)
AppendedStorage appended_storage_
Definition: ResultSet.h:939
QueryMemoryDescriptor query_mem_desc_
Definition: ResultSet.h:937
std::vector< std::shared_ptr< std::list< ChunkIter > > > chunk_iters_
Definition: ResultSet.h:953
std::vector< SerializedVarlenBufferStorage > serialized_varlen_buffer_
Definition: ResultSet.h:970
void invalidateCachedRowCount() const
Definition: ResultSet.cpp:607
std::list< std::shared_ptr< Chunk_NS::Chunk > > chunks_
Definition: ResultSet.h:952
std::vector< std::vector< int8_t > > literal_buffers_
Definition: ResultSet.h:956
std::vector< std::vector< std::vector< const int8_t * > > > col_buffers_
Definition: ResultSet.h:958
std::vector< std::vector< int64_t > > consistent_frag_sizes_
Definition: ResultSet.h:960
#define CHECK(condition)
Definition: Logger.h:289
std::vector< std::vector< std::vector< int64_t > > > frag_offsets_
Definition: ResultSet.h:959
bool separate_varlen_storage_valid_
Definition: ResultSet.h:971
bool ResultSet::areAnyColumnsLazyFetched ( ) const
inline

Definition at line 583 of file ResultSet.h.

References anonymous_namespace{QueryMemoryDescriptor.cpp}::any_of(), and lazy_fetch_info_.

583  {
584  auto is_lazy = [](auto const& info) { return info.is_lazily_fetched; };
585  return std::any_of(lazy_fetch_info_.begin(), lazy_fetch_info_.end(), is_lazy);
586  }
std::vector< ColumnLazyFetchInfo > lazy_fetch_info_
Definition: ResultSet.h:957
bool any_of(std::vector< Analyzer::Expr * > const &target_exprs)

+ Here is the call graph for this function:

void ResultSet::baselineSort ( const std::list< Analyzer::OrderEntry > &  order_entries,
const size_t  top_n,
const Executor executor 
)
private
size_t ResultSet::binSearchRowCount ( ) const
private

Definition at line 618 of file ResultSet.cpp.

References anonymous_namespace{ResultSet.cpp}::get_truncated_row_count().

618  {
619  if (!storage_) {
620  return 0;
621  }
622 
623  size_t row_count = storage_->binSearchRowCount();
624  for (auto& s : appended_storage_) {
625  row_count += s->binSearchRowCount();
626  }
627 
628  return get_truncated_row_count(row_count, getLimit(), drop_first_);
629 }
AppendedStorage appended_storage_
Definition: ResultSet.h:939
std::unique_ptr< ResultSetStorage > storage_
Definition: ResultSet.h:938
size_t getLimit() const
Definition: ResultSet.cpp:1399
size_t get_truncated_row_count(size_t total_row_count, size_t limit, size_t offset)
Definition: ResultSet.cpp:541
size_t drop_first_
Definition: ResultSet.h:942

+ Here is the call graph for this function:

double ResultSet::calculateQuantile ( quantile::TDigest *const  t_digest)
static

Definition at line 1037 of file ResultSet.cpp.

References CHECK, quantile::detail::TDigest< RealType, IndexType >::mergeBufferFinal(), NULL_DOUBLE, and quantile::detail::TDigest< RealType, IndexType >::quantile().

Referenced by makeTargetValue().

1037  {
1038  static_assert(sizeof(int64_t) == sizeof(quantile::TDigest*));
1039  CHECK(t_digest);
1040  t_digest->mergeBufferFinal();
1041  double const quantile = t_digest->quantile();
1042  return boost::math::isnan(quantile) ? NULL_DOUBLE : quantile;
1043 }
#define NULL_DOUBLE
DEVICE RealType quantile(VectorView< IndexType const > const partial_sum, RealType const q) const
Definition: quantile.h:827
DEVICE void mergeBufferFinal()
Definition: quantile.h:651
#define CHECK(condition)
Definition: Logger.h:289

+ Here is the call graph for this function:

+ Here is the caller graph for this function:

bool ResultSet::canUseFastBaselineSort ( const std::list< Analyzer::OrderEntry > &  order_entries,
const size_t  top_n 
)
private
std::optional<bool> ResultSet::canUseSpeculativeTopNSort ( ) const
inline

Definition at line 521 of file ResultSet.h.

References can_use_speculative_top_n_sort.

521  {
523  }
std::optional< bool > can_use_speculative_top_n_sort
Definition: ResultSet.h:992
void ResultSet::clearPermutation ( )
inline

Definition at line 466 of file ResultSet.h.

References permutation_.

Referenced by initStatus().

466  {
467  if (!permutation_.empty()) {
468  permutation_.clear();
469  }
470  }
Permutation permutation_
Definition: ResultSet.h:945

+ Here is the caller graph for this function:

size_t ResultSet::colCount ( ) const

Definition at line 415 of file ResultSet.cpp.

415  {
416  return just_explain_ ? 1 : targets_.size();
417 }
const bool just_explain_
Definition: ResultSet.h:973
const std::vector< TargetInfo > targets_
Definition: ResultSet.h:934
ScalarTargetValue ResultSet::convertToScalarTargetValue ( SQLTypeInfo const &  ti,
bool const  translate_strings,
int64_t const  val 
) const

Definition at line 1090 of file ResultSetIteration.cpp.

References CHECK_EQ, SQLTypeInfo::get_compression(), SQLTypeInfo::is_any(), SQLTypeInfo::is_string(), kDOUBLE, kENCODING_DICT, kFLOAT, and makeStringTargetValue().

Referenced by makeTargetValue().

1092  {
1093  if (ti.is_string()) {
1094  CHECK_EQ(kENCODING_DICT, ti.get_compression());
1095  return makeStringTargetValue(ti, translate_strings, val);
1096  } else {
1097  return ti.is_any<kDOUBLE>() ? ScalarTargetValue(shared::bit_cast<double>(val))
1098  : ti.is_any<kFLOAT>() ? ScalarTargetValue(shared::bit_cast<float>(val))
1099  : ScalarTargetValue(val);
1100  }
1101 }
#define CHECK_EQ(x, y)
Definition: Logger.h:297
ScalarTargetValue makeStringTargetValue(SQLTypeInfo const &chosen_type, bool const translate_strings, int64_t const ival) const
boost::variant< int64_t, double, float, NullableString > ScalarTargetValue
Definition: TargetValue.h:180

+ Here is the call graph for this function:

+ Here is the caller graph for this function:

ResultSetPtr ResultSet::copy ( )

Definition at line 331 of file ResultSet.cpp.

References CHECK, gpu_enabled::copy(), and DEBUG_TIMER.

331  {
332  auto timer = DEBUG_TIMER(__func__);
333  if (!storage_) {
334  return nullptr;
335  }
336 
337  auto executor = getExecutor();
338  CHECK(executor);
339  ResultSetPtr copied_rs = std::make_shared<ResultSet>(targets_,
340  device_type_,
343  executor->getCatalog(),
344  executor->blockSize(),
345  executor->gridSize());
346 
347  auto allocate_and_copy_storage =
348  [&](const ResultSetStorage* prev_storage) -> std::unique_ptr<ResultSetStorage> {
349  const auto& prev_qmd = prev_storage->query_mem_desc_;
350  const auto storage_size = prev_qmd.getBufferSizeBytes(device_type_);
351  auto buff = row_set_mem_owner_->allocate(storage_size, /*thread_idx=*/0);
352  std::unique_ptr<ResultSetStorage> new_storage;
353  new_storage.reset(new ResultSetStorage(
354  prev_storage->targets_, prev_qmd, buff, /*buff_is_provided=*/true));
355  new_storage->target_init_vals_ = prev_storage->target_init_vals_;
356  if (prev_storage->varlen_output_info_) {
357  new_storage->varlen_output_info_ = prev_storage->varlen_output_info_;
358  }
359  memcpy(new_storage->buff_, prev_storage->buff_, storage_size);
360  new_storage->query_mem_desc_ = prev_qmd;
361  return new_storage;
362  };
363 
364  copied_rs->storage_ = allocate_and_copy_storage(storage_.get());
365  if (!appended_storage_.empty()) {
366  for (const auto& storage : appended_storage_) {
367  copied_rs->appended_storage_.push_back(allocate_and_copy_storage(storage.get()));
368  }
369  }
370  std::copy(chunks_.begin(), chunks_.end(), std::back_inserter(copied_rs->chunks_));
371  std::copy(chunk_iters_.begin(),
372  chunk_iters_.end(),
373  std::back_inserter(copied_rs->chunk_iters_));
374  std::copy(col_buffers_.begin(),
375  col_buffers_.end(),
376  std::back_inserter(copied_rs->col_buffers_));
377  std::copy(frag_offsets_.begin(),
378  frag_offsets_.end(),
379  std::back_inserter(copied_rs->frag_offsets_));
382  std::back_inserter(copied_rs->consistent_frag_sizes_));
386  std::back_inserter(copied_rs->serialized_varlen_buffer_));
387  }
388  std::copy(literal_buffers_.begin(),
389  literal_buffers_.end(),
390  std::back_inserter(copied_rs->literal_buffers_));
391  std::copy(lazy_fetch_info_.begin(),
392  lazy_fetch_info_.end(),
393  std::back_inserter(copied_rs->lazy_fetch_info_));
394 
395  copied_rs->permutation_ = permutation_;
396  copied_rs->drop_first_ = drop_first_;
397  copied_rs->keep_first_ = keep_first_;
398  copied_rs->separate_varlen_storage_valid_ = separate_varlen_storage_valid_;
399  copied_rs->query_exec_time_ = query_exec_time_;
400  copied_rs->input_table_keys_ = input_table_keys_;
401  copied_rs->target_meta_info_ = target_meta_info_;
402  copied_rs->geo_return_type_ = geo_return_type_;
403  copied_rs->query_plan_ = query_plan_;
405  copied_rs->can_use_speculative_top_n_sort = can_use_speculative_top_n_sort;
406  }
407 
408  return copied_rs;
409 }
Permutation permutation_
Definition: ResultSet.h:945
AppendedStorage appended_storage_
Definition: ResultSet.h:939
GeoReturnType geo_return_type_
Definition: ResultSet.h:979
std::optional< bool > can_use_speculative_top_n_sort
Definition: ResultSet.h:992
QueryMemoryDescriptor query_mem_desc_
Definition: ResultSet.h:937
std::unique_ptr< ResultSetStorage > storage_
Definition: ResultSet.h:938
size_t query_exec_time_
Definition: ResultSet.h:984
std::shared_ptr< ResultSet > ResultSetPtr
size_t keep_first_
Definition: ResultSet.h:943
std::vector< std::shared_ptr< std::list< ChunkIter > > > chunk_iters_
Definition: ResultSet.h:953
std::vector< SerializedVarlenBufferStorage > serialized_varlen_buffer_
Definition: ResultSet.h:970
const std::vector< TargetInfo > targets_
Definition: ResultSet.h:934
std::shared_ptr< RowSetMemoryOwner > row_set_mem_owner_
Definition: ResultSet.h:944
const Executor * getExecutor() const
Definition: ResultSet.h:627
size_t drop_first_
Definition: ResultSet.h:942
std::list< std::shared_ptr< Chunk_NS::Chunk > > chunks_
Definition: ResultSet.h:952
DEVICE auto copy(ARGS &&...args)
Definition: gpu_enabled.h:51
std::vector< std::vector< int8_t > > literal_buffers_
Definition: ResultSet.h:956
std::unordered_set< size_t > input_table_keys_
Definition: ResultSet.h:986
std::vector< TargetMetaInfo > target_meta_info_
Definition: ResultSet.h:987
std::vector< std::vector< std::vector< const int8_t * > > > col_buffers_
Definition: ResultSet.h:958
std::vector< std::vector< int64_t > > consistent_frag_sizes_
Definition: ResultSet.h:960
const ExecutorDeviceType device_type_
Definition: ResultSet.h:935
#define CHECK(condition)
Definition: Logger.h:289
#define DEBUG_TIMER(name)
Definition: Logger.h:407
std::vector< ColumnLazyFetchInfo > lazy_fetch_info_
Definition: ResultSet.h:957
QueryPlanHash query_plan_
Definition: ResultSet.h:985
std::vector< std::vector< std::vector< int64_t > > > frag_offsets_
Definition: ResultSet.h:959
bool separate_varlen_storage_valid_
Definition: ResultSet.h:971

+ Here is the call graph for this function:

void ResultSet::copyColumnIntoBuffer ( const size_t  column_idx,
int8_t *  output_buffer,
const size_t  output_buffer_size 
) const

For the specified column, this function goes through all available storages and copies their contents into a contiguous output_buffer

Definition at line 1173 of file ResultSetIteration.cpp.

References appended_storage_, CHECK, CHECK_LT, QueryMemoryDescriptor::getPaddedSlotWidthBytes(), QueryMemoryDescriptor::getSlotCount(), isDirectColumnarConversionPossible(), query_mem_desc_, and storage_.

1175  {
1177  CHECK_LT(column_idx, query_mem_desc_.getSlotCount());
1178  CHECK(output_buffer_size > 0);
1179  CHECK(output_buffer);
1180  const auto column_width_size = query_mem_desc_.getPaddedSlotWidthBytes(column_idx);
1181  size_t out_buff_offset = 0;
1182 
1183  // the main storage:
1184  const size_t crt_storage_row_count = storage_->query_mem_desc_.getEntryCount();
1185  const size_t crt_buffer_size = crt_storage_row_count * column_width_size;
1186  const size_t column_offset = storage_->query_mem_desc_.getColOffInBytes(column_idx);
1187  const int8_t* storage_buffer = storage_->getUnderlyingBuffer() + column_offset;
1188  CHECK(crt_buffer_size <= output_buffer_size);
1189  std::memcpy(output_buffer, storage_buffer, crt_buffer_size);
1190 
1191  out_buff_offset += crt_buffer_size;
1192 
1193  // the appended storages:
1194  for (size_t i = 0; i < appended_storage_.size(); i++) {
1195  const size_t crt_storage_row_count =
1196  appended_storage_[i]->query_mem_desc_.getEntryCount();
1197  if (crt_storage_row_count == 0) {
1198  // skip an empty appended storage
1199  continue;
1200  }
1201  CHECK_LT(out_buff_offset, output_buffer_size);
1202  const size_t crt_buffer_size = crt_storage_row_count * column_width_size;
1203  const size_t column_offset =
1204  appended_storage_[i]->query_mem_desc_.getColOffInBytes(column_idx);
1205  const int8_t* storage_buffer =
1206  appended_storage_[i]->getUnderlyingBuffer() + column_offset;
1207  CHECK(out_buff_offset + crt_buffer_size <= output_buffer_size);
1208  std::memcpy(output_buffer + out_buff_offset, storage_buffer, crt_buffer_size);
1209 
1210  out_buff_offset += crt_buffer_size;
1211  }
1212 }
AppendedStorage appended_storage_
Definition: ResultSet.h:939
QueryMemoryDescriptor query_mem_desc_
Definition: ResultSet.h:937
std::unique_ptr< ResultSetStorage > storage_
Definition: ResultSet.h:938
const int8_t getPaddedSlotWidthBytes(const size_t slot_idx) const
#define CHECK_LT(x, y)
Definition: Logger.h:299
#define CHECK(condition)
Definition: Logger.h:289
bool isDirectColumnarConversionPossible() const
Definition: ResultSet.cpp:1468

+ Here is the call graph for this function:

void ResultSet::create_active_buffer_set ( CountDistinctSet count_distinct_active_buffer_set) const
private
Comparator ResultSet::createComparator ( const std::list< Analyzer::OrderEntry > &  order_entries,
const PermutationView  permutation,
const Executor executor,
const bool  single_threaded 
)
inlineprivate

Definition at line 869 of file ResultSet.h.

References DEBUG_TIMER, QueryMemoryDescriptor::didOutputColumnar(), and query_mem_desc_.

872  {
873  auto timer = DEBUG_TIMER(__func__);
875  return [rsc = ResultSetComparator<ColumnWiseTargetAccessor>(
876  order_entries, this, permutation, executor, single_threaded)](
877  const PermutationIdx lhs, const PermutationIdx rhs) {
878  return rsc(lhs, rhs);
879  };
880  } else {
881  return [rsc = ResultSetComparator<RowWiseTargetAccessor>(
882  order_entries, this, permutation, executor, single_threaded)](
883  const PermutationIdx lhs, const PermutationIdx rhs) {
884  return rsc(lhs, rhs);
885  };
886  }
887  }
QueryMemoryDescriptor query_mem_desc_
Definition: ResultSet.h:937
uint32_t PermutationIdx
Definition: ResultSet.h:152
#define DEBUG_TIMER(name)
Definition: Logger.h:407

+ Here is the call graph for this function:

bool ResultSet::definitelyHasNoRows ( ) const

Definition at line 670 of file ResultSet.cpp.

670  {
671  return (!storage_ && !estimator_ && !just_explain_) || cached_row_count_ == 0;
672 }
std::unique_ptr< ResultSetStorage > storage_
Definition: ResultSet.h:938
const bool just_explain_
Definition: ResultSet.h:973
std::atomic< int64_t > cached_row_count_
Definition: ResultSet.h:975
const std::shared_ptr< const Analyzer::Estimator > estimator_
Definition: ResultSet.h:962
bool ResultSet::didOutputColumnar ( ) const
inline

Definition at line 559 of file ResultSet.h.

References QueryMemoryDescriptor::didOutputColumnar(), and query_mem_desc_.

559 { return this->query_mem_desc_.didOutputColumnar(); }
QueryMemoryDescriptor query_mem_desc_
Definition: ResultSet.h:937

+ Here is the call graph for this function:

void ResultSet::doBaselineSort ( const ExecutorDeviceType  device_type,
const std::list< Analyzer::OrderEntry > &  order_entries,
const size_t  top_n,
const Executor executor 
)
private
void ResultSet::dropFirstN ( const size_t  n)

Definition at line 59 of file ResultSet.cpp.

References anonymous_namespace{Utm.h}::n.

59  {
61  drop_first_ = n;
62 }
void invalidateCachedRowCount() const
Definition: ResultSet.cpp:607
size_t drop_first_
Definition: ResultSet.h:942
constexpr double n
Definition: Utm.h:38
void ResultSet::eachCellInColumn ( RowIterationState state,
CellCallback const &  func 
)

Definition at line 487 of file ResultSet.cpp.

References advance_slot(), advance_to_next_columnar_target_buff(), ResultSet::RowIterationState::agg_idx_, align_to_int64(), ResultSet::RowIterationState::buf_ptr_, CHECK, CHECK_GE, CHECK_LT, ResultSet::RowIterationState::compact_sz1_, ResultSet::RowIterationState::cur_target_idx_, QueryMemoryDescriptor::didOutputColumnar(), get_cols_ptr(), get_key_bytes_rowwise(), QueryMemoryDescriptor::getPaddedSlotWidthBytes(), ResultSet::RowIterationState::prev_target_idx_, read_int_from_buff(), and row_ptr_rowwise().

487  {
488  size_t const target_idx = state.cur_target_idx_;
489  QueryMemoryDescriptor& storage_qmd = storage_->query_mem_desc_;
490  CHECK_LT(target_idx, lazy_fetch_info_.size());
491  auto& col_lazy_fetch = lazy_fetch_info_[target_idx];
492  CHECK(col_lazy_fetch.is_lazily_fetched);
493  int const target_size = storage_->targets_[target_idx].sql_type.get_size();
494  CHECK_LT(0, target_size) << storage_->targets_[target_idx].toString();
495  size_t const nrows = storage_->binSearchRowCount();
496  if (storage_qmd.didOutputColumnar()) {
497  // Logic based on ResultSet::ColumnWiseTargetAccessor::initializeOffsetsForStorage()
498  if (state.buf_ptr_ == nullptr) {
499  state.buf_ptr_ = get_cols_ptr(storage_->buff_, storage_qmd);
500  state.compact_sz1_ = storage_qmd.getPaddedSlotWidthBytes(state.agg_idx_)
501  ? storage_qmd.getPaddedSlotWidthBytes(state.agg_idx_)
503  }
504  for (size_t j = state.prev_target_idx_; j < state.cur_target_idx_; ++j) {
505  size_t const next_target_idx = j + 1; // Set state to reflect next target_idx j+1
506  state.buf_ptr_ = advance_to_next_columnar_target_buff(
507  state.buf_ptr_, storage_qmd, state.agg_idx_);
508  auto const& next_agg_info = storage_->targets_[next_target_idx];
509  state.agg_idx_ =
510  advance_slot(state.agg_idx_, next_agg_info, separate_varlen_storage_valid_);
511  state.compact_sz1_ = storage_qmd.getPaddedSlotWidthBytes(state.agg_idx_)
512  ? storage_qmd.getPaddedSlotWidthBytes(state.agg_idx_)
514  }
515  for (size_t i = 0; i < nrows; ++i) {
516  int8_t const* const pos_ptr = state.buf_ptr_ + i * state.compact_sz1_;
517  int64_t pos = read_int_from_buff(pos_ptr, target_size);
518  CHECK_GE(pos, 0);
519  auto& frag_col_buffers = getColumnFrag(0, target_idx, pos);
520  CHECK_LT(size_t(col_lazy_fetch.local_col_id), frag_col_buffers.size());
521  int8_t const* const col_frag = frag_col_buffers[col_lazy_fetch.local_col_id];
522  func(col_frag + pos * target_size);
523  }
524  } else {
525  size_t const key_bytes_with_padding =
527  for (size_t i = 0; i < nrows; ++i) {
528  int8_t const* const keys_ptr = row_ptr_rowwise(storage_->buff_, storage_qmd, i);
529  int8_t const* const rowwise_target_ptr = keys_ptr + key_bytes_with_padding;
530  int64_t pos = *reinterpret_cast<int64_t const*>(rowwise_target_ptr);
531  auto& frag_col_buffers = getColumnFrag(0, target_idx, pos);
532  CHECK_LT(size_t(col_lazy_fetch.local_col_id), frag_col_buffers.size());
533  int8_t const* const col_frag = frag_col_buffers[col_lazy_fetch.local_col_id];
534  func(col_frag + pos * target_size);
535  }
536  }
537 }
T advance_to_next_columnar_target_buff(T target_ptr, const QueryMemoryDescriptor &query_mem_desc, const size_t target_slot_idx)
QueryMemoryDescriptor query_mem_desc_
Definition: ResultSet.h:937
#define CHECK_GE(x, y)
Definition: Logger.h:302
std::unique_ptr< ResultSetStorage > storage_
Definition: ResultSet.h:938
size_t getEffectiveKeyWidth() const
int64_t read_int_from_buff(const int8_t *ptr, const int8_t compact_sz)
size_t advance_slot(const size_t j, const TargetInfo &target_info, const bool separate_varlen_storage)
const int8_t getPaddedSlotWidthBytes(const size_t slot_idx) const
T row_ptr_rowwise(T buff, const QueryMemoryDescriptor &query_mem_desc, const size_t entry_idx)
#define CHECK_LT(x, y)
Definition: Logger.h:299
#define CHECK(condition)
Definition: Logger.h:289
std::vector< ColumnLazyFetchInfo > lazy_fetch_info_
Definition: ResultSet.h:957
bool separate_varlen_storage_valid_
Definition: ResultSet.h:971
T get_cols_ptr(T buff, const QueryMemoryDescriptor &query_mem_desc)
size_t get_key_bytes_rowwise(const QueryMemoryDescriptor &query_mem_desc)
FORCE_INLINE HOST DEVICE T align_to_int64(T addr)
const std::vector< const int8_t * > & getColumnFrag(const size_t storge_idx, const size_t col_logical_idx, int64_t &global_idx) const

+ Here is the call graph for this function:

size_t ResultSet::entryCount ( ) const

Returns the number of entries the result set is allocated to hold.

Note that this can be greater than or equal to the actual number of valid rows in the result set, whether due to a SQL LIMIT/OFFSET applied or because the result set representation is inherently sparse (i.e. baseline hash group by)

For getting the number of valid rows in the result set (inclusive of any applied LIMIT and/or OFFSET), use ResultSet::rowCount(). Note that a return value from entryCount() greater than 0 does not necessarily mean the result set contains any valid rows.

Definition at line 752 of file ResultSetIteration.cpp.

References QueryMemoryDescriptor::getEntryCount(), permutation_, and query_mem_desc_.

752  {
753  return permutation_.empty() ? query_mem_desc_.getEntryCount() : permutation_.size();
754 }
Permutation permutation_
Definition: ResultSet.h:945
QueryMemoryDescriptor query_mem_desc_
Definition: ResultSet.h:937

+ Here is the call graph for this function:

void ResultSet::fillOneEntry ( const std::vector< int64_t > &  entry)
inline

Definition at line 428 of file ResultSet.h.

References CHECK, and storage_.

428  {
429  CHECK(storage_);
430  if (storage_->query_mem_desc_.didOutputColumnar()) {
431  storage_->fillOneEntryColWise(entry);
432  } else {
433  storage_->fillOneEntryRowWise(entry);
434  }
435  }
std::unique_ptr< ResultSetStorage > storage_
Definition: ResultSet.h:938
#define CHECK(condition)
Definition: Logger.h:289
ResultSet::StorageLookupResult ResultSet::findStorage ( const size_t  entry_idx) const
private

Definition at line 941 of file ResultSet.cpp.

Referenced by getVarlenOutputInfo(), and makeGeoTargetValue().

941  {
942  auto [stg_idx, fixedup_entry_idx] = getStorageIndex(entry_idx);
943  return {stg_idx ? appended_storage_[stg_idx - 1].get() : storage_.get(),
944  fixedup_entry_idx,
945  stg_idx};
946 }
std::pair< size_t, size_t > getStorageIndex(const size_t entry_idx) const
Definition: ResultSet.cpp:916
AppendedStorage appended_storage_
Definition: ResultSet.h:939
std::unique_ptr< ResultSetStorage > storage_
Definition: ResultSet.h:938

+ Here is the caller graph for this function:

void ResultSet::fixupCountDistinctPointers ( )
private
QueryMemoryDescriptor ResultSet::fixupQueryMemoryDescriptor ( const QueryMemoryDescriptor query_mem_desc)
static

Definition at line 758 of file ResultSet.cpp.

References QueryMemoryDescriptor::didOutputColumnar(), and query_mem_desc.

Referenced by GpuSharedMemCodeBuilder::codegenInitialization(), GpuSharedMemCodeBuilder::codegenReduction(), Executor::executeTableFunction(), QueryExecutionContext::groupBufferToDeinterleavedResults(), QueryMemoryInitializer::initRowGroups(), QueryMemoryInitializer::QueryMemoryInitializer(), and Executor::reduceMultiDeviceResults().

759  {
760  auto query_mem_desc_copy = query_mem_desc;
761  query_mem_desc_copy.resetGroupColWidths(
762  std::vector<int8_t>(query_mem_desc_copy.getGroupbyColCount(), 8));
763  if (query_mem_desc.didOutputColumnar()) {
764  return query_mem_desc_copy;
765  }
766  query_mem_desc_copy.alignPaddedSlots();
767  return query_mem_desc_copy;
768 }

+ Here is the call graph for this function:

+ Here is the caller graph for this function:

unsigned ResultSet::getBlockSize ( ) const
inline

Definition at line 534 of file ResultSet.h.

References block_size_.

534 { return block_size_; }
unsigned block_size_
Definition: ResultSet.h:948
size_t ResultSet::getBufferSizeBytes ( const ExecutorDeviceType  device_type) const

Definition at line 756 of file ResultSetIteration.cpp.

References CHECK, and storage_.

756  {
757  CHECK(storage_);
758  return storage_->query_mem_desc_.getBufferSizeBytes(device_type);
759 }
std::unique_ptr< ResultSetStorage > storage_
Definition: ResultSet.h:938
#define CHECK(condition)
Definition: Logger.h:289
SQLTypeInfo ResultSet::getColType ( const size_t  col_idx) const

Definition at line 419 of file ResultSet.cpp.

References CHECK_LT, kAVG, kDOUBLE, and kTEXT.

419  {
420  if (just_explain_) {
421  return SQLTypeInfo(kTEXT, false);
422  }
423  CHECK_LT(col_idx, targets_.size());
424  return targets_[col_idx].agg_kind == kAVG ? SQLTypeInfo(kDOUBLE, false)
425  : targets_[col_idx].sql_type;
426 }
const bool just_explain_
Definition: ResultSet.h:973
const std::vector< TargetInfo > targets_
Definition: ResultSet.h:934
#define CHECK_LT(x, y)
Definition: Logger.h:299
Definition: sqltypes.h:67
Definition: sqldefs.h:74
template<typename ENTRY_TYPE >
ENTRY_TYPE ResultSet::getColumnarBaselineEntryAt ( const size_t  row_idx,
const size_t  target_idx,
const size_t  slot_idx 
) const
private
template<typename ENTRY_TYPE >
ENTRY_TYPE ResultSet::getColumnarBaselineEntryAt ( const size_t  row_idx,
const size_t  target_idx,
const size_t  slot_idx 
) const

Directly accesses the result set's storage buffer for a particular data type (row-wise output, baseline hash group by)

NOTE: Currently, only used in direct columnarization

Definition at line 1345 of file ResultSetIteration.cpp.

References CHECK_NE, and storage_.

1347  {
1348  CHECK_NE(storage_->query_mem_desc_.targetGroupbyIndicesSize(), size_t(0));
1349  const auto key_width = storage_->query_mem_desc_.getEffectiveKeyWidth();
1350  const auto column_offset =
1351  (storage_->query_mem_desc_.getTargetGroupbyIndex(target_idx) < 0)
1352  ? storage_->query_mem_desc_.getColOffInBytes(slot_idx)
1353  : storage_->query_mem_desc_.getTargetGroupbyIndex(target_idx) * key_width *
1354  storage_->query_mem_desc_.getEntryCount();
1355  const auto column_buffer = storage_->getUnderlyingBuffer() + column_offset;
1356  return reinterpret_cast<const ENTRY_TYPE*>(column_buffer)[row_idx];
1357 }
std::unique_ptr< ResultSetStorage > storage_
Definition: ResultSet.h:938
#define CHECK_NE(x, y)
Definition: Logger.h:298
const int8_t * ResultSet::getColumnarBuffer ( size_t  column_idx) const

Definition at line 1499 of file ResultSet.cpp.

References CHECK.

1499  {
1501  return storage_->getUnderlyingBuffer() + query_mem_desc_.getColOffInBytes(column_idx);
1502 }
QueryMemoryDescriptor query_mem_desc_
Definition: ResultSet.h:937
std::unique_ptr< ResultSetStorage > storage_
Definition: ResultSet.h:938
bool isZeroCopyColumnarConversionPossible(size_t column_idx) const
Definition: ResultSet.cpp:1490
#define CHECK(condition)
Definition: Logger.h:289
size_t getColOffInBytes(const size_t col_idx) const
template<typename ENTRY_TYPE >
ENTRY_TYPE ResultSet::getColumnarPerfectHashEntryAt ( const size_t  row_idx,
const size_t  target_idx,
const size_t  slot_idx 
) const
private
template<typename ENTRY_TYPE >
ENTRY_TYPE ResultSet::getColumnarPerfectHashEntryAt ( const size_t  row_idx,
const size_t  target_idx,
const size_t  slot_idx 
) const

Directly accesses the result set's storage buffer for a particular data type (columnar output, perfect hash group by)

NOTE: Currently, only used in direct columnarization

Definition at line 1291 of file ResultSetIteration.cpp.

References storage_.

1293  {
1294  const size_t column_offset = storage_->query_mem_desc_.getColOffInBytes(slot_idx);
1295  const int8_t* storage_buffer = storage_->getUnderlyingBuffer() + column_offset;
1296  return reinterpret_cast<const ENTRY_TYPE*>(storage_buffer)[row_idx];
1297 }
std::unique_ptr< ResultSetStorage > storage_
Definition: ResultSet.h:938
const std::vector< const int8_t * > & ResultSet::getColumnFrag ( const size_t  storge_idx,
const size_t  col_logical_idx,
int64_t &  global_idx 
) const
private

Definition at line 1138 of file ResultSetIteration.cpp.

References CHECK_EQ, CHECK_GE, CHECK_LE, CHECK_LT, col_buffers_, consistent_frag_sizes_, frag_offsets_, and anonymous_namespace{ResultSetIteration.cpp}::get_frag_id_and_local_idx().

Referenced by lazyReadInt(), makeGeoTargetValue(), makeTargetValue(), and makeVarlenTargetValue().

1140  {
1141  CHECK_LT(static_cast<size_t>(storage_idx), col_buffers_.size());
1142  if (col_buffers_[storage_idx].size() > 1) {
1143  int64_t frag_id = 0;
1144  int64_t local_idx = global_idx;
1145  if (consistent_frag_sizes_[storage_idx][col_logical_idx] != -1) {
1146  frag_id = global_idx / consistent_frag_sizes_[storage_idx][col_logical_idx];
1147  local_idx = global_idx % consistent_frag_sizes_[storage_idx][col_logical_idx];
1148  } else {
1149  std::tie(frag_id, local_idx) = get_frag_id_and_local_idx(
1150  frag_offsets_[storage_idx], col_logical_idx, global_idx);
1151  CHECK_LE(local_idx, global_idx);
1152  }
1153  CHECK_GE(frag_id, int64_t(0));
1154  CHECK_LT(static_cast<size_t>(frag_id), col_buffers_[storage_idx].size());
1155  global_idx = local_idx;
1156  return col_buffers_[storage_idx][frag_id];
1157  } else {
1158  CHECK_EQ(size_t(1), col_buffers_[storage_idx].size());
1159  return col_buffers_[storage_idx][0];
1160  }
1161 }
#define CHECK_EQ(x, y)
Definition: Logger.h:297
#define CHECK_GE(x, y)
Definition: Logger.h:302
#define CHECK_LT(x, y)
Definition: Logger.h:299
#define CHECK_LE(x, y)
Definition: Logger.h:300
std::vector< std::vector< std::vector< const int8_t * > > > col_buffers_
Definition: ResultSet.h:958
std::vector< std::vector< int64_t > > consistent_frag_sizes_
Definition: ResultSet.h:960
std::vector< std::vector< std::vector< int64_t > > > frag_offsets_
Definition: ResultSet.h:959
std::pair< int64_t, int64_t > get_frag_id_and_local_idx(const std::vector< std::vector< T >> &frag_offsets, const size_t tab_or_col_idx, const int64_t global_idx)

+ Here is the call graph for this function:

+ Here is the caller graph for this function:

size_t ResultSet::getCurrentRowBufferIndex ( ) const

Definition at line 293 of file ResultSet.cpp.

293  {
294  if (crt_row_buff_idx_ == 0) {
295  throw std::runtime_error("current row buffer iteration index is undefined");
296  }
297  return crt_row_buff_idx_ - 1;
298 }
size_t crt_row_buff_idx_
Definition: ResultSet.h:940
Data_Namespace::DataMgr* ResultSet::getDataManager ( ) const
private
int8_t * ResultSet::getDeviceEstimatorBuffer ( ) const

Definition at line 688 of file ResultSet.cpp.

References CHECK, and GPU.

688  {
692 }
virtual int8_t * getMemoryPtr()=0
const ExecutorDeviceType device_type_
Definition: ResultSet.h:935
#define CHECK(condition)
Definition: Logger.h:289
Data_Namespace::AbstractBuffer * device_estimator_buffer_
Definition: ResultSet.h:963
int ResultSet::getDeviceId ( ) const

Definition at line 754 of file ResultSet.cpp.

754  {
755  return device_id_;
756 }
const int device_id_
Definition: ResultSet.h:936
ExecutorDeviceType ResultSet::getDeviceType ( ) const

Definition at line 252 of file ResultSet.cpp.

252  {
253  return device_type_;
254 }
const ExecutorDeviceType device_type_
Definition: ResultSet.h:935
int64_t ResultSet::getDistinctBufferRefFromBufferRowwise ( int8_t *  rowwise_target_ptr,
const TargetInfo target_info 
) const
private
template<typename ENTRY_TYPE , QueryDescriptionType QUERY_TYPE, bool COLUMNAR_FORMAT>
ENTRY_TYPE ResultSet::getEntryAt ( const size_t  row_idx,
const size_t  target_idx,
const size_t  slot_idx 
) const
template<typename ENTRY_TYPE , QueryDescriptionType QUERY_TYPE, bool COLUMNAR_FORMAT>
ENTRY_TYPE ResultSet::getEntryAt ( const size_t  row_idx,
const size_t  target_idx,
const size_t  slot_idx 
) const

Definition at line 1215 of file ResultSetIteration.cpp.

References GroupByBaselineHash, GroupByPerfectHash, and UNREACHABLE.

1217  {
1218  if constexpr (QUERY_TYPE == QueryDescriptionType::GroupByPerfectHash) { // NOLINT
1219  if constexpr (COLUMNAR_FORMAT) { // NOLINT
1220  return getColumnarPerfectHashEntryAt<ENTRY_TYPE>(row_idx, target_idx, slot_idx);
1221  } else {
1222  return getRowWisePerfectHashEntryAt<ENTRY_TYPE>(row_idx, target_idx, slot_idx);
1223  }
1224  } else if constexpr (QUERY_TYPE == QueryDescriptionType::GroupByBaselineHash) {
1225  if constexpr (COLUMNAR_FORMAT) { // NOLINT
1226  return getColumnarBaselineEntryAt<ENTRY_TYPE>(row_idx, target_idx, slot_idx);
1227  } else {
1228  return getRowWiseBaselineEntryAt<ENTRY_TYPE>(row_idx, target_idx, slot_idx);
1229  }
1230  } else {
1231  UNREACHABLE() << "Invalid query type is used";
1232  return 0;
1233  }
1234 }
#define UNREACHABLE()
Definition: Logger.h:333
const long ResultSet::getExecTime ( ) const
inline

Definition at line 501 of file ResultSet.h.

References query_exec_time_.

501 { return query_exec_time_; }
size_t query_exec_time_
Definition: ResultSet.h:984
const Executor* ResultSet::getExecutor ( ) const
inline

Definition at line 627 of file ResultSet.h.

References QueryMemoryDescriptor::getExecutor(), and query_mem_desc_.

627 { return query_mem_desc_.getExecutor(); }
QueryMemoryDescriptor query_mem_desc_
Definition: ResultSet.h:937
const Executor * getExecutor() const

+ Here is the call graph for this function:

std::string ResultSet::getExplanation ( ) const
inline

Definition at line 393 of file ResultSet.h.

References explanation_, and just_explain_.

393  {
394  if (just_explain_) {
395  return explanation_;
396  }
397  return {};
398  }
const bool just_explain_
Definition: ResultSet.h:973
std::string explanation_
Definition: ResultSet.h:972
GeoReturnType ResultSet::getGeoReturnType ( ) const
inline

Definition at line 550 of file ResultSet.h.

References geo_return_type_.

550 { return geo_return_type_; }
GeoReturnType geo_return_type_
Definition: ResultSet.h:979
int ResultSet::getGpuCount ( ) const
private
unsigned ResultSet::getGridSize ( ) const
inline

Definition at line 536 of file ResultSet.h.

References grid_size_.

536 { return grid_size_; }
unsigned grid_size_
Definition: ResultSet.h:949
int8_t * ResultSet::getHostEstimatorBuffer ( ) const

Definition at line 694 of file ResultSet.cpp.

694  {
695  return host_estimator_buffer_;
696 }
int8_t * host_estimator_buffer_
Definition: ResultSet.h:964
std::unordered_set<size_t> ResultSet::getInputTableKeys ( ) const
inline

Definition at line 507 of file ResultSet.h.

References input_table_keys_.

507 { return input_table_keys_; }
std::unordered_set< size_t > input_table_keys_
Definition: ResultSet.h:986
const std::vector<ColumnLazyFetchInfo>& ResultSet::getLazyFetchInfo ( ) const
inline

Definition at line 579 of file ResultSet.h.

References lazy_fetch_info_.

579  {
580  return lazy_fetch_info_;
581  }
std::vector< ColumnLazyFetchInfo > lazy_fetch_info_
Definition: ResultSet.h:957
size_t ResultSet::getLimit ( ) const

Definition at line 1399 of file ResultSet.cpp.

1399  {
1400  return keep_first_;
1401 }
size_t keep_first_
Definition: ResultSet.h:943
size_t ResultSet::getNDVEstimator ( ) const

Definition at line 33 of file CardinalityEstimator.cpp.

References bitmap_set_size(), CHECK, CHECK_LE, LOG, and logger::WARNING.

33  {
34  CHECK(dynamic_cast<const Analyzer::NDVEstimator*>(estimator_.get()));
36  auto bits_set = bitmap_set_size(host_estimator_buffer_, estimator_->getBufferSize());
37  if (bits_set == 0) {
38  // empty result set, return 1 for a groups buffer size of 1
39  return 1;
40  }
41  const auto total_bits = estimator_->getBufferSize() * 8;
42  CHECK_LE(bits_set, total_bits);
43  const auto unset_bits = total_bits - bits_set;
44  const auto ratio = static_cast<double>(unset_bits) / total_bits;
45  if (ratio == 0.) {
46  LOG(WARNING)
47  << "Failed to get a high quality cardinality estimation, falling back to "
48  "approximate group by buffer size guess.";
49  return 0;
50  }
51  return -static_cast<double>(total_bits) * log(ratio);
52 }
#define LOG(tag)
Definition: Logger.h:283
const std::shared_ptr< const Analyzer::Estimator > estimator_
Definition: ResultSet.h:962
#define CHECK_LE(x, y)
Definition: Logger.h:300
int8_t * host_estimator_buffer_
Definition: ResultSet.h:964
#define CHECK(condition)
Definition: Logger.h:289
size_t bitmap_set_size(const int8_t *bitmap, const size_t bitmap_byte_sz)
Definition: CountDistinct.h:37

+ Here is the call graph for this function:

std::vector< TargetValue > ResultSet::getNextRow ( const bool  translate_strings,
const bool  decimal_to_double 
) const

Definition at line 296 of file ResultSetIteration.cpp.

297  {
298  std::lock_guard<std::mutex> lock(row_iteration_mutex_);
299  if (!storage_ && !just_explain_) {
300  return {};
301  }
302  return getNextRowUnlocked(translate_strings, decimal_to_double);
303 }
std::mutex row_iteration_mutex_
Definition: ResultSet.h:976
double decimal_to_double(const SQLTypeInfo &otype, int64_t oval)
std::unique_ptr< ResultSetStorage > storage_
Definition: ResultSet.h:938
const bool just_explain_
Definition: ResultSet.h:973
std::vector< TargetValue > getNextRowUnlocked(const bool translate_strings, const bool decimal_to_double) const
std::vector< TargetValue > ResultSet::getNextRowImpl ( const bool  translate_strings,
const bool  decimal_to_double 
) const
private

Definition at line 318 of file ResultSetIteration.cpp.

References CHECK, and CHECK_EQ.

319  {
320  size_t entry_buff_idx = 0;
321  do {
323  return {};
324  }
325 
326  entry_buff_idx = advanceCursorToNextEntry();
327 
328  if (crt_row_buff_idx_ >= entryCount()) {
330  return {};
331  }
333  ++fetched_so_far_;
334 
335  } while (drop_first_ && fetched_so_far_ <= drop_first_);
336 
337  auto row = getRowAt(entry_buff_idx, translate_strings, decimal_to_double, false);
338  CHECK(!row.empty());
339 
340  return row;
341 }
#define CHECK_EQ(x, y)
Definition: Logger.h:297
double decimal_to_double(const SQLTypeInfo &otype, int64_t oval)
size_t keep_first_
Definition: ResultSet.h:943
size_t drop_first_
Definition: ResultSet.h:942
std::vector< TargetValue > getRowAt(const size_t index) const
size_t entryCount() const
Returns the number of entries the result set is allocated to hold.
#define CHECK(condition)
Definition: Logger.h:289
size_t fetched_so_far_
Definition: ResultSet.h:941
size_t crt_row_buff_idx_
Definition: ResultSet.h:940
size_t advanceCursorToNextEntry() const
std::vector< TargetValue > ResultSet::getNextRowUnlocked ( const bool  translate_strings,
const bool  decimal_to_double 
) const
private

Definition at line 305 of file ResultSetIteration.cpp.

307  {
308  if (just_explain_) {
309  if (fetched_so_far_) {
310  return {};
311  }
312  fetched_so_far_ = 1;
313  return {explanation_};
314  }
315  return getNextRowImpl(translate_strings, decimal_to_double);
316 }
double decimal_to_double(const SQLTypeInfo &otype, int64_t oval)
const bool just_explain_
Definition: ResultSet.h:973
std::string explanation_
Definition: ResultSet.h:972
std::vector< TargetValue > getNextRowImpl(const bool translate_strings, const bool decimal_to_double) const
size_t fetched_so_far_
Definition: ResultSet.h:941
size_t ResultSet::getNumColumnsLazyFetched ( ) const
inline

Definition at line 588 of file ResultSet.h.

References lazy_fetch_info_.

588  {
589  auto is_lazy = [](auto const& info) { return info.is_lazily_fetched; };
590  return std::count_if(lazy_fetch_info_.begin(), lazy_fetch_info_.end(), is_lazy);
591  }
std::vector< ColumnLazyFetchInfo > lazy_fetch_info_
Definition: ResultSet.h:957
OneIntegerColumnRow ResultSet::getOneColRow ( const size_t  index) const

Definition at line 234 of file ResultSetIteration.cpp.

References align_to_int64(), CHECK, get_key_bytes_rowwise(), and row_ptr_rowwise().

234  {
235  const auto storage_lookup_result = findStorage(global_entry_idx);
236  const auto storage = storage_lookup_result.storage_ptr;
237  const auto local_entry_idx = storage_lookup_result.fixedup_entry_idx;
238  if (storage->isEmptyEntry(local_entry_idx)) {
239  return {0, false};
240  }
241  const auto buff = storage->buff_;
242  CHECK(buff);
244  const auto keys_ptr = row_ptr_rowwise(buff, query_mem_desc_, local_entry_idx);
245  const auto key_bytes_with_padding =
247  const auto rowwise_target_ptr = keys_ptr + key_bytes_with_padding;
248  const auto tv = getTargetValueFromBufferRowwise(rowwise_target_ptr,
249  keys_ptr,
250  global_entry_idx,
251  targets_.front(),
252  0,
253  0,
254  false,
255  false,
256  false);
257  const auto scalar_tv = boost::get<ScalarTargetValue>(&tv);
258  CHECK(scalar_tv);
259  const auto ival_ptr = boost::get<int64_t>(scalar_tv);
260  CHECK(ival_ptr);
261  return {*ival_ptr, true};
262 }
QueryMemoryDescriptor query_mem_desc_
Definition: ResultSet.h:937
TargetValue getTargetValueFromBufferRowwise(int8_t *rowwise_target_ptr, int8_t *keys_ptr, const size_t entry_buff_idx, const TargetInfo &target_info, const size_t target_logical_idx, const size_t slot_idx, const bool translate_strings, const bool decimal_to_double, const bool fixup_count_distinct_pointers) const
const std::vector< TargetInfo > targets_
Definition: ResultSet.h:934
StorageLookupResult findStorage(const size_t entry_idx) const
Definition: ResultSet.cpp:941
T row_ptr_rowwise(T buff, const QueryMemoryDescriptor &query_mem_desc, const size_t entry_idx)
#define CHECK(condition)
Definition: Logger.h:289
size_t get_key_bytes_rowwise(const QueryMemoryDescriptor &query_mem_desc)
FORCE_INLINE HOST DEVICE T align_to_int64(T addr)

+ Here is the call graph for this function:

const int8_t ResultSet::getPaddedSlotWidthBytes ( const size_t  slot_idx) const
inline

Definition at line 568 of file ResultSet.h.

References QueryMemoryDescriptor::getPaddedSlotWidthBytes(), and query_mem_desc_.

568  {
569  return query_mem_desc_.getPaddedSlotWidthBytes(slot_idx);
570  }
QueryMemoryDescriptor query_mem_desc_
Definition: ResultSet.h:937
const int8_t getPaddedSlotWidthBytes(const size_t slot_idx) const

+ Here is the call graph for this function:

const Permutation & ResultSet::getPermutationBuffer ( ) const

Definition at line 864 of file ResultSet.cpp.

864  {
865  return permutation_;
866 }
Permutation permutation_
Definition: ResultSet.h:945
QueryDescriptionType ResultSet::getQueryDescriptionType ( ) const
inline

Definition at line 564 of file ResultSet.h.

References QueryMemoryDescriptor::getQueryDescriptionType(), and query_mem_desc_.

564  {
566  }
QueryMemoryDescriptor query_mem_desc_
Definition: ResultSet.h:937
QueryDescriptionType getQueryDescriptionType() const

+ Here is the call graph for this function:

const QueryMemoryDescriptor & ResultSet::getQueryMemDesc ( ) const

Definition at line 674 of file ResultSet.cpp.

References CHECK.

674  {
675  CHECK(storage_);
676  return storage_->query_mem_desc_;
677 }
std::unique_ptr< ResultSetStorage > storage_
Definition: ResultSet.h:938
#define CHECK(condition)
Definition: Logger.h:289
const QueryPlanHash ResultSet::getQueryPlanHash ( )
inline

Definition at line 505 of file ResultSet.h.

References query_plan_.

505 { return query_plan_; }
QueryPlanHash query_plan_
Definition: ResultSet.h:985
int64_t ResultSet::getQueueTime ( ) const

Definition at line 724 of file ResultSet.cpp.

int64_t ResultSet::getRenderTime ( ) const

Definition at line 729 of file ResultSet.cpp.

729  {
730  return timings_.render_time;
731 }
QueryExecutionTimings timings_
Definition: ResultSet.h:950
std::vector<TargetValue> ResultSet::getRowAt ( const size_t  index) const
TargetValue ResultSet::getRowAt ( const size_t  row_idx,
const size_t  col_idx,
const bool  translate_strings,
const bool  decimal_to_double = true 
) const
std::vector<TargetValue> ResultSet::getRowAt ( const size_t  index,
const bool  translate_strings,
const bool  decimal_to_double,
const bool  fixup_count_distinct_pointers,
const std::vector< bool > &  targets_to_skip = {} 
) const
private
std::vector< TargetValue > ResultSet::getRowAtNoTranslations ( const size_t  index,
const std::vector< bool > &  targets_to_skip = {} 
) const

Definition at line 273 of file ResultSetIteration.cpp.

275  {
276  if (logical_index >= entryCount()) {
277  return {};
278  }
279  const auto entry_idx =
280  permutation_.empty() ? logical_index : permutation_[logical_index];
281  return getRowAt(entry_idx, false, false, false, targets_to_skip);
282 }
Permutation permutation_
Definition: ResultSet.h:945
std::vector< TargetValue > getRowAt(const size_t index) const
size_t entryCount() const
Returns the number of entries the result set is allocated to hold.
std::shared_ptr<RowSetMemoryOwner> ResultSet::getRowSetMemOwner ( ) const
inline

Definition at line 449 of file ResultSet.h.

References row_set_mem_owner_.

449  {
450  return row_set_mem_owner_;
451  }
std::shared_ptr< RowSetMemoryOwner > row_set_mem_owner_
Definition: ResultSet.h:944
template<typename ENTRY_TYPE >
ENTRY_TYPE ResultSet::getRowWiseBaselineEntryAt ( const size_t  row_idx,
const size_t  target_idx,
const size_t  slot_idx 
) const
private
template<typename ENTRY_TYPE >
ENTRY_TYPE ResultSet::getRowWiseBaselineEntryAt ( const size_t  row_idx,
const size_t  target_idx,
const size_t  slot_idx 
) const

Directly accesses the result set's storage buffer for a particular data type (columnar output, baseline hash group by)

NOTE: Currently, only used in direct columnarization

Definition at line 1323 of file ResultSetIteration.cpp.

References CHECK_NE, row_ptr_rowwise(), and storage_.

1325  {
1326  CHECK_NE(storage_->query_mem_desc_.targetGroupbyIndicesSize(), size_t(0));
1327  const auto key_width = storage_->query_mem_desc_.getEffectiveKeyWidth();
1328  auto keys_ptr = row_ptr_rowwise(
1329  storage_->getUnderlyingBuffer(), storage_->query_mem_desc_, row_idx);
1330  const auto column_offset =
1331  (storage_->query_mem_desc_.getTargetGroupbyIndex(target_idx) < 0)
1332  ? storage_->query_mem_desc_.getColOffInBytes(slot_idx)
1333  : storage_->query_mem_desc_.getTargetGroupbyIndex(target_idx) * key_width;
1334  const auto storage_buffer = keys_ptr + column_offset;
1335  return *reinterpret_cast<const ENTRY_TYPE*>(storage_buffer);
1336 }
std::unique_ptr< ResultSetStorage > storage_
Definition: ResultSet.h:938
#define CHECK_NE(x, y)
Definition: Logger.h:298
T row_ptr_rowwise(T buff, const QueryMemoryDescriptor &query_mem_desc, const size_t entry_idx)

+ Here is the call graph for this function:

template<typename ENTRY_TYPE >
ENTRY_TYPE ResultSet::getRowWisePerfectHashEntryAt ( const size_t  row_idx,
const size_t  target_idx,
const size_t  slot_idx 
) const
private
template<typename ENTRY_TYPE >
ENTRY_TYPE ResultSet::getRowWisePerfectHashEntryAt ( const size_t  row_idx,
const size_t  target_idx,
const size_t  slot_idx 
) const

Directly accesses the result set's storage buffer for a particular data type (row-wise output, perfect hash group by)

NOTE: Currently, only used in direct columnarization

Definition at line 1306 of file ResultSetIteration.cpp.

References storage_.

1308  {
1309  const size_t row_offset = storage_->query_mem_desc_.getRowSize() * row_idx;
1310  const size_t column_offset = storage_->query_mem_desc_.getColOffInBytes(slot_idx);
1311  const int8_t* storage_buffer =
1312  storage_->getUnderlyingBuffer() + row_offset + column_offset;
1313  return *reinterpret_cast<const ENTRY_TYPE*>(storage_buffer);
1314 }
std::unique_ptr< ResultSetStorage > storage_
Definition: ResultSet.h:938
std::tuple< std::vector< bool >, size_t > ResultSet::getSingleSlotTargetBitmap ( ) const

Definition at line 1505 of file ResultSet.cpp.

References anonymous_namespace{RelAlgExecutor.cpp}::is_agg(), and kAVG.

1505  {
1506  std::vector<bool> target_bitmap(targets_.size(), true);
1507  size_t num_single_slot_targets = 0;
1508  for (size_t target_idx = 0; target_idx < targets_.size(); target_idx++) {
1509  const auto& sql_type = targets_[target_idx].sql_type;
1510  if (targets_[target_idx].is_agg && targets_[target_idx].agg_kind == kAVG) {
1511  target_bitmap[target_idx] = false;
1512  } else if (sql_type.is_varlen()) {
1513  target_bitmap[target_idx] = false;
1514  } else {
1515  num_single_slot_targets++;
1516  }
1517  }
1518  return std::make_tuple(std::move(target_bitmap), num_single_slot_targets);
1519 }
bool is_agg(const Analyzer::Expr *expr)
const std::vector< TargetInfo > targets_
Definition: ResultSet.h:934
Definition: sqldefs.h:74

+ Here is the call graph for this function:

std::vector< size_t > ResultSet::getSlotIndicesForTargetIndices ( ) const

Definition at line 1549 of file ResultSet.cpp.

References advance_slot().

1549  {
1550  std::vector<size_t> slot_indices(targets_.size(), 0);
1551  size_t slot_index = 0;
1552  for (size_t target_idx = 0; target_idx < targets_.size(); target_idx++) {
1553  slot_indices[target_idx] = slot_index;
1554  slot_index = advance_slot(slot_index, targets_[target_idx], false);
1555  }
1556  return slot_indices;
1557 }
const std::vector< TargetInfo > targets_
Definition: ResultSet.h:934
size_t advance_slot(const size_t j, const TargetInfo &target_info, const bool separate_varlen_storage)

+ Here is the call graph for this function:

const ResultSetStorage * ResultSet::getStorage ( ) const

Definition at line 411 of file ResultSet.cpp.

411  {
412  return storage_.get();
413 }
std::unique_ptr< ResultSetStorage > storage_
Definition: ResultSet.h:938
std::pair< size_t, size_t > ResultSet::getStorageIndex ( const size_t  entry_idx) const
private

Returns (storageIdx, entryIdx) pair, where: storageIdx : 0 is storage_, storageIdx-1 is index into appended_storage_. entryIdx : local index into the storage object.

Definition at line 916 of file ResultSet.cpp.

References CHECK_NE, and UNREACHABLE.

Referenced by makeGeoTargetValue(), makeTargetValue(), and makeVarlenTargetValue().

916  {
917  size_t fixedup_entry_idx = entry_idx;
918  auto entry_count = storage_->query_mem_desc_.getEntryCount();
919  const bool is_rowwise_layout = !storage_->query_mem_desc_.didOutputColumnar();
920  if (fixedup_entry_idx < entry_count) {
921  return {0, fixedup_entry_idx};
922  }
923  fixedup_entry_idx -= entry_count;
924  for (size_t i = 0; i < appended_storage_.size(); ++i) {
925  const auto& desc = appended_storage_[i]->query_mem_desc_;
926  CHECK_NE(is_rowwise_layout, desc.didOutputColumnar());
927  entry_count = desc.getEntryCount();
928  if (fixedup_entry_idx < entry_count) {
929  return {i + 1, fixedup_entry_idx};
930  }
931  fixedup_entry_idx -= entry_count;
932  }
933  UNREACHABLE() << "entry_idx = " << entry_idx << ", query_mem_desc_.getEntryCount() = "
935  return {};
936 }
AppendedStorage appended_storage_
Definition: ResultSet.h:939
QueryMemoryDescriptor query_mem_desc_
Definition: ResultSet.h:937
#define UNREACHABLE()
Definition: Logger.h:333
std::unique_ptr< ResultSetStorage > storage_
Definition: ResultSet.h:938
#define CHECK_NE(x, y)
Definition: Logger.h:298

+ Here is the caller graph for this function:

std::string ResultSet::getString ( SQLTypeInfo const &  ti,
int64_t const  ival 
) const

Definition at line 1872 of file ResultSetIteration.cpp.

References catalog_, SQLTypeInfo::get_comp_param(), StringDictionaryProxy::getString(), and row_set_mem_owner_.

Referenced by anonymous_namespace{ResultSetIteration.cpp}::build_string_array_target_value(), isLessThan(), and makeStringTargetValue().

1872  {
1873  StringDictionaryProxy* sdp;
1874  if (ti.get_comp_param()) {
1875  constexpr bool with_generation = false;
1876  sdp = catalog_ ? row_set_mem_owner_->getOrAddStringDictProxy(
1877  ti.get_comp_param(), with_generation, catalog_)
1878  : row_set_mem_owner_->getStringDictProxy(
1879  ti.get_comp_param()); // unit tests bypass the catalog
1880  } else {
1881  sdp = row_set_mem_owner_->getLiteralStringDictProxy();
1882  }
1883  return sdp->getString(ival);
1884 }
const Catalog_Namespace::Catalog * catalog_
Definition: ResultSet.h:947
std::string getString(int32_t string_id) const
std::shared_ptr< RowSetMemoryOwner > row_set_mem_owner_
Definition: ResultSet.h:944

+ Here is the call graph for this function:

+ Here is the caller graph for this function:

const std::vector< std::string > ResultSet::getStringDictionaryPayloadCopy ( const int  dict_id) const

Definition at line 1403 of file ResultSet.cpp.

References catalog_(), and CHECK.

1404  {
1405  const auto sdp = row_set_mem_owner_->getOrAddStringDictProxy(
1406  dict_id, /*with_generation=*/true, catalog_);
1407  CHECK(sdp);
1408  return sdp->getDictionary()->copyStrings();
1409 }
const Catalog_Namespace::Catalog * catalog_
Definition: ResultSet.h:947
std::shared_ptr< RowSetMemoryOwner > row_set_mem_owner_
Definition: ResultSet.h:944
#define CHECK(condition)
Definition: Logger.h:289

+ Here is the call graph for this function:

StringDictionaryProxy * ResultSet::getStringDictionaryProxy ( int const  dict_id) const

Definition at line 428 of file ResultSet.cpp.

References catalog_().

428  {
429  constexpr bool with_generation = true;
430  return catalog_ ? row_set_mem_owner_->getOrAddStringDictProxy(
431  dict_id, with_generation, catalog_)
432  : row_set_mem_owner_->getStringDictProxy(dict_id);
433 }
const Catalog_Namespace::Catalog * catalog_
Definition: ResultSet.h:947
std::shared_ptr< RowSetMemoryOwner > row_set_mem_owner_
Definition: ResultSet.h:944

+ Here is the call graph for this function:

std::tuple< std::vector< bool >, size_t > ResultSet::getSupportedSingleSlotTargetBitmap ( ) const

This function returns a bitmap and population count of it, where it denotes all supported single-column targets suitable for direct columnarization.

The final goal is to remove the need for such selection, but at the moment for any target that doesn't qualify for direct columnarization, we use the traditional result set's iteration to handle it (e.g., count distinct, approximate count distinct)

Definition at line 1529 of file ResultSet.cpp.

References CHECK, CHECK_GE, is_distinct_target(), kFLOAT, and kSAMPLE.

1530  {
1532  auto [single_slot_targets, num_single_slot_targets] = getSingleSlotTargetBitmap();
1533 
1534  for (size_t target_idx = 0; target_idx < single_slot_targets.size(); target_idx++) {
1535  const auto& target = targets_[target_idx];
1536  if (single_slot_targets[target_idx] &&
1537  (is_distinct_target(target) ||
1538  shared::is_any<kAPPROX_QUANTILE, kMODE>(target.agg_kind) ||
1539  (target.is_agg && target.agg_kind == kSAMPLE && target.sql_type == kFLOAT))) {
1540  single_slot_targets[target_idx] = false;
1541  num_single_slot_targets--;
1542  }
1543  }
1544  CHECK_GE(num_single_slot_targets, size_t(0));
1545  return std::make_tuple(std::move(single_slot_targets), num_single_slot_targets);
1546 }
#define CHECK_GE(x, y)
Definition: Logger.h:302
const std::vector< TargetInfo > targets_
Definition: ResultSet.h:934
std::tuple< std::vector< bool >, size_t > getSingleSlotTargetBitmap() const
Definition: ResultSet.cpp:1505
bool is_distinct_target(const TargetInfo &target_info)
Definition: TargetInfo.h:98
#define CHECK(condition)
Definition: Logger.h:289
bool isDirectColumnarConversionPossible() const
Definition: ResultSet.cpp:1468

+ Here is the call graph for this function:

ChunkStats ResultSet::getTableFunctionChunkStats ( const size_t  target_idx) const
const std::vector< TargetInfo > & ResultSet::getTargetInfos ( ) const

Definition at line 679 of file ResultSet.cpp.

679  {
680  return targets_;
681 }
const std::vector< TargetInfo > targets_
Definition: ResultSet.h:934
const std::vector< int64_t > & ResultSet::getTargetInitVals ( ) const

Definition at line 683 of file ResultSet.cpp.

References CHECK.

683  {
684  CHECK(storage_);
685  return storage_->target_init_vals_;
686 }
std::unique_ptr< ResultSetStorage > storage_
Definition: ResultSet.h:938
#define CHECK(condition)
Definition: Logger.h:289
std::vector<TargetMetaInfo> ResultSet::getTargetMetaInfo ( )
inline

Definition at line 519 of file ResultSet.h.

References target_meta_info_.

519 { return target_meta_info_; }
std::vector< TargetMetaInfo > target_meta_info_
Definition: ResultSet.h:987
TargetValue ResultSet::getTargetValueFromBufferColwise ( const int8_t *  col_ptr,
const int8_t *  keys_ptr,
const QueryMemoryDescriptor query_mem_desc,
const size_t  local_entry_idx,
const size_t  global_entry_idx,
const TargetInfo target_info,
const size_t  target_logical_idx,
const size_t  slot_idx,
const bool  translate_strings,
const bool  decimal_to_double 
) const
private

Definition at line 2032 of file ResultSetIteration.cpp.

References advance_to_next_columnar_target_buff(), TargetInfo::agg_kind, CHECK, CHECK_GE, anonymous_namespace{ResultSetIteration.cpp}::columnar_elem_ptr(), QueryMemoryDescriptor::didOutputColumnar(), QueryMemoryDescriptor::getEffectiveKeyWidth(), QueryMemoryDescriptor::getEntryCount(), QueryMemoryDescriptor::getPaddedSlotWidthBytes(), QueryMemoryDescriptor::getTargetGroupbyIndex(), TargetInfo::is_agg, SQLTypeInfo::is_geometry(), is_real_str_or_array(), kAVG, anonymous_namespace{ResultSetIteration.cpp}::make_avg_target_value(), makeGeoTargetValue(), makeTargetValue(), makeVarlenTargetValue(), query_mem_desc_, TargetInfo::sql_type, and QueryMemoryDescriptor::targetGroupbyIndicesSize().

2042  {
2044  const auto col1_ptr = col_ptr;
2045  const auto compact_sz1 = query_mem_desc.getPaddedSlotWidthBytes(slot_idx);
2046  const auto next_col_ptr =
2047  advance_to_next_columnar_target_buff(col1_ptr, query_mem_desc, slot_idx);
2048  const auto col2_ptr = ((target_info.is_agg && target_info.agg_kind == kAVG) ||
2049  is_real_str_or_array(target_info))
2050  ? next_col_ptr
2051  : nullptr;
2052  const auto compact_sz2 = ((target_info.is_agg && target_info.agg_kind == kAVG) ||
2053  is_real_str_or_array(target_info))
2054  ? query_mem_desc.getPaddedSlotWidthBytes(slot_idx + 1)
2055  : 0;
2056 
2057  // TODO(Saman): add required logics for count distinct
2058  // geospatial target values:
2059  if (target_info.sql_type.is_geometry()) {
2060  return makeGeoTargetValue(
2061  col1_ptr, slot_idx, target_info, target_logical_idx, global_entry_idx);
2062  }
2063 
2064  const auto ptr1 = columnar_elem_ptr(local_entry_idx, col1_ptr, compact_sz1);
2065  if (target_info.agg_kind == kAVG || is_real_str_or_array(target_info)) {
2066  CHECK(col2_ptr);
2067  CHECK(compact_sz2);
2068  const auto ptr2 = columnar_elem_ptr(local_entry_idx, col2_ptr, compact_sz2);
2069  return target_info.agg_kind == kAVG
2070  ? make_avg_target_value(ptr1, compact_sz1, ptr2, compact_sz2, target_info)
2071  : makeVarlenTargetValue(ptr1,
2072  compact_sz1,
2073  ptr2,
2074  compact_sz2,
2075  target_info,
2076  target_logical_idx,
2077  translate_strings,
2078  global_entry_idx);
2079  }
2081  query_mem_desc_.getTargetGroupbyIndex(target_logical_idx) < 0) {
2082  return makeTargetValue(ptr1,
2083  compact_sz1,
2084  target_info,
2085  target_logical_idx,
2086  translate_strings,
2088  global_entry_idx);
2089  }
2090  const auto key_width = query_mem_desc_.getEffectiveKeyWidth();
2091  const auto key_idx = query_mem_desc_.getTargetGroupbyIndex(target_logical_idx);
2092  CHECK_GE(key_idx, 0);
2093  auto key_col_ptr = keys_ptr + key_idx * query_mem_desc_.getEntryCount() * key_width;
2094  return makeTargetValue(columnar_elem_ptr(local_entry_idx, key_col_ptr, key_width),
2095  key_width,
2096  target_info,
2097  target_logical_idx,
2098  translate_strings,
2100  global_entry_idx);
2101 }
double decimal_to_double(const SQLTypeInfo &otype, int64_t oval)
int64_t getTargetGroupbyIndex(const size_t target_idx) const
T advance_to_next_columnar_target_buff(T target_ptr, const QueryMemoryDescriptor &query_mem_desc, const size_t target_slot_idx)
SQLTypeInfo sql_type
Definition: TargetInfo.h:52
QueryMemoryDescriptor query_mem_desc_
Definition: ResultSet.h:937
#define CHECK_GE(x, y)
Definition: Logger.h:302
size_t getEffectiveKeyWidth() const
TargetValue makeGeoTargetValue(const int8_t *geo_target_ptr, const size_t slot_idx, const TargetInfo &target_info, const size_t target_logical_idx, const size_t entry_buff_idx) const
TargetValue make_avg_target_value(const int8_t *ptr1, const int8_t compact_sz1, const int8_t *ptr2, const int8_t compact_sz2, const TargetInfo &target_info)
bool is_agg
Definition: TargetInfo.h:50
size_t targetGroupbyIndicesSize() const
const int8_t getPaddedSlotWidthBytes(const size_t slot_idx) const
SQLAgg agg_kind
Definition: TargetInfo.h:51
bool is_real_str_or_array(const TargetInfo &target_info)
TargetValue makeTargetValue(const int8_t *ptr, const int8_t compact_sz, const TargetInfo &target_info, const size_t target_logical_idx, const bool translate_strings, const bool decimal_to_double, const size_t entry_buff_idx) const
#define CHECK(condition)
Definition: Logger.h:289
bool is_geometry() const
Definition: sqltypes.h:588
Definition: sqldefs.h:74
const int8_t * columnar_elem_ptr(const size_t entry_idx, const int8_t *col1_ptr, const int8_t compact_sz1)
TargetValue makeVarlenTargetValue(const int8_t *ptr1, const int8_t compact_sz1, const int8_t *ptr2, const int8_t compact_sz2, const TargetInfo &target_info, const size_t target_logical_idx, const bool translate_strings, const size_t entry_buff_idx) const

+ Here is the call graph for this function:

TargetValue ResultSet::getTargetValueFromBufferRowwise ( int8_t *  rowwise_target_ptr,
int8_t *  keys_ptr,
const size_t  entry_buff_idx,
const TargetInfo target_info,
const size_t  target_logical_idx,
const size_t  slot_idx,
const bool  translate_strings,
const bool  decimal_to_double,
const bool  fixup_count_distinct_pointers 
) const
private

Definition at line 2105 of file ResultSetIteration.cpp.

References TargetInfo::agg_kind, CHECK, QueryMemoryDescriptor::count_distinct_descriptors_, SQLTypeInfo::get_compression(), QueryMemoryDescriptor::getEffectiveKeyWidth(), QueryMemoryDescriptor::getLogicalSlotWidthBytes(), QueryMemoryDescriptor::getPaddedSlotWidthBytes(), QueryMemoryDescriptor::getTargetGroupbyIndex(), QueryMemoryDescriptor::hasKeylessHash(), TargetInfo::is_agg, SQLTypeInfo::is_array(), is_distinct_target(), SQLTypeInfo::is_geometry(), is_real_str_or_array(), SQLTypeInfo::is_string(), QueryMemoryDescriptor::isSingleColumnGroupByWithPerfectHash(), kAVG, kENCODING_NONE, anonymous_namespace{ResultSetIteration.cpp}::make_avg_target_value(), makeGeoTargetValue(), makeTargetValue(), makeVarlenTargetValue(), query_mem_desc_, row_set_mem_owner_, separate_varlen_storage_valid_, TargetInfo::sql_type, storage_, QueryMemoryDescriptor::targetGroupbyIndicesSize(), and UNLIKELY.

2114  {
2115  if (UNLIKELY(fixup_count_distinct_pointers)) {
2116  if (is_distinct_target(target_info)) {
2117  auto count_distinct_ptr_ptr = reinterpret_cast<int64_t*>(rowwise_target_ptr);
2118  const auto remote_ptr = *count_distinct_ptr_ptr;
2119  if (remote_ptr) {
2120  const auto ptr = storage_->mappedPtr(remote_ptr);
2121  if (ptr) {
2122  *count_distinct_ptr_ptr = ptr;
2123  } else {
2124  // need to create a zero filled buffer for this remote_ptr
2125  const auto& count_distinct_desc =
2126  query_mem_desc_.count_distinct_descriptors_[target_logical_idx];
2127  const auto bitmap_byte_sz = count_distinct_desc.sub_bitmap_count == 1
2128  ? count_distinct_desc.bitmapSizeBytes()
2129  : count_distinct_desc.bitmapPaddedSizeBytes();
2130  auto count_distinct_buffer = row_set_mem_owner_->allocateCountDistinctBuffer(
2131  bitmap_byte_sz, /*thread_idx=*/0);
2132  *count_distinct_ptr_ptr = reinterpret_cast<int64_t>(count_distinct_buffer);
2133  }
2134  }
2135  }
2136  return int64_t(0);
2137  }
2138  if (target_info.sql_type.is_geometry()) {
2139  return makeGeoTargetValue(
2140  rowwise_target_ptr, slot_idx, target_info, target_logical_idx, entry_buff_idx);
2141  }
2142 
2143  auto ptr1 = rowwise_target_ptr;
2144  int8_t compact_sz1 = query_mem_desc_.getPaddedSlotWidthBytes(slot_idx);
2146  !query_mem_desc_.hasKeylessHash() && !target_info.is_agg) {
2147  // Single column perfect hash group by can utilize one slot for both the key and the
2148  // target value if both values fit in 8 bytes. Use the target value actual size for
2149  // this case. If they don't, the target value should be 8 bytes, so we can still use
2150  // the actual size rather than the compact size.
2151  compact_sz1 = query_mem_desc_.getLogicalSlotWidthBytes(slot_idx);
2152  }
2153 
2154  // logic for deciding width of column
2155  if (target_info.agg_kind == kAVG || is_real_str_or_array(target_info)) {
2156  const auto ptr2 =
2157  rowwise_target_ptr + query_mem_desc_.getPaddedSlotWidthBytes(slot_idx);
2158  int8_t compact_sz2 = 0;
2159  // Skip reading the second slot if we have a none encoded string and are using
2160  // the none encoded strings buffer attached to ResultSetStorage
2162  (target_info.sql_type.is_array() ||
2163  (target_info.sql_type.is_string() &&
2164  target_info.sql_type.get_compression() == kENCODING_NONE)))) {
2165  compact_sz2 = query_mem_desc_.getPaddedSlotWidthBytes(slot_idx + 1);
2166  }
2167  if (separate_varlen_storage_valid_ && target_info.is_agg) {
2168  compact_sz2 = 8; // TODO(adb): is there a better way to do this?
2169  }
2170  CHECK(ptr2);
2171  return target_info.agg_kind == kAVG
2172  ? make_avg_target_value(ptr1, compact_sz1, ptr2, compact_sz2, target_info)
2173  : makeVarlenTargetValue(ptr1,
2174  compact_sz1,
2175  ptr2,
2176  compact_sz2,
2177  target_info,
2178  target_logical_idx,
2179  translate_strings,
2180  entry_buff_idx);
2181  }
2183  query_mem_desc_.getTargetGroupbyIndex(target_logical_idx) < 0) {
2184  return makeTargetValue(ptr1,
2185  compact_sz1,
2186  target_info,
2187  target_logical_idx,
2188  translate_strings,
2190  entry_buff_idx);
2191  }
2192  const auto key_width = query_mem_desc_.getEffectiveKeyWidth();
2193  ptr1 = keys_ptr + query_mem_desc_.getTargetGroupbyIndex(target_logical_idx) * key_width;
2194  return makeTargetValue(ptr1,
2195  key_width,
2196  target_info,
2197  target_logical_idx,
2198  translate_strings,
2200  entry_buff_idx);
2201 }
double decimal_to_double(const SQLTypeInfo &otype, int64_t oval)
int64_t getTargetGroupbyIndex(const size_t target_idx) const
SQLTypeInfo sql_type
Definition: TargetInfo.h:52
QueryMemoryDescriptor query_mem_desc_
Definition: ResultSet.h:937
std::unique_ptr< ResultSetStorage > storage_
Definition: ResultSet.h:938
size_t getEffectiveKeyWidth() const
TargetValue makeGeoTargetValue(const int8_t *geo_target_ptr, const size_t slot_idx, const TargetInfo &target_info, const size_t target_logical_idx, const size_t entry_buff_idx) const
TargetValue make_avg_target_value(const int8_t *ptr1, const int8_t compact_sz1, const int8_t *ptr2, const int8_t compact_sz2, const TargetInfo &target_info)
std::shared_ptr< RowSetMemoryOwner > row_set_mem_owner_
Definition: ResultSet.h:944
bool is_agg
Definition: TargetInfo.h:50
size_t targetGroupbyIndicesSize() const
CountDistinctDescriptors count_distinct_descriptors_
bool is_distinct_target(const TargetInfo &target_info)
Definition: TargetInfo.h:98
const int8_t getPaddedSlotWidthBytes(const size_t slot_idx) const
SQLAgg agg_kind
Definition: TargetInfo.h:51
#define UNLIKELY(x)
Definition: likely.h:25
bool is_real_str_or_array(const TargetInfo &target_info)
bool isSingleColumnGroupByWithPerfectHash() const
HOST DEVICE EncodingType get_compression() const
Definition: sqltypes.h:388
TargetValue makeTargetValue(const int8_t *ptr, const int8_t compact_sz, const TargetInfo &target_info, const size_t target_logical_idx, const bool translate_strings, const bool decimal_to_double, const size_t entry_buff_idx) const
#define CHECK(condition)
Definition: Logger.h:289
bool is_geometry() const
Definition: sqltypes.h:588
bool separate_varlen_storage_valid_
Definition: ResultSet.h:971
bool is_string() const
Definition: sqltypes.h:576
const int8_t getLogicalSlotWidthBytes(const size_t slot_idx) const
Definition: sqldefs.h:74
bool is_array() const
Definition: sqltypes.h:584
TargetValue makeVarlenTargetValue(const int8_t *ptr1, const int8_t compact_sz1, const int8_t *ptr2, const int8_t compact_sz2, const TargetInfo &target_info, const size_t target_logical_idx, const bool translate_strings, const size_t entry_buff_idx) const

+ Here is the call graph for this function:

const std::pair< std::vector< int32_t >, std::vector< std::string > > ResultSet::getUniqueStringsForDictEncodedTargetCol ( const size_t  col_idx) const

Definition at line 1412 of file ResultSet.cpp.

References catalog_(), CHECK, and inline_fixed_encoding_null_val().

// Collects the distinct non-null string-dictionary IDs appearing in the given
// dict-encoded column (plain TEXT or Array<TEXT>, per the CHECK below) across
// all result entries, then translates them through the column's string
// dictionary proxy. Returns the pair (unique string ids, matching strings),
// index-aligned.
1412  {
1413  const auto col_type_info = getColType(col_idx);
1414  std::unordered_set<int32_t> unique_string_ids_set;
1415  const size_t num_entries = entryCount();
// targets_to_skip is true for every column except col_idx, so only the column
// of interest is materialized per row.
1416  std::vector<bool> targets_to_skip(colCount(), true);
1417  targets_to_skip[col_idx] = false;
1418  CHECK(col_type_info.is_dict_encoded_type()); // Array<Text> or Text
// For an array column the null sentinel is that of the element type.
1419  const int64_t null_val = inline_fixed_encoding_null_val(
1420  col_type_info.is_array() ? col_type_info.get_elem_type() : col_type_info);
1421 
1422  for (size_t row_idx = 0; row_idx < num_entries; ++row_idx) {
1423  const auto result_row = getRowAtNoTranslations(row_idx, targets_to_skip);
1424  if (!result_row.empty()) {
// Scalar TEXT: the untranslated value is the dictionary id itself.
1425  if (const auto scalar_col_val =
1426  boost::get<ScalarTargetValue>(&result_row[col_idx])) {
1427  const int32_t string_id =
1428  static_cast<int32_t>(boost::get<int64_t>(*scalar_col_val));
1429  if (string_id != null_val) {
1430  unique_string_ids_set.emplace(string_id);
1431  }
// Array<TEXT>: gather the id of every non-null element of a non-null array.
1432  } else if (const auto array_col_val =
1433  boost::get<ArrayTargetValue>(&result_row[col_idx])) {
1434  if (*array_col_val) {
1435  for (const ScalarTargetValue& scalar : array_col_val->value()) {
1436  const int32_t string_id = static_cast<int32_t>(boost::get<int64_t>(scalar));
1437  if (string_id != null_val) {
1438  unique_string_ids_set.emplace(string_id);
1439  }
1440  }
1441  }
1442  }
1443  }
1444  }
1445 
// Flatten the set into a vector so ids and translated strings share an index.
1446  const size_t num_unique_strings = unique_string_ids_set.size();
1447  std::vector<int32_t> unique_string_ids(num_unique_strings);
1448  size_t string_idx{0};
1449  for (const auto unique_string_id : unique_string_ids_set) {
1450  unique_string_ids[string_idx++] = unique_string_id;
1451  }
1452 
1453  const int32_t dict_id = col_type_info.get_comp_param();
1454  const auto sdp = row_set_mem_owner_->getOrAddStringDictProxy(
1455  dict_id, /*with_generation=*/true, catalog_);
1456  CHECK(sdp);
1457 
1458  return std::make_pair(unique_string_ids, sdp->getStrings(unique_string_ids));
1459 }
const Catalog_Namespace::Catalog * catalog_
Definition: ResultSet.h:947
size_t colCount() const
Definition: ResultSet.cpp:415
std::vector< TargetValue > getRowAtNoTranslations(const size_t index, const std::vector< bool > &targets_to_skip={}) const
std::shared_ptr< RowSetMemoryOwner > row_set_mem_owner_
Definition: ResultSet.h:944
SQLTypeInfo getColType(const size_t col_idx) const
Definition: ResultSet.cpp:419
size_t entryCount() const
Returns the number of entries the result set is allocated to hold.
#define CHECK(condition)
Definition: Logger.h:289
int64_t inline_fixed_encoding_null_val(const SQL_TYPE_INFO &ti)
boost::variant< int64_t, double, float, NullableString > ScalarTargetValue
Definition: TargetValue.h:180

+ Here is the call graph for this function:

InternalTargetValue ResultSet::getVarlenOrderEntry ( const int64_t  str_ptr,
const size_t  str_len 
) const
private

Definition at line 627 of file ResultSetIteration.cpp.

References CHECK, CPU, device_id_, device_type_, QueryMemoryDescriptor::getExecutor(), getQueryEngineCudaStreamForDevice(), GPU, query_mem_desc_, and row_set_mem_owner_.

// Materializes a variable-length string value for ordering: when the payload
// resides in GPU memory it is staged into a host-side buffer via a
// CudaAllocator copy, then the bytes are interned through the
// RowSetMemoryOwner and wrapped in an InternalTargetValue.
// NOTE(review): this rendered listing omits lines 631 and 642 — presumably the
// device_type_ == GPU / else-branch conditions (the References list names
// device_type_, CPU and GPU); confirm against ResultSetIteration.cpp.
628  {
629  char* host_str_ptr{nullptr};
630  std::vector<int8_t> cpu_buffer;
632  cpu_buffer.resize(str_len);
633  const auto executor = query_mem_desc_.getExecutor();
634  CHECK(executor);
635  auto data_mgr = executor->getDataMgr();
636  auto allocator = std::make_unique<CudaAllocator>(
637  data_mgr, device_id_, getQueryEngineCudaStreamForDevice(device_id_));
// Copy str_len bytes from the device pointer into the staging buffer.
638  allocator->copyFromDevice(
639  &cpu_buffer[0], reinterpret_cast<int8_t*>(str_ptr), str_len);
640  host_str_ptr = reinterpret_cast<char*>(&cpu_buffer[0]);
641  } else {
// On CPU the pointer is already host-addressable.
643  host_str_ptr = reinterpret_cast<char*>(str_ptr);
644  }
645  std::string str(host_str_ptr, str_len);
646  return InternalTargetValue(row_set_mem_owner_->addString(str));
647 }
QueryMemoryDescriptor query_mem_desc_
Definition: ResultSet.h:937
std::shared_ptr< RowSetMemoryOwner > row_set_mem_owner_
Definition: ResultSet.h:944
CUstream getQueryEngineCudaStreamForDevice(int device_num)
Definition: QueryEngine.cpp:7
const ExecutorDeviceType device_type_
Definition: ResultSet.h:935
#define CHECK(condition)
Definition: Logger.h:289
const Executor * getExecutor() const
const int device_id_
Definition: ResultSet.h:936

+ Here is the call graph for this function:

const VarlenOutputInfo * ResultSet::getVarlenOutputInfo ( const size_t  entry_idx) const
private

Definition at line 1163 of file ResultSetIteration.cpp.

References CHECK, and findStorage().

Referenced by makeGeoTargetValue().

// Resolves which ResultSetStorage holds entry_idx and returns that storage's
// varlen output info; the storage pointer must be valid.
1163  {
1164  auto storage_lookup_result = findStorage(entry_idx);
1165  CHECK(storage_lookup_result.storage_ptr);
1166  return storage_lookup_result.storage_ptr->getVarlenOutputInfo();
1167 }
StorageLookupResult findStorage(const size_t entry_idx) const
Definition: ResultSet.cpp:941
#define CHECK(condition)
Definition: Logger.h:289

+ Here is the call graph for this function:

+ Here is the caller graph for this function:

const bool ResultSet::hasValidBuffer ( ) const
inline

Definition at line 527 of file ResultSet.h.

References storage_.

// True iff this result set owns a storage buffer (storage_ is non-null).
527  {
528  if (storage_) {
529  return true;
530  }
531  return false;
532  }
std::unique_ptr< ResultSetStorage > storage_
Definition: ResultSet.h:938
void ResultSet::holdChunkIterators ( const std::shared_ptr< std::list< ChunkIter >>  chunk_iters)
inline

Definition at line 442 of file ResultSet.h.

References chunk_iters_.

// Keeps the given chunk-iterator list alive for the lifetime of this result
// set by appending its shared_ptr to chunk_iters_.
442  {
443  chunk_iters_.push_back(chunk_iters);
444  }
std::vector< std::shared_ptr< std::list< ChunkIter > > > chunk_iters_
Definition: ResultSet.h:953
void ResultSet::holdChunks ( const std::list< std::shared_ptr< Chunk_NS::Chunk >> &  chunks)
inline

Definition at line 439 of file ResultSet.h.

References chunks_.

// Keeps the given chunks alive for the lifetime of this result set by
// storing their shared_ptrs in chunks_ (replaces any previously held list).
439  {
440  chunks_ = chunks;
441  }
std::list< std::shared_ptr< Chunk_NS::Chunk > > chunks_
Definition: ResultSet.h:952
void ResultSet::holdLiterals ( std::vector< int8_t > &  literal_buff)
inline

Definition at line 445 of file ResultSet.h.

References literal_buffers_.

// Takes ownership of a literal buffer (moved into literal_buffers_) so the
// bytes stay valid while this result set is alive.
445  {
446  literal_buffers_.push_back(std::move(literal_buff));
447  }
std::vector< std::vector< int8_t > > literal_buffers_
Definition: ResultSet.h:956
void ResultSet::initializeStorage ( ) const

Definition at line 1043 of file ResultSetReduction.cpp.

// Initializes the owned storage either column-wise or row-wise.
// NOTE(review): this rendered listing omits line 1044 — presumably a
// query_mem_desc_ columnar-output check selecting the branch; confirm
// against ResultSetReduction.cpp.
1043  {
1045  storage_->initializeColWise();
1046  } else {
1047  storage_->initializeRowWise();
1048  }
1049 }
QueryMemoryDescriptor query_mem_desc_
Definition: ResultSet.h:937
std::unique_ptr< ResultSetStorage > storage_
Definition: ResultSet.h:938
PermutationView ResultSet::initPermutationBuffer ( PermutationView  permutation,
PermutationIdx const  begin,
PermutationIdx const  end 
) const
private

Definition at line 848 of file ResultSet.cpp.

References CHECK, DEBUG_TIMER, and VectorView< T >::push_back().

// Fills the permutation view with the indices in [begin, end) whose storage
// entries are non-empty, preserving order; empty entries are filtered out.
// Returns the (possibly shorter) populated view.
850  {
851  auto timer = DEBUG_TIMER(__func__);
852  for (PermutationIdx i = begin; i < end; ++i) {
// Map the logical index to its owning storage and local offset.
853  const auto storage_lookup_result = findStorage(i);
854  const auto lhs_storage = storage_lookup_result.storage_ptr;
855  const auto off = storage_lookup_result.fixedup_entry_idx;
856  CHECK(lhs_storage);
857  if (!lhs_storage->isEmptyEntry(off)) {
858  permutation.push_back(i);
859  }
860  }
861  return permutation;
862 }
DEVICE void push_back(T const &value)
Definition: VectorView.h:73
StorageLookupResult findStorage(const size_t entry_idx) const
Definition: ResultSet.cpp:941
uint32_t PermutationIdx
Definition: ResultSet.h:152
#define CHECK(condition)
Definition: Logger.h:289
#define DEBUG_TIMER(name)
Definition: Logger.h:407

+ Here is the call graph for this function:

void ResultSet::initStatus ( )
inline

Definition at line 472 of file ResultSet.h.

References clearPermutation(), crt_row_buff_idx_, drop_first_, fetched_so_far_, invalidateCachedRowCount(), keep_first_, setGeoReturnType(), and WktString.

// Resets iteration state so the result set can be reused: rewinds the row
// cursor and clears the LIMIT/OFFSET bookkeeping.
// NOTE(review): this rendered listing omits lines 477-479 — per the
// References list above, likely invalidateCachedRowCount(),
// setGeoReturnType(WktString) and clearPermutation(); confirm in ResultSet.h.
472  {
473  // todo(yoonmin): what else we additionally need to consider
474  // to make completely clear status of the resultset for reuse?
475  crt_row_buff_idx_ = 0;
476  fetched_so_far_ = 0;
480  drop_first_ = 0;
481  keep_first_ = 0;
482  }
void setGeoReturnType(const GeoReturnType val)
Definition: ResultSet.h:551
size_t keep_first_
Definition: ResultSet.h:943
void invalidateCachedRowCount() const
Definition: ResultSet.cpp:607
size_t drop_first_
Definition: ResultSet.h:942
size_t fetched_so_far_
Definition: ResultSet.h:941
size_t crt_row_buff_idx_
Definition: ResultSet.h:940
void clearPermutation()
Definition: ResultSet.h:466

+ Here is the call graph for this function:

void ResultSet::invalidateCachedRowCount ( ) const

Definition at line 607 of file ResultSet.cpp.

References uninitialized_cached_row_count.

Referenced by initStatus().

// Invalidates the memoized row count so the next rowCount() recomputes it.
// NOTE(review): this rendered listing omits line 608 — per the References
// list, presumably a store of uninitialized_cached_row_count into
// cached_row_count_; confirm against ResultSet.cpp.
607  {
609 }
std::atomic< int64_t > cached_row_count_
Definition: ResultSet.h:975
constexpr int64_t uninitialized_cached_row_count
Definition: ResultSet.cpp:52

+ Here is the caller graph for this function:

void ResultSet::invalidateResultSetChunks ( )
inline

Definition at line 484 of file ResultSet.h.

References chunk_iters_, and chunks_.

// Releases the held chunk and chunk-iterator shared_ptrs, dropping this
// result set's references to the underlying data chunks.
484  {
485  if (!chunks_.empty()) {
486  chunks_.clear();
487  }
488  if (!chunk_iters_.empty()) {
489  chunk_iters_.clear();
490  }
491  };
std::vector< std::shared_ptr< std::list< ChunkIter > > > chunk_iters_
Definition: ResultSet.h:953
std::list< std::shared_ptr< Chunk_NS::Chunk > > chunks_
Definition: ResultSet.h:952
const bool ResultSet::isCached ( ) const
inline

Definition at line 497 of file ResultSet.h.

References cached_.

// True iff this result set has been marked as cached (cached_ flag).
497 { return cached_; }
bool cached_
Definition: ResultSet.h:982
bool ResultSet::isDirectColumnarConversionPossible ( ) const

Determines if it is possible to directly form a ColumnarResults class from this result set, bypassing the default columnarization.

NOTE: If there exists a permutation vector (i.e., in some ORDER BY queries), it becomes equivalent to the row-wise columnarization.

Definition at line 1468 of file ResultSet.cpp.

References CHECK, g_enable_direct_columnarization, GroupByBaselineHash, GroupByPerfectHash, Projection, and TableFunction.

Referenced by copyColumnIntoBuffer().

// Decides whether a ColumnarResults can be formed directly from this result
// set; in both visible branches this additionally requires an empty
// permutation vector (ORDER BY permutations force row-wise columnarization).
// NOTE(review): this rendered listing omits lines 1469-1470, 1473-1479 and
// 1481-1486 — presumably the g_enable_direct_columnarization gate and the
// query-description-type comparisons (Projection / TableFunction /
// GroupByPerfectHash / GroupByBaselineHash per the References list); confirm
// against ResultSet.cpp before relying on the exact conditions.
1468  {
1470  return false;
1471  } else if (query_mem_desc_.didOutputColumnar()) {
1472  return permutation_.empty() && (query_mem_desc_.getQueryDescriptionType() ==
1480  } else {
1483  return permutation_.empty() && (query_mem_desc_.getQueryDescriptionType() ==
1487  }
1488 }
Permutation permutation_
Definition: ResultSet.h:945
bool g_enable_direct_columnarization
Definition: Execute.cpp:122
QueryMemoryDescriptor query_mem_desc_
Definition: ResultSet.h:937
QueryDescriptionType getQueryDescriptionType() const
#define CHECK(condition)
Definition: Logger.h:289

+ Here is the caller graph for this function:

bool ResultSet::isEmpty ( ) const

Returns a boolean signifying whether there are valid entries in the result set.

Note a result set can be logically empty even if the value returned by ResultSet::entryCount() is > 0, whether due to a SQL LIMIT/OFFSET applied or because the result set representation is inherently sparse (i.e. baseline hash group by).

Internally this function is just implemented as ResultSet::rowCount() == 0, which caches its value so the row count will only be computed once per finalized result set.

Definition at line 651 of file ResultSet.cpp.

// Logical emptiness test, deliberately implemented as rowCount() == 0 so the
// two functions can never disagree (see the rationale below).
651  {
652  // To simplify this function and de-dup logic with ResultSet::rowCount()
653  // (mismatches between the two were causing bugs), we modified this function
654  // to simply fetch rowCount(). The potential downside of this approach is that
655  // in some cases more work will need to be done, as we can't just stop at the first row.
656  // Mitigating that for most cases is the following:
657  // 1) rowCount() is cached, so the logic for actually computing row counts will run only
658  // once
659  // per result set.
660  // 2) If the cache is empty (cached_row_count_ == -1), rowCount() will use parallel
661  // methods if deemed appropriate, which in many cases could be faster for a sparse
662  // large result set than single-threaded iteration from the beginning
663  // 3) Often where isEmpty() is needed, rowCount() is also needed. Since the first call
664  // to rowCount()
665  // will be cached, there is no extra overhead in these cases
666 
667  return rowCount() == size_t(0);
668 }
size_t rowCount(const bool force_parallel=false) const
Returns the number of valid entries in the result set (i.e that will be returned from the SQL query o...
Definition: ResultSet.cpp:595
const bool ResultSet::isEstimator ( ) const
inline

Definition at line 493 of file ResultSet.h.

References estimator_.

// NOTE(review): as rendered, this returns true when estimator_ is NOT set,
// which is the opposite of what the name suggests — confirm against callers
// and the original ResultSet.h before trusting this listing.
493 { return !estimator_; }
const std::shared_ptr< const Analyzer::Estimator > estimator_
Definition: ResultSet.h:962
bool ResultSet::isExplain ( ) const

Definition at line 742 of file ResultSet.cpp.

// True iff this result set was constructed to carry only an EXPLAIN output
// (reflects the just_explain_ flag).
742  {
743  return just_explain_;
744 }
const bool just_explain_
Definition: ResultSet.h:973
bool ResultSet::isGeoColOnGpu ( const size_t  col_idx) const

Definition at line 1484 of file ResultSetIteration.cpp.

References CHECK_LT, device_type_, GPU, IS_GEO, lazy_fetch_info_, separate_varlen_storage_valid_, targets_, and to_string().

// Reports whether the geo column at col_idx still resides on the GPU device.
// Throws std::runtime_error if the target is not a geo column. Returns false
// when separate varlen storage applies (non-aggregate) or the column is
// lazily fetched, since both paths keep data host-side.
1484  {
1485  // This should match the logic in makeGeoTargetValue which ultimately calls
1486  // fetch_data_from_gpu when the geo column is on the device.
1487  // TODO(croot): somehow find a way to refactor this and makeGeoTargetValue to use a
1488  // utility function that handles this logic in one place
1489  CHECK_LT(col_idx, targets_.size());
1490  if (!IS_GEO(targets_[col_idx].sql_type.get_type())) {
1491  throw std::runtime_error("Column target at index " + std::to_string(col_idx) +
1492  " is not a geo column. It is of type " +
1493  targets_[col_idx].sql_type.get_type_name() + ".");
1494  }
1495 
1496  const auto& target_info = targets_[col_idx];
1497  if (separate_varlen_storage_valid_ && !target_info.is_agg) {
1498  return false;
1499  }
1500 
1501  if (!lazy_fetch_info_.empty()) {
1502  CHECK_LT(col_idx, lazy_fetch_info_.size());
1503  if (lazy_fetch_info_[col_idx].is_lazily_fetched) {
1504  return false;
1505  }
1506  }
1507 
// NOTE(review): this rendered listing omits line 1508 — presumably the final
// device-type check (device_type_ and GPU appear in the References list);
// confirm against ResultSetIteration.cpp.
1509 }
std::string to_string(char const *&&v)
const std::vector< TargetInfo > targets_
Definition: ResultSet.h:934
#define CHECK_LT(x, y)
Definition: Logger.h:299
const ExecutorDeviceType device_type_
Definition: ResultSet.h:935
std::vector< ColumnLazyFetchInfo > lazy_fetch_info_
Definition: ResultSet.h:957
bool separate_varlen_storage_valid_
Definition: ResultSet.h:971
#define IS_GEO(T)
Definition: sqltypes.h:298

+ Here is the call graph for this function:

bool ResultSet::isLessThan ( SQLTypeInfo const &  ti,
int64_t const  lhs,
int64_t const  rhs 
) const

Definition at line 1113 of file ResultSetIteration.cpp.

References shared::bit_cast(), CHECK_EQ, SQLTypeInfo::get_compression(), getString(), SQLTypeInfo::is_any(), SQLTypeInfo::is_string(), kDOUBLE, kENCODING_DICT, and kFLOAT.

// Type-aware less-than over two 64-bit raw values: dictionary-encoded strings
// compare lexically via their translated strings; DOUBLE/FLOAT compare after
// bit-casting the payload back to the floating type; everything else compares
// as signed integers.
1115  {
1116  if (ti.is_string()) {
// Only dictionary-encoded strings can be compared from an int64 payload.
1117  CHECK_EQ(kENCODING_DICT, ti.get_compression());
1118  return getString(ti, lhs) < getString(ti, rhs);
1119  } else {
1120  return ti.is_any<kDOUBLE>()
1121  ? shared::bit_cast<double>(lhs) < shared::bit_cast<double>(rhs)
1122  : ti.is_any<kFLOAT>()
1123  ? shared::bit_cast<float>(lhs) < shared::bit_cast<float>(rhs)
1124  : lhs < rhs;
1125  }
1126 }
#define CHECK_EQ(x, y)
Definition: Logger.h:297
TO bit_cast(FROM &&from)
Definition: misc.h:298
std::string getString(SQLTypeInfo const &, int64_t const ival) const

+ Here is the call graph for this function:

bool ResultSet::isNull ( const SQLTypeInfo ti,
const InternalTargetValue val,
const bool  float_argument_input 
)
staticprivate

Definition at line 2348 of file ResultSetIteration.cpp.

References CHECK, SQLTypeInfo::get_notnull(), InternalTargetValue::i1, InternalTargetValue::i2, InternalTargetValue::isInt(), InternalTargetValue::isNull(), InternalTargetValue::isPair(), InternalTargetValue::isStr(), and null_val_bit_pattern().

// Null test for an InternalTargetValue: a NOT NULL type is never null;
// integers compare against the type's null bit pattern; pair values are null
// when the second component is zero; string values when the pointer/handle
// is zero; otherwise the value must itself be the null variant.
2350  {
2351  if (ti.get_notnull()) {
2352  return false;
2353  }
2354  if (val.isInt()) {
2355  return val.i1 == null_val_bit_pattern(ti, float_argument_input);
2356  }
2357  if (val.isPair()) {
2358  return !val.i2;
2359  }
2360  if (val.isStr()) {
2361  return !val.i1;
2362  }
2363  CHECK(val.isNull());
2364  return true;
2365 }
bool isPair() const
Definition: TargetValue.h:65
bool isStr() const
Definition: TargetValue.h:69
int64_t null_val_bit_pattern(const SQLTypeInfo &ti, const bool float_argument_input)
bool isNull() const
Definition: TargetValue.h:67
bool isInt() const
Definition: TargetValue.h:63
#define CHECK(condition)
Definition: Logger.h:289
HOST DEVICE bool get_notnull() const
Definition: sqltypes.h:387

+ Here is the call graph for this function:

bool ResultSet::isNullIval ( SQLTypeInfo const &  ti,
bool const  translate_strings,
int64_t const  ival 
)
static

Definition at line 1128 of file ResultSetIteration.cpp.

References inline_int_null_val(), SQLTypeInfo::is_any(), SQLTypeInfo::is_string(), kDOUBLE, kFLOAT, NULL_DOUBLE, NULL_FLOAT, and NULL_INT.

Referenced by makeTargetValue().

// Checks a raw 64-bit value against the null sentinel for its type:
// DOUBLE/FLOAT via bit-cast against NULL_DOUBLE/NULL_FLOAT; strings against
// NULL_INT when translated (else 0); all other types against
// inline_int_null_val.
1130  {
1131  return ti.is_any<kDOUBLE>() ? shared::bit_cast<double>(ival) == NULL_DOUBLE
1132  : ti.is_any<kFLOAT>() ? shared::bit_cast<float>(ival) == NULL_FLOAT
1133  : ti.is_string() ? translate_strings ? ival == NULL_INT : ival == 0
1134  : ival == inline_int_null_val(ti);
1135 }
#define NULL_DOUBLE
#define NULL_FLOAT
#define NULL_INT
int64_t inline_int_null_val(const SQL_TYPE_INFO &ti)

+ Here is the call graph for this function:

+ Here is the caller graph for this function:

const bool ResultSet::isPermutationBufferEmpty ( ) const
inline

Definition at line 454 of file ResultSet.h.

References permutation_.

// True iff no ORDER BY permutation has been built for this result set.
454 { return permutation_.empty(); };
Permutation permutation_
Definition: ResultSet.h:945
bool ResultSet::isRowAtEmpty ( const size_t  index) const

Definition at line 284 of file ResultSetIteration.cpp.

// True iff the row at the given logical index is an empty entry; indices past
// entryCount() are reported as empty. The logical index is first mapped
// through the permutation (if any), then to its owning storage and local
// offset.
284  {
285  if (logical_index >= entryCount()) {
286  return true;
287  }
288  const auto entry_idx =
289  permutation_.empty() ? logical_index : permutation_[logical_index];
290  const auto storage_lookup_result = findStorage(entry_idx);
291  const auto storage = storage_lookup_result.storage_ptr;
292  const auto local_entry_idx = storage_lookup_result.fixedup_entry_idx;
293  return storage->isEmptyEntry(local_entry_idx);
294 }
Permutation permutation_
Definition: ResultSet.h:945
StorageLookupResult findStorage(const size_t entry_idx) const
Definition: ResultSet.cpp:941
size_t entryCount() const
Returns the number of entries the result set is allocated to hold.
bool ResultSet::isTruncated ( ) const

Definition at line 738 of file ResultSet.cpp.

// True iff a LIMIT (keep_first_) or OFFSET (drop_first_) has been applied;
// relies on the implicit nonzero-sum-to-bool conversion.
738  {
739  return keep_first_ + drop_first_;
740 }
size_t keep_first_
Definition: ResultSet.h:943
size_t drop_first_
Definition: ResultSet.h:942
bool ResultSet::isValidationOnlyRes ( ) const

Definition at line 750 of file ResultSet.cpp.

// True iff this result set exists only for query validation (reflects the
// for_validation_only_ flag).
750  {
751  return for_validation_only_;
752 }
bool for_validation_only_
Definition: ResultSet.h:974
bool ResultSet::isZeroCopyColumnarConversionPossible ( size_t  column_idx) const

Definition at line 1490 of file ResultSet.cpp.

References Projection, and TableFunction.

// Zero-copy columnar conversion requires no appended storage, a valid primary
// storage buffer, and a column that is not lazily fetched.
// NOTE(review): this rendered listing omits lines 1491-1494 — presumably
// query_mem_desc_ checks on the query description type (Projection /
// TableFunction per the References list); confirm against ResultSet.cpp.
1490  {
1495  appended_storage_.empty() && storage_ &&
1496  (lazy_fetch_info_.empty() || !lazy_fetch_info_[column_idx].is_lazily_fetched);
1497 }
AppendedStorage appended_storage_
Definition: ResultSet.h:939
QueryMemoryDescriptor query_mem_desc_
Definition: ResultSet.h:937
std::unique_ptr< ResultSetStorage > storage_
Definition: ResultSet.h:938
QueryDescriptionType getQueryDescriptionType() const
std::vector< ColumnLazyFetchInfo > lazy_fetch_info_
Definition: ResultSet.h:957
void ResultSet::keepFirstN ( const size_t  n)

Definition at line 54 of file ResultSet.cpp.

References anonymous_namespace{Utm.h}::n.

// Records a LIMIT of n rows (keep_first_).
// NOTE(review): this rendered listing omits line 55 — per the References
// list, presumably a call to invalidateCachedRowCount(); confirm against
// ResultSet.cpp. The "anonymous_namespace{Utm.h}::n" reference above is a
// Doxygen cross-link artifact, not an actual dependency.
54  {
56  keep_first_ = n;
57 }
size_t keep_first_
Definition: ResultSet.h:943
void invalidateCachedRowCount() const
Definition: ResultSet.cpp:607
constexpr double n
Definition: Utm.h:38
int64_t ResultSet::lazyReadInt ( const int64_t  ival,
const size_t  target_logical_idx,
const StorageLookupResult storage_lookup_result 
) const
private

Definition at line 649 of file ResultSetIteration.cpp.

References CHECK, CHECK_LT, ChunkIter_get_nth(), col_buffers_, ResultSet::StorageLookupResult::fixedup_entry_idx, getColumnFrag(), VarlenDatum::is_null, kENCODING_NONE, result_set::lazy_decode(), lazy_fetch_info_, VarlenDatum::length, VarlenDatum::pointer, row_set_mem_owner_, ResultSet::StorageLookupResult::storage_idx, and targets_.

// Resolves a lazily-fetched integer-encoded value: when the target column is
// lazily fetched, re-reads it from the owning fragment's column buffer;
// none-encoded strings are pulled through a ChunkIter and interned via the
// RowSetMemoryOwner (returning the string handle as an int64), other types go
// through result_set::lazy_decode. Values that are not lazily fetched are
// returned unchanged.
651  {
652  if (!lazy_fetch_info_.empty()) {
653  CHECK_LT(target_logical_idx, lazy_fetch_info_.size());
654  const auto& col_lazy_fetch = lazy_fetch_info_[target_logical_idx];
655  if (col_lazy_fetch.is_lazily_fetched) {
656  CHECK_LT(static_cast<size_t>(storage_lookup_result.storage_idx),
657  col_buffers_.size());
// getColumnFrag may adjust ival_copy to a fragment-local index.
658  int64_t ival_copy = ival;
659  auto& frag_col_buffers =
660  getColumnFrag(static_cast<size_t>(storage_lookup_result.storage_idx),
661  target_logical_idx,
662  ival_copy);
663  auto& frag_col_buffer = frag_col_buffers[col_lazy_fetch.local_col_id];
664  CHECK_LT(target_logical_idx, targets_.size());
665  const TargetInfo& target_info = targets_[target_logical_idx];
666  CHECK(!target_info.is_agg);
667  if (target_info.sql_type.is_string() &&
668  target_info.sql_type.get_compression() == kENCODING_NONE) {
669  VarlenDatum vd;
670  bool is_end{false};
// NOTE(review): this rendered listing omits line 671 — the ChunkIter_get_nth(
// call opener (named in the References list) that these arguments belong to.
672  reinterpret_cast<ChunkIter*>(const_cast<int8_t*>(frag_col_buffer)),
673  storage_lookup_result.fixedup_entry_idx,
674  false,
675  &vd,
676  &is_end);
677  CHECK(!is_end);
678  if (vd.is_null) {
679  return 0;
680  }
// Intern the fetched bytes; the returned pointer doubles as the value.
681  std::string fetched_str(reinterpret_cast<char*>(vd.pointer), vd.length);
682  return reinterpret_cast<int64_t>(row_set_mem_owner_->addString(fetched_str));
683  }
684  return result_set::lazy_decode(col_lazy_fetch, frag_col_buffer, ival_copy);
685  }
686  }
687  return ival;
688 }
bool is_null
Definition: Datum.h:55
DEVICE void ChunkIter_get_nth(ChunkIter *it, int n, bool uncompress, VarlenDatum *result, bool *is_end)
Definition: ChunkIter.cpp:182
int8_t * pointer
Definition: Datum.h:54
const std::vector< TargetInfo > targets_
Definition: ResultSet.h:934
std::shared_ptr< RowSetMemoryOwner > row_set_mem_owner_
Definition: ResultSet.h:944
int64_t lazy_decode(const ColumnLazyFetchInfo &col_lazy_fetch, const int8_t *byte_stream, const int64_t pos)
#define CHECK_LT(x, y)
Definition: Logger.h:299
std::vector< std::vector< std::vector< const int8_t * > > > col_buffers_
Definition: ResultSet.h:958
#define CHECK(condition)
Definition: Logger.h:289
std::vector< ColumnLazyFetchInfo > lazy_fetch_info_
Definition: ResultSet.h:957
const std::vector< const int8_t * > & getColumnFrag(const size_t storge_idx, const size_t col_logical_idx, int64_t &global_idx) const
size_t length
Definition: Datum.h:53

+ Here is the call graph for this function:

TargetValue ResultSet::makeGeoTargetValue ( const int8_t *  geo_target_ptr,
const size_t  slot_idx,
const TargetInfo target_info,
const size_t  target_logical_idx,
const size_t  entry_buff_idx 
) const
private

Definition at line 1515 of file ResultSetIteration.cpp.

References advance_to_next_columnar_target_buff(), CHECK, CHECK_EQ, CHECK_LT, col_buffers_, device_id_, device_type_, QueryMemoryDescriptor::didOutputColumnar(), findStorage(), geo_return_type_, SQLTypeInfo::get_compression(), SQLTypeInfo::get_type(), SQLTypeInfo::get_type_name(), getColumnFrag(), QueryMemoryDescriptor::getExecutor(), QueryMemoryDescriptor::getPaddedColWidthForRange(), QueryMemoryDescriptor::getPaddedSlotWidthBytes(), getStorageIndex(), getVarlenOutputInfo(), GPU, TargetInfo::is_agg, SQLTypeInfo::is_geometry(), ColumnLazyFetchInfo::is_lazily_fetched, kENCODING_GEOINT, kLINESTRING, kMULTILINESTRING, kMULTIPOINT, kMULTIPOLYGON, kPOINT, kPOLYGON, lazy_fetch_info_, ColumnLazyFetchInfo::local_col_id, query_mem_desc_, read_int_from_buff(), separate_varlen_storage_valid_, serialized_varlen_buffer_, QueryMemoryDescriptor::slotIsVarlenOutput(), TargetInfo::sql_type, and UNREACHABLE.

Referenced by getTargetValueFromBufferColwise(), and getTargetValueFromBufferRowwise().

1519  {
1520  CHECK(target_info.sql_type.is_geometry());
1521 
1522  auto getNextTargetBufferRowWise = [&](const size_t slot_idx, const size_t range) {
1523  return geo_target_ptr + query_mem_desc_.getPaddedColWidthForRange(slot_idx, range);
1524  };
1525 
1526  auto getNextTargetBufferColWise = [&](const size_t slot_idx, const size_t range) {
1527  const auto storage_info = findStorage(entry_buff_idx);
1528  auto crt_geo_col_ptr = geo_target_ptr;
1529  for (size_t i = slot_idx; i < slot_idx + range; i++) {
1530  crt_geo_col_ptr = advance_to_next_columnar_target_buff(
1531  crt_geo_col_ptr, storage_info.storage_ptr->query_mem_desc_, i);
1532  }
1533  // adjusting the column pointer to represent a pointer to the geo target value
1534  return crt_geo_col_ptr +
1535  storage_info.fixedup_entry_idx *
1536  storage_info.storage_ptr->query_mem_desc_.getPaddedSlotWidthBytes(
1537  slot_idx + range);
1538  };
1539 
1540  auto getNextTargetBuffer = [&](const size_t slot_idx, const size_t range) {
1542  ? getNextTargetBufferColWise(slot_idx, range)
1543  : getNextTargetBufferRowWise(slot_idx, range);
1544  };
1545 
1546  auto getCoordsDataPtr = [&](const int8_t* geo_target_ptr) {
1547  return read_int_from_buff(getNextTargetBuffer(slot_idx, 0),
1549  };
1550 
1551  auto getCoordsLength = [&](const int8_t* geo_target_ptr) {
1552  return read_int_from_buff(getNextTargetBuffer(slot_idx, 1),
1554  };
1555 
1556  auto getRingSizesPtr = [&](const int8_t* geo_target_ptr) {
1557  return read_int_from_buff(getNextTargetBuffer(slot_idx, 2),
1559  };
1560 
1561  auto getRingSizesLength = [&](const int8_t* geo_target_ptr) {
1562  return read_int_from_buff(getNextTargetBuffer(slot_idx, 3),
1564  };
1565 
1566  auto getPolyRingsPtr = [&](const int8_t* geo_target_ptr) {
1567  return read_int_from_buff(getNextTargetBuffer(slot_idx, 4),
1569  };
1570 
1571  auto getPolyRingsLength = [&](const int8_t* geo_target_ptr) {
1572  return read_int_from_buff(getNextTargetBuffer(slot_idx, 5),
1574  };
1575 
1576  auto getFragColBuffers = [&]() -> decltype(auto) {
1577  const auto storage_idx = getStorageIndex(entry_buff_idx);
1578  CHECK_LT(storage_idx.first, col_buffers_.size());
1579  auto global_idx = getCoordsDataPtr(geo_target_ptr);
1580  return getColumnFrag(storage_idx.first, target_logical_idx, global_idx);
1581  };
1582 
1583  const bool is_gpu_fetch = device_type_ == ExecutorDeviceType::GPU;
1584 
1585  auto getDataMgr = [&]() {
1586  auto executor = query_mem_desc_.getExecutor();
1587  CHECK(executor);
1588  return executor->getDataMgr();
1589  };
1590 
1591  auto getSeparateVarlenStorage = [&]() -> decltype(auto) {
1592  const auto storage_idx = getStorageIndex(entry_buff_idx);
1593  CHECK_LT(storage_idx.first, serialized_varlen_buffer_.size());
1594  const auto& varlen_buffer = serialized_varlen_buffer_[storage_idx.first];
1595  return varlen_buffer;
1596  };
1597 
1598  if (separate_varlen_storage_valid_ && getCoordsDataPtr(geo_target_ptr) < 0) {
1599  CHECK_EQ(-1, getCoordsDataPtr(geo_target_ptr));
1600  return TargetValue(nullptr);
1601  }
1602 
1603  const ColumnLazyFetchInfo* col_lazy_fetch = nullptr;
1604  if (!lazy_fetch_info_.empty()) {
1605  CHECK_LT(target_logical_idx, lazy_fetch_info_.size());
1606  col_lazy_fetch = &lazy_fetch_info_[target_logical_idx];
1607  }
1608 
1609  switch (target_info.sql_type.get_type()) {
1610  case kPOINT: {
1611  if (query_mem_desc_.slotIsVarlenOutput(slot_idx)) {
1612  auto varlen_output_info = getVarlenOutputInfo(entry_buff_idx);
1613  CHECK(varlen_output_info);
1614  auto geo_data_ptr = read_int_from_buff(
1615  geo_target_ptr, query_mem_desc_.getPaddedSlotWidthBytes(slot_idx));
1616  auto cpu_data_ptr =
1617  reinterpret_cast<int64_t>(varlen_output_info->computeCpuOffset(geo_data_ptr));
1618  return GeoTargetValueBuilder<kPOINT, GeoQueryOutputFetchHandler>::build(
1619  target_info.sql_type,
1621  /*data_mgr=*/nullptr,
1622  /*is_gpu_fetch=*/false,
1623  device_id_,
1624  cpu_data_ptr,
1625  target_info.sql_type.get_compression() == kENCODING_GEOINT ? 8 : 16);
1626  } else if (separate_varlen_storage_valid_ && !target_info.is_agg) {
1627  const auto& varlen_buffer = getSeparateVarlenStorage();
1628  CHECK_LT(static_cast<size_t>(getCoordsDataPtr(geo_target_ptr)),
1629  varlen_buffer.size());
1630 
1631  return GeoTargetValueBuilder<kPOINT, GeoQueryOutputFetchHandler>::build(
1632  target_info.sql_type,
1634  nullptr,
1635  false,
1636  device_id_,
1637  reinterpret_cast<int64_t>(
1638  varlen_buffer[getCoordsDataPtr(geo_target_ptr)].data()),
1639  static_cast<int64_t>(varlen_buffer[getCoordsDataPtr(geo_target_ptr)].size()));
1640  } else if (col_lazy_fetch && col_lazy_fetch->is_lazily_fetched) {
1641  const auto& frag_col_buffers = getFragColBuffers();
1642  return GeoTargetValueBuilder<kPOINT, GeoLazyFetchHandler>::build(
1643  target_info.sql_type,
1645  frag_col_buffers[col_lazy_fetch->local_col_id],
1646  getCoordsDataPtr(geo_target_ptr));
1647  } else {
1648  return GeoTargetValueBuilder<kPOINT, GeoQueryOutputFetchHandler>::build(
1649  target_info.sql_type,
1651  is_gpu_fetch ? getDataMgr() : nullptr,
1652  is_gpu_fetch,
1653  device_id_,
1654  getCoordsDataPtr(geo_target_ptr),
1655  getCoordsLength(geo_target_ptr));
1656  }
1657  break;
1658  }
1659  case kMULTIPOINT: {
1660  if (separate_varlen_storage_valid_ && !target_info.is_agg) {
1661  const auto& varlen_buffer = getSeparateVarlenStorage();
1662  CHECK_LT(static_cast<size_t>(getCoordsDataPtr(geo_target_ptr)),
1663  varlen_buffer.size());
1664 
1665  return GeoTargetValueBuilder<kMULTIPOINT, GeoQueryOutputFetchHandler>::build(
1666  target_info.sql_type,
1668  nullptr,
1669  false,
1670  device_id_,
1671  reinterpret_cast<int64_t>(
1672  varlen_buffer[getCoordsDataPtr(geo_target_ptr)].data()),
1673  static_cast<int64_t>(varlen_buffer[getCoordsDataPtr(geo_target_ptr)].size()));
1674  } else if (col_lazy_fetch && col_lazy_fetch->is_lazily_fetched) {
1675  const auto& frag_col_buffers = getFragColBuffers();
1676  return GeoTargetValueBuilder<kMULTIPOINT, GeoLazyFetchHandler>::build(
1677  target_info.sql_type,
1679  frag_col_buffers[col_lazy_fetch->local_col_id],
1680  getCoordsDataPtr(geo_target_ptr));
1681  } else {
1682  return GeoTargetValueBuilder<kMULTIPOINT, GeoQueryOutputFetchHandler>::build(
1683  target_info.sql_type,
1685  is_gpu_fetch ? getDataMgr() : nullptr,
1686  is_gpu_fetch,
1687  device_id_,
1688  getCoordsDataPtr(geo_target_ptr),
1689  getCoordsLength(geo_target_ptr));
1690  }
1691  break;
1692  }
1693  case kLINESTRING: {
1694  if (separate_varlen_storage_valid_ && !target_info.is_agg) {
1695  const auto& varlen_buffer = getSeparateVarlenStorage();
1696  CHECK_LT(static_cast<size_t>(getCoordsDataPtr(geo_target_ptr)),
1697  varlen_buffer.size());
1698 
1699  return GeoTargetValueBuilder<kLINESTRING, GeoQueryOutputFetchHandler>::build(
1700  target_info.sql_type,
1702  nullptr,
1703  false,
1704  device_id_,
1705  reinterpret_cast<int64_t>(
1706  varlen_buffer[getCoordsDataPtr(geo_target_ptr)].data()),
1707  static_cast<int64_t>(varlen_buffer[getCoordsDataPtr(geo_target_ptr)].size()));
1708  } else if (col_lazy_fetch && col_lazy_fetch->is_lazily_fetched) {
1709  const auto& frag_col_buffers = getFragColBuffers();
1710  return GeoTargetValueBuilder<kLINESTRING, GeoLazyFetchHandler>::build(
1711  target_info.sql_type,
1713  frag_col_buffers[col_lazy_fetch->local_col_id],
1714  getCoordsDataPtr(geo_target_ptr));
1715  } else {
1716  return GeoTargetValueBuilder<kLINESTRING, GeoQueryOutputFetchHandler>::build(
1717  target_info.sql_type,
1719  is_gpu_fetch ? getDataMgr() : nullptr,
1720  is_gpu_fetch,
1721  device_id_,
1722  getCoordsDataPtr(geo_target_ptr),
1723  getCoordsLength(geo_target_ptr));
1724  }
1725  break;
1726  }
1727  case kMULTILINESTRING: {
1728  if (separate_varlen_storage_valid_ && !target_info.is_agg) {
1729  const auto& varlen_buffer = getSeparateVarlenStorage();
1730  CHECK_LT(static_cast<size_t>(getCoordsDataPtr(geo_target_ptr) + 1),
1731  varlen_buffer.size());
1732 
1733  return GeoTargetValueBuilder<kMULTILINESTRING, GeoQueryOutputFetchHandler>::build(
1734  target_info.sql_type,
1736  nullptr,
1737  false,
1738  device_id_,
1739  reinterpret_cast<int64_t>(
1740  varlen_buffer[getCoordsDataPtr(geo_target_ptr)].data()),
1741  static_cast<int64_t>(varlen_buffer[getCoordsDataPtr(geo_target_ptr)].size()),
1742  reinterpret_cast<int64_t>(
1743  varlen_buffer[getCoordsDataPtr(geo_target_ptr) + 1].data()),
1744  static_cast<int64_t>(
1745  varlen_buffer[getCoordsDataPtr(geo_target_ptr) + 1].size()));
1746  } else if (col_lazy_fetch && col_lazy_fetch->is_lazily_fetched) {
1747  const auto& frag_col_buffers = getFragColBuffers();
1748 
1749  return GeoTargetValueBuilder<kMULTILINESTRING, GeoLazyFetchHandler>::build(
1750  target_info.sql_type,
1752  frag_col_buffers[col_lazy_fetch->local_col_id],
1753  getCoordsDataPtr(geo_target_ptr),
1754  frag_col_buffers[col_lazy_fetch->local_col_id + 1],
1755  getCoordsDataPtr(geo_target_ptr));
1756  } else {
1757  return GeoTargetValueBuilder<kMULTILINESTRING, GeoQueryOutputFetchHandler>::build(
1758  target_info.sql_type,
1760  is_gpu_fetch ? getDataMgr() : nullptr,
1761  is_gpu_fetch,
1762  device_id_,
1763  getCoordsDataPtr(geo_target_ptr),
1764  getCoordsLength(geo_target_ptr),
1765  getRingSizesPtr(geo_target_ptr),
1766  getRingSizesLength(geo_target_ptr) * 4);
1767  }
1768  break;
1769  }
1770  case kPOLYGON: {
1771  if (separate_varlen_storage_valid_ && !target_info.is_agg) {
1772  const auto& varlen_buffer = getSeparateVarlenStorage();
1773  CHECK_LT(static_cast<size_t>(getCoordsDataPtr(geo_target_ptr) + 1),
1774  varlen_buffer.size());
1775 
1776  return GeoTargetValueBuilder<kPOLYGON, GeoQueryOutputFetchHandler>::build(
1777  target_info.sql_type,
1779  nullptr,
1780  false,
1781  device_id_,
1782  reinterpret_cast<int64_t>(
1783  varlen_buffer[getCoordsDataPtr(geo_target_ptr)].data()),
1784  static_cast<int64_t>(varlen_buffer[getCoordsDataPtr(geo_target_ptr)].size()),
1785  reinterpret_cast<int64_t>(
1786  varlen_buffer[getCoordsDataPtr(geo_target_ptr) + 1].data()),
1787  static_cast<int64_t>(
1788  varlen_buffer[getCoordsDataPtr(geo_target_ptr) + 1].size()));
1789  } else if (col_lazy_fetch && col_lazy_fetch->is_lazily_fetched) {
1790  const auto& frag_col_buffers = getFragColBuffers();
1791 
1792  return GeoTargetValueBuilder<kPOLYGON, GeoLazyFetchHandler>::build(
1793  target_info.sql_type,
1795  frag_col_buffers[col_lazy_fetch->local_col_id],
1796  getCoordsDataPtr(geo_target_ptr),
1797  frag_col_buffers[col_lazy_fetch->local_col_id + 1],
1798  getCoordsDataPtr(geo_target_ptr));
1799  } else {
1800  return GeoTargetValueBuilder<kPOLYGON, GeoQueryOutputFetchHandler>::build(
1801  target_info.sql_type,
1803  is_gpu_fetch ? getDataMgr() : nullptr,
1804  is_gpu_fetch,
1805  device_id_,
1806  getCoordsDataPtr(geo_target_ptr),
1807  getCoordsLength(geo_target_ptr),
1808  getRingSizesPtr(geo_target_ptr),
1809  getRingSizesLength(geo_target_ptr) * 4);
1810  }
1811  break;
1812  }
1813  case kMULTIPOLYGON: {
1814  if (separate_varlen_storage_valid_ && !target_info.is_agg) {
1815  const auto& varlen_buffer = getSeparateVarlenStorage();
1816  CHECK_LT(static_cast<size_t>(getCoordsDataPtr(geo_target_ptr) + 2),
1817  varlen_buffer.size());
1818 
1819  return GeoTargetValueBuilder<kMULTIPOLYGON, GeoQueryOutputFetchHandler>::build(
1820  target_info.sql_type,
1822  nullptr,
1823  false,
1824  device_id_,
1825  reinterpret_cast<int64_t>(
1826  varlen_buffer[getCoordsDataPtr(geo_target_ptr)].data()),
1827  static_cast<int64_t>(varlen_buffer[getCoordsDataPtr(geo_target_ptr)].size()),
1828  reinterpret_cast<int64_t>(
1829  varlen_buffer[getCoordsDataPtr(geo_target_ptr) + 1].data()),
1830  static_cast<int64_t>(
1831  varlen_buffer[getCoordsDataPtr(geo_target_ptr) + 1].size()),
1832  reinterpret_cast<int64_t>(
1833  varlen_buffer[getCoordsDataPtr(geo_target_ptr) + 2].data()),
1834  static_cast<int64_t>(
1835  varlen_buffer[getCoordsDataPtr(geo_target_ptr) + 2].size()));
1836  } else if (col_lazy_fetch && col_lazy_fetch->is_lazily_fetched) {
1837  const auto& frag_col_buffers = getFragColBuffers();
1838 
1839  return GeoTargetValueBuilder<kMULTIPOLYGON, GeoLazyFetchHandler>::build(
1840  target_info.sql_type,
1842  frag_col_buffers[col_lazy_fetch->local_col_id],
1843  getCoordsDataPtr(geo_target_ptr),
1844  frag_col_buffers[col_lazy_fetch->local_col_id + 1],
1845  getCoordsDataPtr(geo_target_ptr),
1846  frag_col_buffers[col_lazy_fetch->local_col_id + 2],
1847  getCoordsDataPtr(geo_target_ptr));
1848  } else {
1849  return GeoTargetValueBuilder<kMULTIPOLYGON, GeoQueryOutputFetchHandler>::build(
1850  target_info.sql_type,
1852  is_gpu_fetch ? getDataMgr() : nullptr,
1853  is_gpu_fetch,
1854  device_id_,
1855  getCoordsDataPtr(geo_target_ptr),
1856  getCoordsLength(geo_target_ptr),
1857  getRingSizesPtr(geo_target_ptr),
1858  getRingSizesLength(geo_target_ptr) * 4,
1859  getPolyRingsPtr(geo_target_ptr),
1860  getPolyRingsLength(geo_target_ptr) * 4);
1861  }
1862  break;
1863  }
1864  default:
1865  throw std::runtime_error("Unknown Geometry type encountered: " +
1866  target_info.sql_type.get_type_name());
1867  }
1868  UNREACHABLE();
1869  return TargetValue(nullptr);
1870 }
#define CHECK_EQ(x, y)
Definition: Logger.h:297
bool slotIsVarlenOutput(const size_t slot_idx) const
std::pair< size_t, size_t > getStorageIndex(const size_t entry_idx) const
Definition: ResultSet.cpp:916
GeoReturnType geo_return_type_
Definition: ResultSet.h:979
T advance_to_next_columnar_target_buff(T target_ptr, const QueryMemoryDescriptor &query_mem_desc, const size_t target_slot_idx)
SQLTypeInfo sql_type
Definition: TargetInfo.h:52
QueryMemoryDescriptor query_mem_desc_
Definition: ResultSet.h:937
#define UNREACHABLE()
Definition: Logger.h:333
int64_t read_int_from_buff(const int8_t *ptr, const int8_t compact_sz)
std::vector< SerializedVarlenBufferStorage > serialized_varlen_buffer_
Definition: ResultSet.h:970
HOST DEVICE SQLTypes get_type() const
Definition: sqltypes.h:380
bool is_agg
Definition: TargetInfo.h:50
size_t getPaddedColWidthForRange(const size_t offset, const size_t range) const
StorageLookupResult findStorage(const size_t entry_idx) const
Definition: ResultSet.cpp:941
const int8_t getPaddedSlotWidthBytes(const size_t slot_idx) const
const VarlenOutputInfo * getVarlenOutputInfo(const size_t entry_idx) const
#define CHECK_LT(x, y)
Definition: Logger.h:299
HOST DEVICE EncodingType get_compression() const
Definition: sqltypes.h:388
std::vector< std::vector< std::vector< const int8_t * > > > col_buffers_
Definition: ResultSet.h:958
std::string get_type_name() const
Definition: sqltypes.h:504
const bool is_lazily_fetched
const ExecutorDeviceType device_type_
Definition: ResultSet.h:935
#define CHECK(condition)
Definition: Logger.h:289
bool is_geometry() const
Definition: sqltypes.h:588
std::vector< ColumnLazyFetchInfo > lazy_fetch_info_
Definition: ResultSet.h:957
bool separate_varlen_storage_valid_
Definition: ResultSet.h:971
boost::variant< ScalarTargetValue, ArrayTargetValue, GeoTargetValue, GeoTargetValuePtr > TargetValue
Definition: TargetValue.h:195
const std::vector< const int8_t * > & getColumnFrag(const size_t storge_idx, const size_t col_logical_idx, int64_t &global_idx) const
const Executor * getExecutor() const
const int device_id_
Definition: ResultSet.h:936

+ Here is the call graph for this function:

+ Here is the caller graph for this function:

ScalarTargetValue ResultSet::makeStringTargetValue ( SQLTypeInfo const &  chosen_type,
bool const  translate_strings,
int64_t const  ival 
) const
private

Definition at line 1886 of file ResultSetIteration.cpp.

References getString(), and NULL_INT.

Referenced by convertToScalarTargetValue(), and makeTargetValue().

1888  {
1889  if (translate_strings) {
1890  if (static_cast<int32_t>(ival) == NULL_INT) { // TODO(alex): this isn't nice, fix it
1891  return NullableString(nullptr);
1892  } else {
1893  return NullableString(getString(chosen_type, ival));
1894  }
1895  } else {
1896  return static_cast<int64_t>(static_cast<int32_t>(ival));
1897  }
1898 }
#define NULL_INT
boost::variant< std::string, void * > NullableString
Definition: TargetValue.h:179
std::string getString(SQLTypeInfo const &, int64_t const ival) const

+ Here is the call graph for this function:

+ Here is the caller graph for this function:

TargetValue ResultSet::makeTargetValue ( const int8_t *  ptr,
const int8_t  compact_sz,
const TargetInfo target_info,
const size_t  target_logical_idx,
const bool  translate_strings,
const bool  decimal_to_double,
const size_t  entry_buff_idx 
) const
private

Definition at line 1901 of file ResultSetIteration.cpp.

References TargetInfo::agg_kind, calculateQuantile(), CHECK, CHECK_EQ, CHECK_GE, CHECK_LT, col_buffers_, convertToScalarTargetValue(), count_distinct_set_size(), decimal_to_int_type(), exp_to_scale(), QueryMemoryDescriptor::forceFourByteFloat(), get_compact_type(), getColumnFrag(), QueryMemoryDescriptor::getCountDistinctDescriptor(), getStorageIndex(), inline_int_null_val(), anonymous_namespace{ResultSetIteration.cpp}::int_resize_cast(), TargetInfo::is_agg, SQLTypeInfo::is_date_in_days(), is_distinct_target(), QueryMemoryDescriptor::isLogicalSizedColumnsAllowed(), isNullIval(), kAPPROX_QUANTILE, kAVG, kBIGINT, kENCODING_DICT, kFLOAT, kMAX, kMIN, kMODE, kSINGLE_VALUE, kSUM, kSUM_IF, result_set::lazy_decode(), lazy_fetch_info_, makeStringTargetValue(), NULL_DOUBLE, nullScalarTargetValue(), query_mem_desc_, read_int_from_buff(), and TargetInfo::sql_type.

Referenced by getTargetValueFromBufferColwise(), and getTargetValueFromBufferRowwise().

1907  {
1908  auto actual_compact_sz = compact_sz;
1909  const auto& type_info = target_info.sql_type;
1910  if (type_info.get_type() == kFLOAT && !query_mem_desc_.forceFourByteFloat()) {
1912  actual_compact_sz = sizeof(float);
1913  } else {
1914  actual_compact_sz = sizeof(double);
1915  }
1916  if (target_info.is_agg &&
1917  (target_info.agg_kind == kAVG || target_info.agg_kind == kSUM ||
1918  target_info.agg_kind == kSUM_IF || target_info.agg_kind == kMIN ||
1919  target_info.agg_kind == kMAX || target_info.agg_kind == kSINGLE_VALUE)) {
1920  // The above listed aggregates use two floats in a single 8-byte slot. Set the
1921  // padded size to 4 bytes to properly read each value.
1922  actual_compact_sz = sizeof(float);
1923  }
1924  }
1925  if (get_compact_type(target_info).is_date_in_days()) {
1926  // Dates encoded in days are converted to 8 byte values on read.
1927  actual_compact_sz = sizeof(int64_t);
1928  }
1929 
1930  // String dictionary keys are read as 32-bit values regardless of encoding
1931  if (type_info.is_string() && type_info.get_compression() == kENCODING_DICT &&
1932  type_info.get_comp_param()) {
1933  actual_compact_sz = sizeof(int32_t);
1934  }
1935 
1936  auto ival = read_int_from_buff(ptr, actual_compact_sz);
1937  const auto& chosen_type = get_compact_type(target_info);
1938  if (!lazy_fetch_info_.empty()) {
1939  CHECK_LT(target_logical_idx, lazy_fetch_info_.size());
1940  const auto& col_lazy_fetch = lazy_fetch_info_[target_logical_idx];
1941  if (col_lazy_fetch.is_lazily_fetched) {
1942  CHECK_GE(ival, 0);
1943  const auto storage_idx = getStorageIndex(entry_buff_idx);
1944  CHECK_LT(storage_idx.first, col_buffers_.size());
1945  auto& frag_col_buffers = getColumnFrag(storage_idx.first, target_logical_idx, ival);
1946  CHECK_LT(size_t(col_lazy_fetch.local_col_id), frag_col_buffers.size());
1947  ival = result_set::lazy_decode(
1948  col_lazy_fetch, frag_col_buffers[col_lazy_fetch.local_col_id], ival);
1949  if (chosen_type.is_fp()) {
1950  const auto dval = *reinterpret_cast<const double*>(may_alias_ptr(&ival));
1951  if (chosen_type.get_type() == kFLOAT) {
1952  return ScalarTargetValue(static_cast<float>(dval));
1953  } else {
1954  return ScalarTargetValue(dval);
1955  }
1956  }
1957  }
1958  }
1959  if (target_info.agg_kind == kMODE) {
1960  if (!isNullIval(chosen_type, translate_strings, ival)) {
1961  auto const* const* const agg_mode = reinterpret_cast<AggMode const* const*>(ptr);
1962  if (std::optional<int64_t> const mode = (*agg_mode)->mode()) {
1963  return convertToScalarTargetValue(chosen_type, translate_strings, *mode);
1964  }
1965  }
1966  return nullScalarTargetValue(chosen_type, translate_strings);
1967  }
1968  if (chosen_type.is_fp()) {
1969  if (target_info.agg_kind == kAPPROX_QUANTILE) {
1970  return *reinterpret_cast<double const*>(ptr) == NULL_DOUBLE
1971  ? NULL_DOUBLE // sql_validate / just_validate
1972  : calculateQuantile(*reinterpret_cast<quantile::TDigest* const*>(ptr));
1973  }
1974  switch (actual_compact_sz) {
1975  case 8: {
1976  const auto dval = *reinterpret_cast<const double*>(ptr);
1977  return chosen_type.get_type() == kFLOAT
1978  ? ScalarTargetValue(static_cast<const float>(dval))
1979  : ScalarTargetValue(dval);
1980  }
1981  case 4: {
1982  CHECK_EQ(kFLOAT, chosen_type.get_type());
1983  return *reinterpret_cast<const float*>(ptr);
1984  }
1985  default:
1986  CHECK(false);
1987  }
1988  }
1989  if (chosen_type.is_integer() || chosen_type.is_boolean() || chosen_type.is_time() ||
1990  chosen_type.is_timeinterval()) {
1991  if (is_distinct_target(target_info)) {
1993  ival, query_mem_desc_.getCountDistinctDescriptor(target_logical_idx)));
1994  }
1995  // TODO(alex): remove int_resize_cast, make read_int_from_buff return the
1996  // right type instead
1997  if (inline_int_null_val(chosen_type) ==
1998  int_resize_cast(ival, chosen_type.get_logical_size())) {
1999  return inline_int_null_val(type_info);
2000  }
2001  return ival;
2002  }
2003  if (chosen_type.is_string() && chosen_type.get_compression() == kENCODING_DICT) {
2004  return makeStringTargetValue(chosen_type, translate_strings, ival);
2005  }
2006  if (chosen_type.is_decimal()) {
2007  if (decimal_to_double) {
2008  if (target_info.is_agg &&
2009  (target_info.agg_kind == kAVG || target_info.agg_kind == kSUM ||
2010  target_info.agg_kind == kSUM_IF || target_info.agg_kind == kMIN ||
2011  target_info.agg_kind == kMAX) &&
2012  ival == inline_int_null_val(SQLTypeInfo(kBIGINT, false))) {
2013  return NULL_DOUBLE;
2014  }
2015  if (!chosen_type.get_notnull() &&
2016  ival ==
2017  inline_int_null_val(SQLTypeInfo(decimal_to_int_type(chosen_type), false))) {
2018  return NULL_DOUBLE;
2019  }
2020  return static_cast<double>(ival) / exp_to_scale(chosen_type.get_scale());
2021  }
2022  return ival;
2023  }
2024  CHECK(false);
2025  return TargetValue(int64_t(0));
2026 }
#define CHECK_EQ(x, y)
Definition: Logger.h:297
std::pair< size_t, size_t > getStorageIndex(const size_t entry_idx) const
Definition: ResultSet.cpp:916
#define NULL_DOUBLE
double decimal_to_double(const SQLTypeInfo &otype, int64_t oval)
static ScalarTargetValue nullScalarTargetValue(SQLTypeInfo const &, bool const translate_strings)
bool isLogicalSizedColumnsAllowed() const
SQLTypeInfo sql_type
Definition: TargetInfo.h:52
QueryMemoryDescriptor query_mem_desc_
Definition: ResultSet.h:937
#define CHECK_GE(x, y)
Definition: Logger.h:302
int64_t read_int_from_buff(const int8_t *ptr, const int8_t compact_sz)
static bool isNullIval(SQLTypeInfo const &, bool const translate_strings, int64_t const ival)
ScalarTargetValue makeStringTargetValue(SQLTypeInfo const &chosen_type, bool const translate_strings, int64_t const ival) const
Definition: sqldefs.h:75
const SQLTypeInfo get_compact_type(const TargetInfo &target)
bool is_agg
Definition: TargetInfo.h:50
int64_t count_distinct_set_size(const int64_t set_handle, const CountDistinctDescriptor &count_distinct_desc)
Definition: CountDistinct.h:75
int64_t lazy_decode(const ColumnLazyFetchInfo &col_lazy_fetch, const int8_t *byte_stream, const int64_t pos)
ScalarTargetValue convertToScalarTargetValue(SQLTypeInfo const &, bool const translate_strings, int64_t const val) const
Definition: sqldefs.h:77
bool is_distinct_target(const TargetInfo &target_info)
Definition: TargetInfo.h:98
static double calculateQuantile(quantile::TDigest *const t_digest)
Definition: ResultSet.cpp:1037
SQLAgg agg_kind
Definition: TargetInfo.h:51
SQLTypes decimal_to_int_type(const SQLTypeInfo &ti)
Definition: Datum.cpp:559
const CountDistinctDescriptor & getCountDistinctDescriptor(const size_t idx) const
#define CHECK_LT(x, y)
Definition: Logger.h:299
bool is_date_in_days() const
Definition: sqltypes.h:974
int64_t int_resize_cast(const int64_t ival, const size_t sz)
std::vector< std::vector< std::vector< const int8_t * > > > col_buffers_
Definition: ResultSet.h:958
#define CHECK(condition)
Definition: Logger.h:289
std::vector< ColumnLazyFetchInfo > lazy_fetch_info_
Definition: ResultSet.h:957
uint64_t exp_to_scale(const unsigned exp)
int64_t inline_int_null_val(const SQL_TYPE_INFO &ti)
boost::variant< ScalarTargetValue, ArrayTargetValue, GeoTargetValue, GeoTargetValuePtr > TargetValue
Definition: TargetValue.h:195
Definition: sqldefs.h:76
Definition: sqldefs.h:74
Definition: sqldefs.h:83
const std::vector< const int8_t * > & getColumnFrag(const size_t storge_idx, const size_t col_logical_idx, int64_t &global_idx) const
boost::variant< int64_t, double, float, NullableString > ScalarTargetValue
Definition: TargetValue.h:180

+ Here is the call graph for this function:

+ Here is the caller graph for this function:

TargetValue ResultSet::makeVarlenTargetValue ( const int8_t *  ptr1,
const int8_t  compact_sz1,
const int8_t *  ptr2,
const int8_t  compact_sz2,
const TargetInfo target_info,
const size_t  target_logical_idx,
const bool  translate_strings,
const size_t  entry_buff_idx 
) const
private

Definition at line 1360 of file ResultSetIteration.cpp.

References anonymous_namespace{ResultSetIteration.cpp}::build_array_target_value(), catalog_, CHECK, CHECK_EQ, CHECK_GE, CHECK_GT, CHECK_LT, ChunkIter_get_nth(), col_buffers_, device_id_, device_type_, SQLTypeInfo::get_array_context_logical_size(), SQLTypeInfo::get_compression(), SQLTypeInfo::get_elem_type(), SQLTypeInfo::get_type(), getColumnFrag(), QueryMemoryDescriptor::getExecutor(), getQueryEngineCudaStreamForDevice(), getStorageIndex(), GPU, TargetInfo::is_agg, SQLTypeInfo::is_array(), VarlenDatum::is_null, SQLTypeInfo::is_string(), FlatBufferManager::isFlatBuffer(), kARRAY, kENCODING_NONE, lazy_fetch_info_, VarlenDatum::length, run_benchmark_import::optional, VarlenDatum::pointer, query_mem_desc_, read_int_from_buff(), row_set_mem_owner_, separate_varlen_storage_valid_, serialized_varlen_buffer_, TargetInfo::sql_type, and VarlenArray_get_nth().

Referenced by getTargetValueFromBufferColwise(), and getTargetValueFromBufferRowwise().

1367  {
1368  auto varlen_ptr = read_int_from_buff(ptr1, compact_sz1);
1369  if (separate_varlen_storage_valid_ && !target_info.is_agg) {
1370  if (varlen_ptr < 0) {
1371  CHECK_EQ(-1, varlen_ptr);
1372  if (target_info.sql_type.get_type() == kARRAY) {
1373  return ArrayTargetValue(boost::optional<std::vector<ScalarTargetValue>>{});
1374  }
1375  return TargetValue(nullptr);
1376  }
1377  const auto storage_idx = getStorageIndex(entry_buff_idx);
1378  if (target_info.sql_type.is_string()) {
1379  CHECK(target_info.sql_type.get_compression() == kENCODING_NONE);
1380  CHECK_LT(storage_idx.first, serialized_varlen_buffer_.size());
1381  const auto& varlen_buffer_for_storage =
1382  serialized_varlen_buffer_[storage_idx.first];
1383  CHECK_LT(static_cast<size_t>(varlen_ptr), varlen_buffer_for_storage.size());
1384  return varlen_buffer_for_storage[varlen_ptr];
1385  } else if (target_info.sql_type.get_type() == kARRAY) {
1386  CHECK_LT(storage_idx.first, serialized_varlen_buffer_.size());
1387  const auto& varlen_buffer = serialized_varlen_buffer_[storage_idx.first];
1388  CHECK_LT(static_cast<size_t>(varlen_ptr), varlen_buffer.size());
1389 
1390  return build_array_target_value(
1391  target_info.sql_type,
1392  reinterpret_cast<const int8_t*>(varlen_buffer[varlen_ptr].data()),
1393  varlen_buffer[varlen_ptr].size(),
1394  translate_strings,
1396  catalog_);
1397  } else {
1398  CHECK(false);
1399  }
1400  }
1401  if (!lazy_fetch_info_.empty()) {
1402  CHECK_LT(target_logical_idx, lazy_fetch_info_.size());
1403  const auto& col_lazy_fetch = lazy_fetch_info_[target_logical_idx];
1404  if (col_lazy_fetch.is_lazily_fetched) {
1405  const auto storage_idx = getStorageIndex(entry_buff_idx);
1406  CHECK_LT(storage_idx.first, col_buffers_.size());
1407  auto& frag_col_buffers =
1408  getColumnFrag(storage_idx.first, target_logical_idx, varlen_ptr);
1409  bool is_end{false};
1410  auto col_buf = const_cast<int8_t*>(frag_col_buffers[col_lazy_fetch.local_col_id]);
1411  if (target_info.sql_type.is_string()) {
1412  VarlenDatum vd;
1414  reinterpret_cast<ChunkIter*>(col_buf), varlen_ptr, false, &vd, &is_end);
1415  CHECK(!is_end);
1416  if (vd.is_null) {
1417  return TargetValue(nullptr);
1418  }
1419  CHECK(vd.pointer);
1420  CHECK_GT(vd.length, 0u);
1421  std::string fetched_str(reinterpret_cast<char*>(vd.pointer), vd.length);
1422  return fetched_str;
1423  } else {
1424  CHECK(target_info.sql_type.is_array());
1425  ArrayDatum ad;
1426  if (FlatBufferManager::isFlatBuffer(col_buf)) {
1427  VarlenArray_get_nth(col_buf, varlen_ptr, &ad, &is_end);
1428  } else {
1430  reinterpret_cast<ChunkIter*>(col_buf), varlen_ptr, &ad, &is_end);
1431  }
1432  CHECK(!is_end);
1433  if (ad.is_null) {
1434  return ArrayTargetValue(boost::optional<std::vector<ScalarTargetValue>>{});
1435  }
1436  CHECK_GE(ad.length, 0u);
1437  if (ad.length > 0) {
1438  CHECK(ad.pointer);
1439  }
1440  return build_array_target_value(target_info.sql_type,
1441  ad.pointer,
1442  ad.length,
1443  translate_strings,
1445  catalog_);
1446  }
1447  }
1448  }
1449  if (!varlen_ptr) {
1450  if (target_info.sql_type.is_array()) {
1451  return ArrayTargetValue(boost::optional<std::vector<ScalarTargetValue>>{});
1452  }
1453  return TargetValue(nullptr);
1454  }
1455  auto length = read_int_from_buff(ptr2, compact_sz2);
1456  if (target_info.sql_type.is_array()) {
1457  const auto& elem_ti = target_info.sql_type.get_elem_type();
1458  length *= elem_ti.get_array_context_logical_size();
1459  }
1460  std::vector<int8_t> cpu_buffer;
1461  if (varlen_ptr && device_type_ == ExecutorDeviceType::GPU) {
1462  cpu_buffer.resize(length);
1463  const auto executor = query_mem_desc_.getExecutor();
1464  CHECK(executor);
1465  auto data_mgr = executor->getDataMgr();
1466  auto allocator = std::make_unique<CudaAllocator>(
1467  data_mgr, device_id_, getQueryEngineCudaStreamForDevice(device_id_));
1468 
1469  allocator->copyFromDevice(
1470  &cpu_buffer[0], reinterpret_cast<int8_t*>(varlen_ptr), length);
1471  varlen_ptr = reinterpret_cast<int64_t>(&cpu_buffer[0]);
1472  }
1473  if (target_info.sql_type.is_array()) {
1474  return build_array_target_value(target_info.sql_type,
1475  reinterpret_cast<const int8_t*>(varlen_ptr),
1476  length,
1477  translate_strings,
1479  catalog_);
1480  }
1481  return std::string(reinterpret_cast<char*>(varlen_ptr), length);
1482 }
#define CHECK_EQ(x, y)
Definition: Logger.h:297
std::pair< size_t, size_t > getStorageIndex(const size_t entry_idx) const
Definition: ResultSet.cpp:916
bool is_null
Definition: Datum.h:55
SQLTypeInfo sql_type
Definition: TargetInfo.h:52
const Catalog_Namespace::Catalog * catalog_
Definition: ResultSet.h:947
QueryMemoryDescriptor query_mem_desc_
Definition: ResultSet.h:937
#define CHECK_GE(x, y)
Definition: Logger.h:302
int64_t read_int_from_buff(const int8_t *ptr, const int8_t compact_sz)
std::vector< SerializedVarlenBufferStorage > serialized_varlen_buffer_
Definition: ResultSet.h:970
HOST DEVICE SQLTypes get_type() const
Definition: sqltypes.h:380
#define CHECK_GT(x, y)
Definition: Logger.h:301
DEVICE void ChunkIter_get_nth(ChunkIter *it, int n, bool uncompress, VarlenDatum *result, bool *is_end)
Definition: ChunkIter.cpp:182
TargetValue build_array_target_value(const int8_t *buff, const size_t buff_sz, std::shared_ptr< RowSetMemoryOwner > row_set_mem_owner)
int8_t * pointer
Definition: Datum.h:54
std::conditional_t< is_cuda_compiler(), DeviceArrayDatum, HostArrayDatum > ArrayDatum
Definition: sqltypes.h:217
std::shared_ptr< RowSetMemoryOwner > row_set_mem_owner_
Definition: ResultSet.h:944
bool is_agg
Definition: TargetInfo.h:50
boost::optional< std::vector< ScalarTargetValue >> ArrayTargetValue
Definition: TargetValue.h:181
#define CHECK_LT(x, y)
Definition: Logger.h:299
HOST DEVICE EncodingType get_compression() const
Definition: sqltypes.h:388
int get_array_context_logical_size() const
Definition: sqltypes.h:674
std::vector< std::vector< std::vector< const int8_t * > > > col_buffers_
Definition: ResultSet.h:958
CUstream getQueryEngineCudaStreamForDevice(int device_num)
Definition: QueryEngine.cpp:7
const ExecutorDeviceType device_type_
Definition: ResultSet.h:935
#define CHECK(condition)
Definition: Logger.h:289
std::vector< ColumnLazyFetchInfo > lazy_fetch_info_
Definition: ResultSet.h:957
bool separate_varlen_storage_valid_
Definition: ResultSet.h:971
boost::variant< ScalarTargetValue, ArrayTargetValue, GeoTargetValue, GeoTargetValuePtr > TargetValue
Definition: TargetValue.h:195
bool is_string() const
Definition: sqltypes.h:576
HOST static DEVICE bool isFlatBuffer(const void *buffer)
Definition: FlatBuffer.h:186
SQLTypeInfo get_elem_type() const
Definition: sqltypes.h:957
bool is_array() const
Definition: sqltypes.h:584
const std::vector< const int8_t * > & getColumnFrag(const size_t storge_idx, const size_t col_logical_idx, int64_t &global_idx) const
const Executor * getExecutor() const
DEVICE void VarlenArray_get_nth(int8_t *buf, int n, ArrayDatum *result, bool *is_end)
Definition: sqltypes.h:1466
size_t length
Definition: Datum.h:53
const int device_id_
Definition: ResultSet.h:936

+ Here is the call graph for this function:

+ Here is the caller graph for this function:

void ResultSet::moveToBegin ( ) const

Definition at line 733 of file ResultSet.cpp.

733  {
734  crt_row_buff_idx_ = 0;
735  fetched_so_far_ = 0;
736 }
size_t fetched_so_far_
Definition: ResultSet.h:941
size_t crt_row_buff_idx_
Definition: ResultSet.h:940
ScalarTargetValue ResultSet::nullScalarTargetValue ( SQLTypeInfo const &  ti,
bool const  translate_strings 
)
static

Definition at line 1103 of file ResultSetIteration.cpp.

References inline_int_null_val(), SQLTypeInfo::is_any(), SQLTypeInfo::is_string(), kDOUBLE, kFLOAT, NULL_DOUBLE, NULL_FLOAT, and NULL_INT.

Referenced by makeTargetValue().

1104  {
1105  return ti.is_any<kDOUBLE>() ? ScalarTargetValue(NULL_DOUBLE)
1106  : ti.is_any<kFLOAT>() ? ScalarTargetValue(NULL_FLOAT)
1107  : ti.is_string() ? translate_strings
1108  ? ScalarTargetValue(NullableString(nullptr))
1109  : ScalarTargetValue(static_cast<int64_t>(NULL_INT))
1110  : ScalarTargetValue(inline_int_null_val(ti));
1111 }
#define NULL_DOUBLE
#define NULL_FLOAT
bool is_any(T &&value)
Definition: misc.h:258
#define NULL_INT
boost::variant< std::string, void * > NullableString
Definition: TargetValue.h:179
int64_t inline_int_null_val(const SQL_TYPE_INFO &ti)
boost::variant< int64_t, double, float, NullableString > ScalarTargetValue
Definition: TargetValue.h:180

+ Here is the call graph for this function:

+ Here is the caller graph for this function:

size_t ResultSet::parallelRowCount ( ) const
private

Definition at line 631 of file ResultSet.cpp.

References anonymous_namespace{ResultSet.cpp}::get_truncated_row_count(), threading_serial::parallel_reduce(), and logger::thread_local_ids().

631  {
632  using namespace threading;
633  auto execute_parallel_row_count =
634  [this, parent_thread_local_ids = logger::thread_local_ids()](
635  const blocked_range<size_t>& r, size_t row_count) {
636  logger::LocalIdsScopeGuard lisg = parent_thread_local_ids.setNewThreadId();
637  for (size_t i = r.begin(); i < r.end(); ++i) {
638  if (!isRowAtEmpty(i)) {
639  ++row_count;
640  }
641  }
642  return row_count;
643  };
644  const auto row_count = parallel_reduce(blocked_range<size_t>(0, entryCount()),
645  size_t(0),
646  execute_parallel_row_count,
647  std::plus<int>());
648  return get_truncated_row_count(row_count, getLimit(), drop_first_);
649 }
size_t getLimit() const
Definition: ResultSet.cpp:1399
size_t get_truncated_row_count(size_t total_row_count, size_t limit, size_t offset)
Definition: ResultSet.cpp:541
size_t drop_first_
Definition: ResultSet.h:942
Value parallel_reduce(const blocked_range< Int > &range, const Value &identity, const RealBody &real_body, const Reduction &reduction, const Partitioner &p=Partitioner())
Parallel iteration with reduction.
bool isRowAtEmpty(const size_t index) const
size_t entryCount() const
Returns the number of entries the result set is allocated to hold.
ThreadLocalIds thread_local_ids()
Definition: Logger.cpp:873

+ Here is the call graph for this function:

void ResultSet::parallelTop ( const std::list< Analyzer::OrderEntry > &  order_entries,
const size_t  top_n,
const Executor executor 
)
private

Definition at line 868 of file ResultSet.cpp.

References gpu_enabled::copy(), cpu_threads(), DEBUG_TIMER, threading_std::task_group::run(), logger::thread_local_ids(), and threading_std::task_group::wait().

870  {
871  auto timer = DEBUG_TIMER(__func__);
872  const size_t nthreads = cpu_threads();
873 
874  // Split permutation_ into nthreads subranges and top-sort in-place.
875  permutation_.resize(query_mem_desc_.getEntryCount());
876  std::vector<PermutationView> permutation_views(nthreads);
877  threading::task_group top_sort_threads;
878  for (auto interval : makeIntervals<PermutationIdx>(0, permutation_.size(), nthreads)) {
879  top_sort_threads.run([this,
880  &order_entries,
881  &permutation_views,
882  top_n,
883  executor,
884  parent_thread_local_ids = logger::thread_local_ids(),
885  interval] {
886  logger::LocalIdsScopeGuard lisg = parent_thread_local_ids.setNewThreadId();
887  PermutationView pv(permutation_.data() + interval.begin, 0, interval.size());
888  pv = initPermutationBuffer(pv, interval.begin, interval.end);
889  const auto compare = createComparator(order_entries, pv, executor, true);
890  permutation_views[interval.index] = topPermutation(pv, top_n, compare);
891  });
892  }
893  top_sort_threads.wait();
894 
895  // In case you are considering implementing a parallel reduction, note that the
896  // ResultSetComparator constructor is O(N) in order to materialize some of the aggregate
897  // columns as necessary to perform a comparison. This cost is why reduction is chosen to
898  // be serial instead; only one more Comparator is needed below.
899 
900  // Left-copy disjoint top-sorted subranges into one contiguous range.
901  // ++++....+++.....+++++... -> ++++++++++++............
902  auto end = permutation_.begin() + permutation_views.front().size();
903  for (size_t i = 1; i < nthreads; ++i) {
904  std::copy(permutation_views[i].begin(), permutation_views[i].end(), end);
905  end += permutation_views[i].size();
906  }
907 
908  // Top sort final range.
909  PermutationView pv(permutation_.data(), end - permutation_.begin());
910  const auto compare = createComparator(order_entries, pv, executor, false);
911  pv = topPermutation(pv, top_n, compare);
912  permutation_.resize(pv.size());
913  permutation_.shrink_to_fit();
914 }
Permutation permutation_
Definition: ResultSet.h:945
PermutationView initPermutationBuffer(PermutationView permutation, PermutationIdx const begin, PermutationIdx const end) const
Definition: ResultSet.cpp:848
QueryMemoryDescriptor query_mem_desc_
Definition: ResultSet.h:937
DEVICE auto copy(ARGS &&...args)
Definition: gpu_enabled.h:51
Comparator createComparator(const std::list< Analyzer::OrderEntry > &order_entries, const PermutationView permutation, const Executor *executor, const bool single_threaded)
Definition: ResultSet.h:869
static PermutationView topPermutation(PermutationView, const size_t n, const Comparator &)
Definition: ResultSet.cpp:1305
#define DEBUG_TIMER(name)
Definition: Logger.h:407
int cpu_threads()
Definition: thread_count.h:25
ThreadLocalIds thread_local_ids()
Definition: Logger.cpp:873

+ Here is the call graph for this function:

void ResultSet::radixSortOnCpu ( const std::list< Analyzer::OrderEntry > &  order_entries) const
private

Definition at line 1359 of file ResultSet.cpp.

References apply_permutation_cpu(), CHECK, CHECK_EQ, DEBUG_TIMER, and sort_groups_cpu().

1360  {
1361  auto timer = DEBUG_TIMER(__func__);
1363  std::vector<int64_t> tmp_buff(query_mem_desc_.getEntryCount());
1364  std::vector<int32_t> idx_buff(query_mem_desc_.getEntryCount());
1365  CHECK_EQ(size_t(1), order_entries.size());
1366  auto buffer_ptr = storage_->getUnderlyingBuffer();
1367  for (const auto& order_entry : order_entries) {
1368  const auto target_idx = order_entry.tle_no - 1;
1369  const auto sortkey_val_buff = reinterpret_cast<int64_t*>(
1370  buffer_ptr + query_mem_desc_.getColOffInBytes(target_idx));
1371  const auto chosen_bytes = query_mem_desc_.getPaddedSlotWidthBytes(target_idx);
1372  sort_groups_cpu(sortkey_val_buff,
1373  &idx_buff[0],
1374  query_mem_desc_.getEntryCount(),
1375  order_entry.is_desc,
1376  chosen_bytes);
1377  apply_permutation_cpu(reinterpret_cast<int64_t*>(buffer_ptr),
1378  &idx_buff[0],
1379  query_mem_desc_.getEntryCount(),
1380  &tmp_buff[0],
1381  sizeof(int64_t));
1382  for (size_t target_idx = 0; target_idx < query_mem_desc_.getSlotCount();
1383  ++target_idx) {
1384  if (static_cast<int>(target_idx) == order_entry.tle_no - 1) {
1385  continue;
1386  }
1387  const auto chosen_bytes = query_mem_desc_.getPaddedSlotWidthBytes(target_idx);
1388  const auto satellite_val_buff = reinterpret_cast<int64_t*>(
1389  buffer_ptr + query_mem_desc_.getColOffInBytes(target_idx));
1390  apply_permutation_cpu(satellite_val_buff,
1391  &idx_buff[0],
1392  query_mem_desc_.getEntryCount(),
1393  &tmp_buff[0],
1394  chosen_bytes);
1395  }
1396  }
1397 }
#define CHECK_EQ(x, y)
Definition: Logger.h:297
void sort_groups_cpu(int64_t *val_buff, int32_t *idx_buff, const uint64_t entry_count, const bool desc, const uint32_t chosen_bytes)
Definition: InPlaceSort.cpp:27
QueryMemoryDescriptor query_mem_desc_
Definition: ResultSet.h:937
std::unique_ptr< ResultSetStorage > storage_
Definition: ResultSet.h:938
void apply_permutation_cpu(int64_t *val_buff, int32_t *idx_buff, const uint64_t entry_count, int64_t *tmp_buff, const uint32_t chosen_bytes)
Definition: InPlaceSort.cpp:46
const int8_t getPaddedSlotWidthBytes(const size_t slot_idx) const
#define CHECK(condition)
Definition: Logger.h:289
#define DEBUG_TIMER(name)
Definition: Logger.h:407
size_t getColOffInBytes(const size_t col_idx) const

+ Here is the call graph for this function:

void ResultSet::radixSortOnGpu ( const std::list< Analyzer::OrderEntry > &  order_entries) const
private

Definition at line 1319 of file ResultSet.cpp.

References catalog_(), CHECK_GT, copy_group_by_buffers_from_gpu(), create_dev_group_by_buffers(), DEBUG_TIMER, getQueryEngineCudaStreamForDevice(), GPU, inplace_sort_gpu(), and KernelPerFragment.

1320  {
1321  auto timer = DEBUG_TIMER(__func__);
1322  auto data_mgr = &catalog_->getDataMgr();
1323  const int device_id{0};
1324  auto allocator = std::make_unique<CudaAllocator>(
1325  data_mgr, device_id, getQueryEngineCudaStreamForDevice(device_id));
1326  CHECK_GT(block_size_, 0);
1327  CHECK_GT(grid_size_, 0);
1328  std::vector<int64_t*> group_by_buffers(block_size_);
1329  group_by_buffers[0] = reinterpret_cast<int64_t*>(storage_->getUnderlyingBuffer());
1330  auto dev_group_by_buffers =
1331  create_dev_group_by_buffers(allocator.get(),
1332  group_by_buffers,
1333  query_mem_desc_,
1334  block_size_,
1335  grid_size_,
1336  device_id,
1337  ExecutorDispatchMode::KernelPerFragment,
1338  /*num_input_rows=*/-1,
1339  /*prepend_index_buffer=*/true,
1340  /*always_init_group_by_on_host=*/true,
1341  /*use_bump_allocator=*/false,
1342  /*has_varlen_output=*/false,
1343  /*insitu_allocator*=*/nullptr);
1344  inplace_sort_gpu(
1345  order_entries, query_mem_desc_, dev_group_by_buffers, data_mgr, device_id);
1346  copy_group_by_buffers_from_gpu(
1347  *allocator,
1348  group_by_buffers,
1349  query_mem_desc_.getBufferSizeBytes(ExecutorDeviceType::GPU),
1350  dev_group_by_buffers.data,
1351  query_mem_desc_,
1352  block_size_,
1353  grid_size_,
1354  device_id,
1355  /*use_bump_allocator=*/false,
1356  /*has_varlen_output=*/false);
1357 }
GpuGroupByBuffers create_dev_group_by_buffers(DeviceAllocator *device_allocator, const std::vector< int64_t * > &group_by_buffers, const QueryMemoryDescriptor &query_mem_desc, const unsigned block_size_x, const unsigned grid_size_x, const int device_id, const ExecutorDispatchMode dispatch_mode, const int64_t num_input_rows, const bool prepend_index_buffer, const bool always_init_group_by_on_host, const bool use_bump_allocator, const bool has_varlen_output, Allocator *insitu_allocator)
Definition: GpuMemUtils.cpp:70
Data_Namespace::DataMgr & getDataMgr() const
Definition: Catalog.h:249
const Catalog_Namespace::Catalog * catalog_
Definition: ResultSet.h:947
QueryMemoryDescriptor query_mem_desc_
Definition: ResultSet.h:937
std::unique_ptr< ResultSetStorage > storage_
Definition: ResultSet.h:938
void inplace_sort_gpu(const std::list< Analyzer::OrderEntry > &order_entries, const QueryMemoryDescriptor &query_mem_desc, const GpuGroupByBuffers &group_by_buffers, Data_Namespace::DataMgr *data_mgr, const int device_id)
#define CHECK_GT(x, y)
Definition: Logger.h:301
unsigned block_size_
Definition: ResultSet.h:948
unsigned grid_size_
Definition: ResultSet.h:949
CUstream getQueryEngineCudaStreamForDevice(int device_num)
Definition: QueryEngine.cpp:7
#define DEBUG_TIMER(name)
Definition: Logger.h:407
void copy_group_by_buffers_from_gpu(DeviceAllocator &device_allocator, const std::vector< int64_t * > &group_by_buffers, const size_t groups_buffer_size, const int8_t *group_by_dev_buffers_mem, const QueryMemoryDescriptor &query_mem_desc, const unsigned block_size_x, const unsigned grid_size_x, const int device_id, const bool prepend_index_buffer, const bool has_varlen_output)

+ Here is the call graph for this function:

size_t ResultSet::rowCount ( const bool  force_parallel = false) const

Returns the number of valid entries in the result set (i.e that will be returned from the SQL query or inputted into the next query step)

Note that this can be less than or equal to the value returned by ResultSet::getEntries(), whether due to a SQL LIMIT/OFFSET applied or because the result set representation is inherently sparse (i.e. baseline hash group by).

Internally this function references/sets a cached value (cached_row_count_) so that the cost of computing the result is only paid once per result set.

If the actual row count is not cached and needs to be computed, in some cases that can be O(1) (i.e. if limits and offsets are present, or for the output of a table function). For projections, we use a binary search, so it is O(log n), otherwise it is O(n) (with n being ResultSet::entryCount()), which will be run in parallel if the entry count >= the default of 20000 or if force_parallel is set to true

Note that we currently do not invalidate the cache if the result set is changed (i.e appended to), so this function should only be called after the result set is finalized.

Parameters
force_parallelForces the row count to be computed in parallel if the row count cannot be otherwise be computed from metadata or via a binary search (otherwise parallel search is automatically used for result sets with entryCount() >= 20000)

Definition at line 595 of file ResultSet.cpp.

References CHECK_GE, and uninitialized_cached_row_count.

595  {
596  // cached_row_count_ is atomic, so fetch it into a local variable first
597  // to avoid repeat fetches
598  const int64_t cached_row_count = cached_row_count_;
599  if (cached_row_count != uninitialized_cached_row_count) {
600  CHECK_GE(cached_row_count, 0);
601  return cached_row_count;
602  }
603  setCachedRowCount(rowCountImpl(force_parallel));
604  return cached_row_count_;
605 }
#define CHECK_GE(x, y)
Definition: Logger.h:302
std::atomic< int64_t > cached_row_count_
Definition: ResultSet.h:975
size_t rowCountImpl(const bool force_parallel) const
Definition: ResultSet.cpp:557
constexpr int64_t uninitialized_cached_row_count
Definition: ResultSet.cpp:52
void setCachedRowCount(const size_t row_count) const
Definition: ResultSet.cpp:611
size_t ResultSet::rowCountImpl ( const bool  force_parallel) const
private

Definition at line 557 of file ResultSet.cpp.

References CHECK, anonymous_namespace{ResultSet.cpp}::get_truncated_row_count(), Projection, and TableFunction.

557  {
558  if (just_explain_) {
559  return 1;
560  }
561  if (query_mem_desc_.getQueryDescriptionType() == QueryDescriptionType::TableFunction) {
562  return entryCount();
563  }
564  if (!permutation_.empty()) {
565  // keep_first_ corresponds to SQL LIMIT
566  // drop_first_ corresponds to SQL OFFSET
567  return get_truncated_row_count(permutation_.size(), keep_first_, drop_first_);
568  }
569  if (!storage_) {
570  return 0;
571  }
572  CHECK(permutation_.empty());
573  if (query_mem_desc_.getQueryDescriptionType() == QueryDescriptionType::Projection) {
574  return binSearchRowCount();
575  }
576 
577  constexpr size_t auto_parallel_row_count_threshold{20000UL};
578  if (force_parallel || entryCount() >= auto_parallel_row_count_threshold) {
579  return parallelRowCount();
580  }
581  std::lock_guard<std::mutex> lock(row_iteration_mutex_);
582  moveToBegin();
583  size_t row_count{0};
584  while (true) {
585  auto crt_row = getNextRowUnlocked(false, false);
586  if (crt_row.empty()) {
587  break;
588  }
589  ++row_count;
590  }
591  moveToBegin();
592  return row_count;
593 }
std::mutex row_iteration_mutex_
Definition: ResultSet.h:976
Permutation permutation_
Definition: ResultSet.h:945
void moveToBegin() const
Definition: ResultSet.cpp:733
QueryMemoryDescriptor query_mem_desc_
Definition: ResultSet.h:937
std::unique_ptr< ResultSetStorage > storage_
Definition: ResultSet.h:938
size_t keep_first_
Definition: ResultSet.h:943
const bool just_explain_
Definition: ResultSet.h:973
size_t get_truncated_row_count(size_t total_row_count, size_t limit, size_t offset)
Definition: ResultSet.cpp:541
size_t parallelRowCount() const
Definition: ResultSet.cpp:631
size_t drop_first_
Definition: ResultSet.h:942
QueryDescriptionType getQueryDescriptionType() const
size_t entryCount() const
Returns the number of entries the result set is allocated to hold.
#define CHECK(condition)
Definition: Logger.h:289
std::vector< TargetValue > getNextRowUnlocked(const bool translate_strings, const bool decimal_to_double) const
size_t binSearchRowCount() const
Definition: ResultSet.cpp:618

+ Here is the call graph for this function:

ResultSetRowIterator ResultSet::rowIterator ( size_t  from_logical_index,
bool  translate_strings,
bool  decimal_to_double 
) const
inline

Definition at line 203 of file ResultSet.h.

Referenced by rowIterator().

205  {
206  ResultSetRowIterator rowIterator(this, translate_strings, decimal_to_double);
207 
208  // move to first logical position
209  ++rowIterator;
210 
211  for (size_t index = 0; index < from_logical_index; index++) {
212  ++rowIterator;
213  }
214 
215  return rowIterator;
216  }
double decimal_to_double(const SQLTypeInfo &otype, int64_t oval)
ResultSetRowIterator rowIterator(size_t from_logical_index, bool translate_strings, bool decimal_to_double) const
Definition: ResultSet.h:203

+ Here is the caller graph for this function:

ResultSetRowIterator ResultSet::rowIterator ( bool  translate_strings,
bool  decimal_to_double 
) const
inline

Definition at line 218 of file ResultSet.h.

References rowIterator().

219  {
220  return rowIterator(0, translate_strings, decimal_to_double);
221  }
double decimal_to_double(const SQLTypeInfo &otype, int64_t oval)
ResultSetRowIterator rowIterator(size_t from_logical_index, bool translate_strings, bool decimal_to_double) const
Definition: ResultSet.h:203

+ Here is the call graph for this function:

void ResultSet::serialize ( TSerializedRows &  serialized_rows) const
void ResultSet::serializeCountDistinctColumns ( TSerializedRows &  ) const
private
void ResultSet::serializeProjection ( TSerializedRows &  serialized_rows) const