OmniSciDB  0264ff685a
ResultSet Class Reference

#include <ResultSet.h>

+ Collaboration diagram for ResultSet:

Classes

struct  ColumnWiseTargetAccessor
 
struct  QueryExecutionTimings
 
struct  ResultSetComparator
 
struct  RowWiseTargetAccessor
 
struct  StorageLookupResult
 
struct  TargetOffsets
 
struct  VarlenTargetPtrPair
 

Public Types

enum  GeoReturnType { GeoReturnType::GeoTargetValue, GeoReturnType::WktString, GeoReturnType::GeoTargetValuePtr, GeoReturnType::GeoTargetValueGpuPtr }
 

Public Member Functions

 ResultSet (const std::vector< TargetInfo > &targets, const ExecutorDeviceType device_type, const QueryMemoryDescriptor &query_mem_desc, const std::shared_ptr< RowSetMemoryOwner > row_set_mem_owner, const Catalog_Namespace::Catalog *catalog, const unsigned block_size, const unsigned grid_size)
 
 ResultSet (const std::vector< TargetInfo > &targets, const std::vector< ColumnLazyFetchInfo > &lazy_fetch_info, const std::vector< std::vector< const int8_t *>> &col_buffers, const std::vector< std::vector< int64_t >> &frag_offsets, const std::vector< int64_t > &consistent_frag_sizes, const ExecutorDeviceType device_type, const int device_id, const QueryMemoryDescriptor &query_mem_desc, const std::shared_ptr< RowSetMemoryOwner > row_set_mem_owner, const Catalog_Namespace::Catalog *catalog, const unsigned block_size, const unsigned grid_size)
 
 ResultSet (const std::shared_ptr< const Analyzer::Estimator >, const ExecutorDeviceType device_type, const int device_id, Data_Namespace::DataMgr *data_mgr)
 
 ResultSet (const std::string &explanation)
 
 ResultSet (int64_t queue_time_ms, int64_t render_time_ms, const std::shared_ptr< RowSetMemoryOwner > row_set_mem_owner)
 
 ~ResultSet ()
 
ResultSetRowIterator rowIterator (size_t from_logical_index, bool translate_strings, bool decimal_to_double) const
 
ResultSetRowIterator rowIterator (bool translate_strings, bool decimal_to_double) const
 
ExecutorDeviceType getDeviceType () const
 
const ResultSetStorage * allocateStorage () const
 
const ResultSetStorage * allocateStorage (int8_t *, const std::vector< int64_t > &) const
 
const ResultSetStorage * allocateStorage (const std::vector< int64_t > &) const
 
void updateStorageEntryCount (const size_t new_entry_count)
 
std::vector< TargetValue > getNextRow (const bool translate_strings, const bool decimal_to_double) const
 
size_t getCurrentRowBufferIndex () const
 
std::vector< TargetValue > getRowAt (const size_t index) const
 
TargetValue getRowAt (const size_t row_idx, const size_t col_idx, const bool translate_strings, const bool decimal_to_double=true) const
 
OneIntegerColumnRow getOneColRow (const size_t index) const
 
std::vector< TargetValue > getRowAtNoTranslations (const size_t index, const std::vector< bool > &targets_to_skip={}) const
 
bool isRowAtEmpty (const size_t index) const
 
void sort (const std::list< Analyzer::OrderEntry > &order_entries, const size_t top_n, const Executor *executor)
 
void keepFirstN (const size_t n)
 
void dropFirstN (const size_t n)
 
void append (ResultSet &that)
 
const ResultSetStorage * getStorage () const
 
size_t colCount () const
 
SQLTypeInfo getColType (const size_t col_idx) const
 
size_t rowCount (const bool force_parallel=false) const
 
void setCachedRowCount (const size_t row_count) const
 
size_t entryCount () const
 
size_t getBufferSizeBytes (const ExecutorDeviceType device_type) const
 
bool definitelyHasNoRows () const
 
const QueryMemoryDescriptor & getQueryMemDesc () const
 
const std::vector< TargetInfo > & getTargetInfos () const
 
const std::vector< int64_t > & getTargetInitVals () const
 
int8_t * getDeviceEstimatorBuffer () const
 
int8_t * getHostEstimatorBuffer () const
 
void syncEstimatorBuffer () const
 
size_t getNDVEstimator () const
 
void setQueueTime (const int64_t queue_time)
 
void setKernelQueueTime (const int64_t kernel_queue_time)
 
void addCompilationQueueTime (const int64_t compilation_queue_time)
 
int64_t getQueueTime () const
 
int64_t getRenderTime () const
 
void moveToBegin () const
 
bool isTruncated () const
 
bool isExplain () const
 
void setValidationOnlyRes ()
 
bool isValidationOnlyRes () const
 
bool isGeoColOnGpu (const size_t col_idx) const
 
int getDeviceId () const
 
void fillOneEntry (const std::vector< int64_t > &entry)
 
void initializeStorage () const
 
void holdChunks (const std::list< std::shared_ptr< Chunk_NS::Chunk >> &chunks)
 
void holdChunkIterators (const std::shared_ptr< std::list< ChunkIter >> chunk_iters)
 
void holdLiterals (std::vector< int8_t > &literal_buff)
 
std::shared_ptr< RowSetMemoryOwner > getRowSetMemOwner () const
 
const std::vector< uint32_t > & getPermutationBuffer () const
 
const bool isPermutationBufferEmpty () const
 
void serialize (TSerializedRows &serialized_rows) const
 
size_t getLimit () const
 
GeoReturnType getGeoReturnType () const
 
void setGeoReturnType (const GeoReturnType val)
 
void copyColumnIntoBuffer (const size_t column_idx, int8_t *output_buffer, const size_t output_buffer_size) const
 
bool isDirectColumnarConversionPossible () const
 
bool didOutputColumnar () const
 
bool isZeroCopyColumnarConversionPossible (size_t column_idx) const
 
const int8_t * getColumnarBuffer (size_t column_idx) const
 
QueryDescriptionType getQueryDescriptionType () const
 
const int8_t getPaddedSlotWidthBytes (const size_t slot_idx) const
 
std::tuple< std::vector< bool >, size_t > getSingleSlotTargetBitmap () const
 
std::tuple< std::vector< bool >, size_t > getSupportedSingleSlotTargetBitmap () const
 
std::vector< size_t > getSlotIndicesForTargetIndices () const
 
const std::vector< ColumnLazyFetchInfo > & getLazyFetchInfo () const
 
void setSeparateVarlenStorageValid (const bool val)
 
std::shared_ptr< const std::vector< std::string > > getStringDictionaryPayloadCopy (const int dict_id) const
 
template<typename ENTRY_TYPE , QueryDescriptionType QUERY_TYPE, bool COLUMNAR_FORMAT>
ENTRY_TYPE getEntryAt (const size_t row_idx, const size_t target_idx, const size_t slot_idx) const
 
template<typename ENTRY_TYPE , QueryDescriptionType QUERY_TYPE, bool COLUMNAR_FORMAT>
ENTRY_TYPE getEntryAt (const size_t row_idx, const size_t target_idx, const size_t slot_idx) const
 
template<typename ENTRY_TYPE >
ENTRY_TYPE getColumnarPerfectHashEntryAt (const size_t row_idx, const size_t target_idx, const size_t slot_idx) const
 
template<typename ENTRY_TYPE >
ENTRY_TYPE getRowWisePerfectHashEntryAt (const size_t row_idx, const size_t target_idx, const size_t slot_idx) const
 
template<typename ENTRY_TYPE >
ENTRY_TYPE getRowWiseBaselineEntryAt (const size_t row_idx, const size_t target_idx, const size_t slot_idx) const
 
template<typename ENTRY_TYPE >
ENTRY_TYPE getColumnarBaselineEntryAt (const size_t row_idx, const size_t target_idx, const size_t slot_idx) const
 

Static Public Member Functions

static QueryMemoryDescriptor fixupQueryMemoryDescriptor (const QueryMemoryDescriptor &)
 
static std::unique_ptr< ResultSet > unserialize (const TSerializedRows &serialized_rows, const Executor *)
 

Public Attributes

friend ResultSetBuilder
 

Private Types

using BufferSet = std::set< int64_t >
 
using SerializedVarlenBufferStorage = std::vector< std::string >
 

Private Member Functions

void advanceCursorToNextEntry (ResultSetRowIterator &iter) const
 
std::vector< TargetValue > getNextRowImpl (const bool translate_strings, const bool decimal_to_double) const
 
std::vector< TargetValue > getNextRowUnlocked (const bool translate_strings, const bool decimal_to_double) const
 
std::vector< TargetValue > getRowAt (const size_t index, const bool translate_strings, const bool decimal_to_double, const bool fixup_count_distinct_pointers, const std::vector< bool > &targets_to_skip={}) const
 
template<typename ENTRY_TYPE >
ENTRY_TYPE getColumnarPerfectHashEntryAt (const size_t row_idx, const size_t target_idx, const size_t slot_idx) const
 
template<typename ENTRY_TYPE >
ENTRY_TYPE getRowWisePerfectHashEntryAt (const size_t row_idx, const size_t target_idx, const size_t slot_idx) const
 
template<typename ENTRY_TYPE >
ENTRY_TYPE getRowWiseBaselineEntryAt (const size_t row_idx, const size_t target_idx, const size_t slot_idx) const
 
template<typename ENTRY_TYPE >
ENTRY_TYPE getColumnarBaselineEntryAt (const size_t row_idx, const size_t target_idx, const size_t slot_idx) const
 
size_t binSearchRowCount () const
 
size_t parallelRowCount () const
 
size_t advanceCursorToNextEntry () const
 
void radixSortOnGpu (const std::list< Analyzer::OrderEntry > &order_entries) const
 
void radixSortOnCpu (const std::list< Analyzer::OrderEntry > &order_entries) const
 
TargetValue getTargetValueFromBufferRowwise (int8_t *rowwise_target_ptr, int8_t *keys_ptr, const size_t entry_buff_idx, const TargetInfo &target_info, const size_t target_logical_idx, const size_t slot_idx, const bool translate_strings, const bool decimal_to_double, const bool fixup_count_distinct_pointers) const
 
TargetValue getTargetValueFromBufferColwise (const int8_t *col_ptr, const int8_t *keys_ptr, const QueryMemoryDescriptor &query_mem_desc, const size_t local_entry_idx, const size_t global_entry_idx, const TargetInfo &target_info, const size_t target_logical_idx, const size_t slot_idx, const bool translate_strings, const bool decimal_to_double) const
 
TargetValue makeTargetValue (const int8_t *ptr, const int8_t compact_sz, const TargetInfo &target_info, const size_t target_logical_idx, const bool translate_strings, const bool decimal_to_double, const size_t entry_buff_idx) const
 
TargetValue makeVarlenTargetValue (const int8_t *ptr1, const int8_t compact_sz1, const int8_t *ptr2, const int8_t compact_sz2, const TargetInfo &target_info, const size_t target_logical_idx, const bool translate_strings, const size_t entry_buff_idx) const
 
TargetValue makeGeoTargetValue (const int8_t *geo_target_ptr, const size_t slot_idx, const TargetInfo &target_info, const size_t target_logical_idx, const size_t entry_buff_idx) const
 
InternalTargetValue getColumnInternal (const int8_t *buff, const size_t entry_idx, const size_t target_logical_idx, const StorageLookupResult &storage_lookup_result) const
 
InternalTargetValue getVarlenOrderEntry (const int64_t str_ptr, const size_t str_len) const
 
int64_t lazyReadInt (const int64_t ival, const size_t target_logical_idx, const StorageLookupResult &storage_lookup_result) const
 
std::pair< size_t, size_t > getStorageIndex (const size_t entry_idx) const
 
const std::vector< const int8_t * > & getColumnFrag (const size_t storge_idx, const size_t col_logical_idx, int64_t &global_idx) const
 
StorageLookupResult findStorage (const size_t entry_idx) const
 
std::function< bool(const uint32_t, const uint32_t)> createComparator (const std::list< Analyzer::OrderEntry > &order_entries, const bool use_heap, const Executor *executor)
 
void sortPermutation (const std::function< bool(const uint32_t, const uint32_t)> compare)
 
std::vector< uint32_t > initPermutationBuffer (const size_t start, const size_t step)
 
void parallelTop (const std::list< Analyzer::OrderEntry > &order_entries, const size_t top_n, const Executor *executor)
 
void baselineSort (const std::list< Analyzer::OrderEntry > &order_entries, const size_t top_n, const Executor *executor)
 
void doBaselineSort (const ExecutorDeviceType device_type, const std::list< Analyzer::OrderEntry > &order_entries, const size_t top_n, const Executor *executor)
 
bool canUseFastBaselineSort (const std::list< Analyzer::OrderEntry > &order_entries, const size_t top_n)
 
Data_Namespace::DataMgr * getDataManager () const
 
int getGpuCount () const
 
void serializeProjection (TSerializedRows &serialized_rows) const
 
void serializeVarlenAggColumn (int8_t *buf, std::vector< std::string > &varlen_bufer) const
 
void serializeCountDistinctColumns (TSerializedRows &) const
 
void unserializeCountDistinctColumns (const TSerializedRows &)
 
void fixupCountDistinctPointers ()
 
void create_active_buffer_set (BufferSet &count_distinct_active_buffer_set) const
 
int64_t getDistinctBufferRefFromBufferRowwise (int8_t *rowwise_target_ptr, const TargetInfo &target_info) const
 

Static Private Member Functions

static bool isNull (const SQLTypeInfo &ti, const InternalTargetValue &val, const bool float_argument_input)
 
static void topPermutation (std::vector< uint32_t > &to_sort, const size_t n, const std::function< bool(const uint32_t, const uint32_t)> compare)
 

Private Attributes

const std::vector< TargetInfo > targets_
 
const ExecutorDeviceType device_type_
 
const int device_id_
 
QueryMemoryDescriptor query_mem_desc_
 
std::unique_ptr< ResultSetStorage > storage_
 
AppendedStorage appended_storage_
 
size_t crt_row_buff_idx_
 
size_t fetched_so_far_
 
size_t drop_first_
 
size_t keep_first_
 
std::shared_ptr< RowSetMemoryOwner > row_set_mem_owner_
 
std::vector< uint32_t > permutation_
 
const Catalog_Namespace::Catalog * catalog_
 
unsigned block_size_ {0}
 
unsigned grid_size_ {0}
 
QueryExecutionTimings timings_
 
std::list< std::shared_ptr< Chunk_NS::Chunk > > chunks_
 
std::vector< std::shared_ptr< std::list< ChunkIter > > > chunk_iters_
 
std::vector< std::vector< int8_t > > literal_buffers_
 
const std::vector< ColumnLazyFetchInfo > lazy_fetch_info_
 
std::vector< std::vector< std::vector< const int8_t * > > > col_buffers_
 
std::vector< std::vector< std::vector< int64_t > > > frag_offsets_
 
std::vector< std::vector< int64_t > > consistent_frag_sizes_
 
const std::shared_ptr< const Analyzer::Estimator > estimator_
 
Data_Namespace::AbstractBuffer * device_estimator_buffer_ {nullptr}
 
int8_t * host_estimator_buffer_ {nullptr}
 
Data_Namespace::DataMgr * data_mgr_
 
std::vector< SerializedVarlenBufferStorage > serialized_varlen_buffer_
 
bool separate_varlen_storage_valid_
 
std::string explanation_
 
const bool just_explain_
 
bool for_validation_only_
 
std::atomic< int64_t > cached_row_count_
 
std::mutex row_iteration_mutex_
 
GeoReturnType geo_return_type_
 
std::unique_ptr< ResultSetComparator< RowWiseTargetAccessor > > row_wise_comparator_
 
std::unique_ptr< ResultSetComparator< ColumnWiseTargetAccessor > > column_wise_comparator_
 

Friends

class ResultSetManager
 
class ResultSetRowIterator
 
class ColumnarResults
 

Detailed Description

Definition at line 152 of file ResultSet.h.

Member Typedef Documentation

◆ BufferSet

using ResultSet::BufferSet = std::set<int64_t>
private

Definition at line 697 of file ResultSet.h.

◆ SerializedVarlenBufferStorage

using ResultSet::SerializedVarlenBufferStorage = std::vector<std::string>
private

Definition at line 737 of file ResultSet.h.

Member Enumeration Documentation

◆ GeoReturnType

Geo return type options when accessing geo columns from a result set.

Enumerator
GeoTargetValue 

Copies the geo data into a struct of vectors - coords are uncompressed

WktString 

Returns the geo data as a WKT string

GeoTargetValuePtr 

Returns only the pointers of the underlying buffers for the geo data.

GeoTargetValueGpuPtr 

If geo data is currently on a device, keep the data on the device and return the device ptrs

Definition at line 359 of file ResultSet.h.

359  {
362  WktString,
365  GeoTargetValueGpuPtr
367  };
boost::optional< boost::variant< GeoPointTargetValue, GeoLineStringTargetValue, GeoPolyTargetValue, GeoMultiPolyTargetValue > > GeoTargetValue
Definition: TargetValue.h:161
boost::variant< GeoPointTargetValuePtr, GeoLineStringTargetValuePtr, GeoPolyTargetValuePtr, GeoMultiPolyTargetValuePtr > GeoTargetValuePtr
Definition: TargetValue.h:165

Constructor & Destructor Documentation

◆ ResultSet() [1/5]

ResultSet::ResultSet ( const std::vector< TargetInfo > &  targets,
const ExecutorDeviceType  device_type,
const QueryMemoryDescriptor &  query_mem_desc,
const std::shared_ptr< RowSetMemoryOwner >  row_set_mem_owner,
const Catalog_Namespace::Catalog *  catalog,
const unsigned  block_size,
const unsigned  grid_size 
)

Definition at line 56 of file ResultSet.cpp.

References CudaAllocator::allocGpuAbstractBuffer(), block_size_, cached_row_count_, catalog_, checked_calloc(), col_buffers_, consistent_frag_sizes_, crt_row_buff_idx_, data_mgr_, device_estimator_buffer_, device_id_, device_type_, drop_first_, estimator_, fetched_so_far_, for_validation_only_, frag_offsets_, geo_return_type_, Data_Namespace::DataMgr::getCudaMgr(), Data_Namespace::AbstractBuffer::getMemoryPtr(), GPU, grid_size_, host_estimator_buffer_, just_explain_, keep_first_, lazy_fetch_info_, query_mem_desc_, row_set_mem_owner_, separate_varlen_storage_valid_, targets_, WktString, and CudaMgr_Namespace::CudaMgr::zeroDeviceMem().

Referenced by ResultSetRowIterator::ResultSetRowIterator(), and TableFunctionExecutionUnit::toString().

63  : targets_(targets)
64  , device_type_(device_type)
65  , device_id_(-1)
66  , query_mem_desc_(query_mem_desc)
68  , fetched_so_far_(0)
69  , drop_first_(0)
70  , keep_first_(0)
71  , row_set_mem_owner_(row_set_mem_owner)
72  , catalog_(catalog)
73  , block_size_(block_size)
74  , grid_size_(grid_size)
75  , data_mgr_(nullptr)
77  , just_explain_(false)
78  , for_validation_only_(false)
79  , cached_row_count_(-1)
bool for_validation_only_
Definition: ResultSet.h:743
GeoReturnType geo_return_type_
Definition: ResultSet.h:748
const Catalog_Namespace::Catalog * catalog_
Definition: ResultSet.h:716
QueryMemoryDescriptor query_mem_desc_
Definition: ResultSet.h:706
size_t keep_first_
Definition: ResultSet.h:712
const bool just_explain_
Definition: ResultSet.h:742
unsigned block_size_
Definition: ResultSet.h:717
std::atomic< int64_t > cached_row_count_
Definition: ResultSet.h:744
const std::vector< TargetInfo > targets_
Definition: ResultSet.h:703
std::shared_ptr< RowSetMemoryOwner > row_set_mem_owner_
Definition: ResultSet.h:713
size_t drop_first_
Definition: ResultSet.h:711
unsigned grid_size_
Definition: ResultSet.h:718
Data_Namespace::DataMgr * data_mgr_
Definition: ResultSet.h:734
const ExecutorDeviceType device_type_
Definition: ResultSet.h:704
size_t fetched_so_far_
Definition: ResultSet.h:710
size_t crt_row_buff_idx_
Definition: ResultSet.h:709
bool separate_varlen_storage_valid_
Definition: ResultSet.h:740
const int device_id_
Definition: ResultSet.h:705
+ Here is the call graph for this function:
+ Here is the caller graph for this function:

◆ ResultSet() [2/5]

ResultSet::ResultSet ( const std::vector< TargetInfo > &  targets,
const std::vector< ColumnLazyFetchInfo > &  lazy_fetch_info,
const std::vector< std::vector< const int8_t *>> &  col_buffers,
const std::vector< std::vector< int64_t >> &  frag_offsets,
const std::vector< int64_t > &  consistent_frag_sizes,
const ExecutorDeviceType  device_type,
const int  device_id,
const QueryMemoryDescriptor &  query_mem_desc,
const std::shared_ptr< RowSetMemoryOwner >  row_set_mem_owner,
const Catalog_Namespace::Catalog *  catalog,
const unsigned  block_size,
const unsigned  grid_size 
)

◆ ResultSet() [3/5]

ResultSet::ResultSet ( const std::shared_ptr< const Analyzer::Estimator > ,
const ExecutorDeviceType  device_type,
const int  device_id,
Data_Namespace::DataMgr *  data_mgr 
)

◆ ResultSet() [4/5]

ResultSet::ResultSet ( const std::string &  explanation)

Definition at line 144 of file ResultSet.cpp.

146  , device_id_(-1)
147  , fetched_so_far_(0)
149  , explanation_(explanation)
150  , just_explain_(true)
151  , for_validation_only_(false)
152  , cached_row_count_(-1)
bool for_validation_only_
Definition: ResultSet.h:743
GeoReturnType geo_return_type_
Definition: ResultSet.h:748
const bool just_explain_
Definition: ResultSet.h:742
std::atomic< int64_t > cached_row_count_
Definition: ResultSet.h:744
std::string explanation_
Definition: ResultSet.h:741
const ExecutorDeviceType device_type_
Definition: ResultSet.h:704
size_t fetched_so_far_
Definition: ResultSet.h:710
bool separate_varlen_storage_valid_
Definition: ResultSet.h:740
const int device_id_
Definition: ResultSet.h:705

◆ ResultSet() [5/5]

ResultSet::ResultSet ( int64_t  queue_time_ms,
int64_t  render_time_ms,
const std::shared_ptr< RowSetMemoryOwner >  row_set_mem_owner 
)

Definition at line 155 of file ResultSet.cpp.

References cached_row_count_, for_validation_only_, geo_return_type_, just_explain_, separate_varlen_storage_valid_, and WktString.

159  , device_id_(-1)
160  , fetched_so_far_(0)
161  , row_set_mem_owner_(row_set_mem_owner)
162  , timings_(QueryExecutionTimings{queue_time_ms, render_time_ms, 0, 0})
164  , just_explain_(true)
165  , for_validation_only_(false)
166  , cached_row_count_(-1)
bool for_validation_only_
Definition: ResultSet.h:743
GeoReturnType geo_return_type_
Definition: ResultSet.h:748
const bool just_explain_
Definition: ResultSet.h:742
std::atomic< int64_t > cached_row_count_
Definition: ResultSet.h:744
std::shared_ptr< RowSetMemoryOwner > row_set_mem_owner_
Definition: ResultSet.h:713
QueryExecutionTimings timings_
Definition: ResultSet.h:719
const ExecutorDeviceType device_type_
Definition: ResultSet.h:704
size_t fetched_so_far_
Definition: ResultSet.h:710
bool separate_varlen_storage_valid_
Definition: ResultSet.h:740
const int device_id_
Definition: ResultSet.h:705

◆ ~ResultSet()

ResultSet::~ResultSet ( )

Definition at line 169 of file ResultSet.cpp.

References appended_storage_, CHECK, CPU, data_mgr_, device_estimator_buffer_, device_type_, Data_Namespace::DataMgr::free(), host_estimator_buffer_, and storage_.

169  {
170  if (storage_) {
171  if (!storage_->buff_is_provided_) {
172  CHECK(storage_->getUnderlyingBuffer());
173  free(storage_->getUnderlyingBuffer());
174  }
175  }
176  for (auto& storage : appended_storage_) {
177  if (storage && !storage->buff_is_provided_) {
178  free(storage->getUnderlyingBuffer());
179  }
180  }
184  }
186  CHECK(data_mgr_);
188  }
189 }
AppendedStorage appended_storage_
Definition: ResultSet.h:708
std::unique_ptr< ResultSetStorage > storage_
Definition: ResultSet.h:707
Data_Namespace::DataMgr * data_mgr_
Definition: ResultSet.h:734
int8_t * host_estimator_buffer_
Definition: ResultSet.h:733
const ExecutorDeviceType device_type_
Definition: ResultSet.h:704
#define CHECK(condition)
Definition: Logger.h:197
void free(AbstractBuffer *buffer)
Definition: DataMgr.cpp:469
Data_Namespace::AbstractBuffer * device_estimator_buffer_
Definition: ResultSet.h:732
+ Here is the call graph for this function:

Member Function Documentation

◆ addCompilationQueueTime()

void ResultSet::addCompilationQueueTime ( const int64_t  compilation_queue_time)

Definition at line 444 of file ResultSet.cpp.

References ResultSet::QueryExecutionTimings::compilation_queue_time, and timings_.

444  {
445  timings_.compilation_queue_time += compilation_queue_time;
446 }
QueryExecutionTimings timings_
Definition: ResultSet.h:719

◆ advanceCursorToNextEntry() [1/2]

void ResultSet::advanceCursorToNextEntry ( ResultSetRowIterator &  iter) const
private

Definition at line 693 of file ResultSetIteration.cpp.

References CHECK_LE, ResultSetRowIterator::crt_row_buff_idx_, drop_first_, entryCount(), ResultSetRowIterator::fetched_so_far_, findStorage(), ResultSetRowIterator::global_entry_idx_, ResultSetRowIterator::global_entry_idx_valid_, keep_first_, and permutation_.

693  {
695  iter.global_entry_idx_valid_ = false;
696  return;
697  }
698 
699  while (iter.crt_row_buff_idx_ < entryCount()) {
700  const auto entry_idx = permutation_.empty() ? iter.crt_row_buff_idx_
702  const auto storage_lookup_result = findStorage(entry_idx);
703  const auto storage = storage_lookup_result.storage_ptr;
704  const auto fixedup_entry_idx = storage_lookup_result.fixedup_entry_idx;
705  if (!storage->isEmptyEntry(fixedup_entry_idx)) {
706  if (iter.fetched_so_far_ < drop_first_) {
707  ++iter.fetched_so_far_;
708  } else {
709  break;
710  }
711  }
712  ++iter.crt_row_buff_idx_;
713  }
714  if (permutation_.empty()) {
716  } else {
718  iter.global_entry_idx_ = iter.crt_row_buff_idx_ == permutation_.size()
719  ? iter.crt_row_buff_idx_
721  }
722 
724 
725  if (iter.global_entry_idx_valid_) {
726  ++iter.crt_row_buff_idx_;
727  ++iter.fetched_so_far_;
728  }
729 }
size_t entryCount() const
size_t keep_first_
Definition: ResultSet.h:712
std::vector< uint32_t > permutation_
Definition: ResultSet.h:714
size_t global_entry_idx_
Definition: ResultSet.h:125
size_t drop_first_
Definition: ResultSet.h:711
StorageLookupResult findStorage(const size_t entry_idx) const
Definition: ResultSet.cpp:660
#define CHECK_LE(x, y)
Definition: Logger.h:208
size_t crt_row_buff_idx_
Definition: ResultSet.h:124
bool global_entry_idx_valid_
Definition: ResultSet.h:126
+ Here is the call graph for this function:

◆ advanceCursorToNextEntry() [2/2]

size_t ResultSet::advanceCursorToNextEntry ( ) const
private

Definition at line 733 of file ResultSetIteration.cpp.

References CHECK_LE, crt_row_buff_idx_, entryCount(), findStorage(), and permutation_.

733  {
734  while (crt_row_buff_idx_ < entryCount()) {
735  const auto entry_idx =
737  const auto storage_lookup_result = findStorage(entry_idx);
738  const auto storage = storage_lookup_result.storage_ptr;
739  const auto fixedup_entry_idx = storage_lookup_result.fixedup_entry_idx;
740  if (!storage->isEmptyEntry(fixedup_entry_idx)) {
741  break;
742  }
744  }
745  if (permutation_.empty()) {
746  return crt_row_buff_idx_;
747  }
751 }
size_t entryCount() const
std::vector< uint32_t > permutation_
Definition: ResultSet.h:714
StorageLookupResult findStorage(const size_t entry_idx) const
Definition: ResultSet.cpp:660
#define CHECK_LE(x, y)
Definition: Logger.h:208
size_t crt_row_buff_idx_
Definition: ResultSet.h:709
+ Here is the call graph for this function:

◆ allocateStorage() [1/3]

const ResultSetStorage * ResultSet::allocateStorage ( ) const

Definition at line 195 of file ResultSet.cpp.

References CHECK, device_type_, QueryMemoryDescriptor::getBufferSizeBytes(), query_mem_desc_, row_set_mem_owner_, storage_, and targets_.

195  {
196  CHECK(!storage_);
198  auto buff =
200  storage_.reset(
201  new ResultSetStorage(targets_, query_mem_desc_, buff, /*buff_is_provided=*/true));
202  return storage_.get();
203 }
QueryMemoryDescriptor query_mem_desc_
Definition: ResultSet.h:706
std::unique_ptr< ResultSetStorage > storage_
Definition: ResultSet.h:707
const std::vector< TargetInfo > targets_
Definition: ResultSet.h:703
std::shared_ptr< RowSetMemoryOwner > row_set_mem_owner_
Definition: ResultSet.h:713
size_t getBufferSizeBytes(const RelAlgExecutionUnit &ra_exe_unit, const unsigned thread_count, const ExecutorDeviceType device_type) const
const ExecutorDeviceType device_type_
Definition: ResultSet.h:704
#define CHECK(condition)
Definition: Logger.h:197
+ Here is the call graph for this function:

◆ allocateStorage() [2/3]

const ResultSetStorage* ResultSet::allocateStorage ( int8_t *  ,
const std::vector< int64_t > &   
) const

◆ allocateStorage() [3/3]

const ResultSetStorage* ResultSet::allocateStorage ( const std::vector< int64_t > &  ) const

◆ append()

void ResultSet::append ( ResultSet that)

Definition at line 235 of file ResultSet.cpp.

References appended_storage_, cached_row_count_, CHECK, CHECK_EQ, chunk_iters_, chunks_, col_buffers_, consistent_frag_sizes_, frag_offsets_, QueryMemoryDescriptor::getEntryCount(), literal_buffers_, query_mem_desc_, separate_varlen_storage_valid_, serialized_varlen_buffer_, and QueryMemoryDescriptor::setEntryCount().

235  {
237  if (!that.storage_) {
238  return;
239  }
240  appended_storage_.push_back(std::move(that.storage_));
243  appended_storage_.back()->query_mem_desc_.getEntryCount());
244  chunks_.insert(chunks_.end(), that.chunks_.begin(), that.chunks_.end());
245  col_buffers_.insert(
246  col_buffers_.end(), that.col_buffers_.begin(), that.col_buffers_.end());
247  frag_offsets_.insert(
248  frag_offsets_.end(), that.frag_offsets_.begin(), that.frag_offsets_.end());
250  that.consistent_frag_sizes_.begin(),
251  that.consistent_frag_sizes_.end());
252  chunk_iters_.insert(
253  chunk_iters_.end(), that.chunk_iters_.begin(), that.chunk_iters_.end());
255  CHECK(that.separate_varlen_storage_valid_);
257  that.serialized_varlen_buffer_.begin(),
258  that.serialized_varlen_buffer_.end());
259  }
260  for (auto& buff : that.literal_buffers_) {
261  literal_buffers_.push_back(std::move(buff));
262  }
263 }
#define CHECK_EQ(x, y)
Definition: Logger.h:205
void setEntryCount(const size_t val)
AppendedStorage appended_storage_
Definition: ResultSet.h:708
QueryMemoryDescriptor query_mem_desc_
Definition: ResultSet.h:706
std::vector< std::shared_ptr< std::list< ChunkIter > > > chunk_iters_
Definition: ResultSet.h:722
std::vector< SerializedVarlenBufferStorage > serialized_varlen_buffer_
Definition: ResultSet.h:739
std::atomic< int64_t > cached_row_count_
Definition: ResultSet.h:744
std::list< std::shared_ptr< Chunk_NS::Chunk > > chunks_
Definition: ResultSet.h:721
std::vector< std::vector< int8_t > > literal_buffers_
Definition: ResultSet.h:725
std::vector< std::vector< std::vector< const int8_t * > > > col_buffers_
Definition: ResultSet.h:727
std::vector< std::vector< int64_t > > consistent_frag_sizes_
Definition: ResultSet.h:729
#define CHECK(condition)
Definition: Logger.h:197
std::vector< std::vector< std::vector< int64_t > > > frag_offsets_
Definition: ResultSet.h:728
bool separate_varlen_storage_valid_
Definition: ResultSet.h:740
+ Here is the call graph for this function:

◆ baselineSort()

void ResultSet::baselineSort ( const std::list< Analyzer::OrderEntry > &  order_entries,
const size_t  top_n,
const Executor *  executor 
)
private

Referenced by sort().

+ Here is the caller graph for this function:

◆ binSearchRowCount()

size_t ResultSet::binSearchRowCount ( ) const
private

Definition at line 345 of file ResultSet.cpp.

References appended_storage_, drop_first_, anonymous_namespace{ResultSet.cpp}::get_truncated_row_count(), getLimit(), and storage_.

Referenced by rowCount().

345  {
346  if (!storage_) {
347  return 0;
348  }
349 
350  size_t row_count = storage_->binSearchRowCount();
351  for (auto& s : appended_storage_) {
352  row_count += s->binSearchRowCount();
353  }
354 
355  return get_truncated_row_count(row_count, getLimit(), drop_first_);
356 }
AppendedStorage appended_storage_
Definition: ResultSet.h:708
std::unique_ptr< ResultSetStorage > storage_
Definition: ResultSet.h:707
size_t get_truncated_row_count(size_t total_row_count, size_t limit, size_t offset)
Definition: ResultSet.cpp:284
size_t drop_first_
Definition: ResultSet.h:711
size_t getLimit() const
Definition: ResultSet.cpp:954
+ Here is the call graph for this function:
+ Here is the caller graph for this function:

◆ canUseFastBaselineSort()

bool ResultSet::canUseFastBaselineSort ( const std::list< Analyzer::OrderEntry > &  order_entries,
const size_t  top_n 
)
private

Referenced by sort().

+ Here is the caller graph for this function:

◆ colCount()

size_t ResultSet::colCount ( ) const

Definition at line 269 of file ResultSet.cpp.

References just_explain_, and targets_.

269  {
270  return just_explain_ ? 1 : targets_.size();
271 }
const bool just_explain_
Definition: ResultSet.h:742
const std::vector< TargetInfo > targets_
Definition: ResultSet.h:703

◆ copyColumnIntoBuffer()

void ResultSet::copyColumnIntoBuffer ( const size_t  column_idx,
int8_t *  output_buffer,
const size_t  output_buffer_size 
) const

For each specified column, this function goes through all available storages and copies its content into a contiguous output_buffer

Definition at line 1111 of file ResultSetIteration.cpp.

References appended_storage_, CHECK, CHECK_LT, QueryMemoryDescriptor::getPaddedSlotWidthBytes(), QueryMemoryDescriptor::getSlotCount(), isDirectColumnarConversionPossible(), query_mem_desc_, and storage_.

1113  {
1115  CHECK_LT(column_idx, query_mem_desc_.getSlotCount());
1116  CHECK(output_buffer_size > 0);
1117  CHECK(output_buffer);
1118  const auto column_width_size = query_mem_desc_.getPaddedSlotWidthBytes(column_idx);
1119  size_t out_buff_offset = 0;
1120 
1121  // the main storage:
1122  const size_t crt_storage_row_count = storage_->query_mem_desc_.getEntryCount();
1123  const size_t crt_buffer_size = crt_storage_row_count * column_width_size;
1124  const size_t column_offset = storage_->query_mem_desc_.getColOffInBytes(column_idx);
1125  const int8_t* storage_buffer = storage_->getUnderlyingBuffer() + column_offset;
1126  CHECK(crt_buffer_size <= output_buffer_size);
1127  std::memcpy(output_buffer, storage_buffer, crt_buffer_size);
1128 
1129  out_buff_offset += crt_buffer_size;
1130 
1131  // the appended storages:
1132  for (size_t i = 0; i < appended_storage_.size(); i++) {
1133  const size_t crt_storage_row_count =
1134  appended_storage_[i]->query_mem_desc_.getEntryCount();
1135  if (crt_storage_row_count == 0) {
1136  // skip an empty appended storage
1137  continue;
1138  }
1139  CHECK_LT(out_buff_offset, output_buffer_size);
1140  const size_t crt_buffer_size = crt_storage_row_count * column_width_size;
1141  const size_t column_offset =
1142  appended_storage_[i]->query_mem_desc_.getColOffInBytes(column_idx);
1143  const int8_t* storage_buffer =
1144  appended_storage_[i]->getUnderlyingBuffer() + column_offset;
1145  CHECK(out_buff_offset + crt_buffer_size <= output_buffer_size);
1146  std::memcpy(output_buffer + out_buff_offset, storage_buffer, crt_buffer_size);
1147 
1148  out_buff_offset += crt_buffer_size;
1149  }
1150 }
AppendedStorage appended_storage_
Definition: ResultSet.h:708
QueryMemoryDescriptor query_mem_desc_
Definition: ResultSet.h:706
std::unique_ptr< ResultSetStorage > storage_
Definition: ResultSet.h:707
bool isDirectColumnarConversionPossible() const
Definition: ResultSet.cpp:973
const int8_t getPaddedSlotWidthBytes(const size_t slot_idx) const
#define CHECK_LT(x, y)
Definition: Logger.h:207
#define CHECK(condition)
Definition: Logger.h:197
+ Here is the call graph for this function:

◆ create_active_buffer_set()

void ResultSet::create_active_buffer_set ( BufferSet count_distinct_active_buffer_set) const
private

◆ createComparator()

std::function<bool(const uint32_t, const uint32_t)> ResultSet::createComparator ( const std::list< Analyzer::OrderEntry > &  order_entries,
const bool  use_heap,
const Executor executor 
)
inlineprivate

Definition at line 637 of file ResultSet.h.

References DEBUG_TIMER.

Referenced by parallelTop(), and sort().

640  {
641  auto timer = DEBUG_TIMER(__func__);
644  std::make_unique<ResultSetComparator<ColumnWiseTargetAccessor>>(
645  order_entries, use_heap, this, executor);
646  return [this](const uint32_t lhs, const uint32_t rhs) -> bool {
647  return (*this->column_wise_comparator_)(lhs, rhs);
648  };
649  } else {
650  row_wise_comparator_ = std::make_unique<ResultSetComparator<RowWiseTargetAccessor>>(
651  order_entries, use_heap, this, executor);
652  return [this](const uint32_t lhs, const uint32_t rhs) -> bool {
653  return (*this->row_wise_comparator_)(lhs, rhs);
654  };
655  }
656  }
std::unique_ptr< ResultSetComparator< ColumnWiseTargetAccessor > > column_wise_comparator_
Definition: ResultSet.h:753
QueryMemoryDescriptor query_mem_desc_
Definition: ResultSet.h:706
std::unique_ptr< ResultSetComparator< RowWiseTargetAccessor > > row_wise_comparator_
Definition: ResultSet.h:752
#define DEBUG_TIMER(name)
Definition: Logger.h:313
+ Here is the caller graph for this function:

◆ definitelyHasNoRows()

bool ResultSet::definitelyHasNoRows ( ) const

Definition at line 393 of file ResultSet.cpp.

References estimator_, just_explain_, and storage_.

393  {
394  return !storage_ && !estimator_ && !just_explain_;
395 }
std::unique_ptr< ResultSetStorage > storage_
Definition: ResultSet.h:707
const bool just_explain_
Definition: ResultSet.h:742
const std::shared_ptr< const Analyzer::Estimator > estimator_
Definition: ResultSet.h:731

◆ didOutputColumnar()

bool ResultSet::didOutputColumnar ( ) const
inline

Definition at line 377 of file ResultSet.h.

377 { return this->query_mem_desc_.didOutputColumnar(); }
QueryMemoryDescriptor query_mem_desc_
Definition: ResultSet.h:706

◆ doBaselineSort()

void ResultSet::doBaselineSort ( const ExecutorDeviceType  device_type,
const std::list< Analyzer::OrderEntry > &  order_entries,
const size_t  top_n,
const Executor executor 
)
private

Referenced by sort().

+ Here is the caller graph for this function:

◆ dropFirstN()

void ResultSet::dropFirstN ( const size_t  n)

Definition at line 51 of file ResultSet.cpp.

References CHECK_EQ.

51  {
53  drop_first_ = n;
54 }
#define CHECK_EQ(x, y)
Definition: Logger.h:205
std::atomic< int64_t > cached_row_count_
Definition: ResultSet.h:744
size_t drop_first_
Definition: ResultSet.h:711

◆ entryCount()

size_t ResultSet::entryCount ( ) const

Definition at line 753 of file ResultSetIteration.cpp.

References QueryMemoryDescriptor::getEntryCount(), permutation_, and query_mem_desc_.

Referenced by advanceCursorToNextEntry(), parallelRowCount(), rowCount(), and sort().

753  {
754  return permutation_.empty() ? query_mem_desc_.getEntryCount() : permutation_.size();
755 }
QueryMemoryDescriptor query_mem_desc_
Definition: ResultSet.h:706
std::vector< uint32_t > permutation_
Definition: ResultSet.h:714
+ Here is the call graph for this function:
+ Here is the caller graph for this function:

◆ fillOneEntry()

void ResultSet::fillOneEntry ( const std::vector< int64_t > &  entry)
inline

Definition at line 321 of file ResultSet.h.

References CHECK.

321  {
322  CHECK(storage_);
323  if (storage_->query_mem_desc_.didOutputColumnar()) {
324  storage_->fillOneEntryColWise(entry);
325  } else {
326  storage_->fillOneEntryRowWise(entry);
327  }
328  }
std::unique_ptr< ResultSetStorage > storage_
Definition: ResultSet.h:707
#define CHECK(condition)
Definition: Logger.h:197

◆ findStorage()

ResultSet::StorageLookupResult ResultSet::findStorage ( const size_t  entry_idx) const
private

Definition at line 660 of file ResultSet.cpp.

References appended_storage_, getStorageIndex(), and storage_.

Referenced by advanceCursorToNextEntry(), initPermutationBuffer(), and makeGeoTargetValue().

660  {
661  auto [stg_idx, fixedup_entry_idx] = getStorageIndex(entry_idx);
662  return {stg_idx ? appended_storage_[stg_idx - 1].get() : storage_.get(),
663  fixedup_entry_idx,
664  stg_idx};
665 }
AppendedStorage appended_storage_
Definition: ResultSet.h:708
std::unique_ptr< ResultSetStorage > storage_
Definition: ResultSet.h:707
std::pair< size_t, size_t > getStorageIndex(const size_t entry_idx) const
Definition: ResultSet.cpp:635
+ Here is the call graph for this function:
+ Here is the caller graph for this function:

◆ fixupCountDistinctPointers()

void ResultSet::fixupCountDistinctPointers ( )
private

◆ fixupQueryMemoryDescriptor()

QueryMemoryDescriptor ResultSet::fixupQueryMemoryDescriptor ( const QueryMemoryDescriptor query_mem_desc)
static

Definition at line 482 of file ResultSet.cpp.

References QueryMemoryDescriptor::didOutputColumnar(), and QueryMemoryDescriptor::resetGroupColWidths().

Referenced by GpuSharedMemCodeBuilder::codegenInitialization(), GpuSharedMemCodeBuilder::codegenReduction(), QueryExecutionContext::groupBufferToDeinterleavedResults(), QueryMemoryInitializer::initGroups(), QueryMemoryInitializer::QueryMemoryInitializer(), and Executor::reduceMultiDeviceResults().

483  {
484  auto query_mem_desc_copy = query_mem_desc;
485  query_mem_desc_copy.resetGroupColWidths(
486  std::vector<int8_t>(query_mem_desc_copy.getGroupbyColCount(), 8));
487  if (query_mem_desc.didOutputColumnar()) {
488  return query_mem_desc_copy;
489  }
490  query_mem_desc_copy.alignPaddedSlots();
491  return query_mem_desc_copy;
492 }
void resetGroupColWidths(const std::vector< int8_t > &new_group_col_widths)
+ Here is the call graph for this function:
+ Here is the caller graph for this function:

◆ getBufferSizeBytes()

size_t ResultSet::getBufferSizeBytes ( const ExecutorDeviceType  device_type) const

Definition at line 757 of file ResultSetIteration.cpp.

References CHECK, and storage_.

757  {
758  CHECK(storage_);
759  return storage_->query_mem_desc_.getBufferSizeBytes(device_type);
760 }
std::unique_ptr< ResultSetStorage > storage_
Definition: ResultSet.h:707
#define CHECK(condition)
Definition: Logger.h:197

◆ getColType()

SQLTypeInfo ResultSet::getColType ( const size_t  col_idx) const

Definition at line 273 of file ResultSet.cpp.

References CHECK_LT, just_explain_, kAVG, kDOUBLE, kTEXT, and targets_.

273  {
274  if (just_explain_) {
275  return SQLTypeInfo(kTEXT, false);
276  }
277  CHECK_LT(col_idx, targets_.size());
278  return targets_[col_idx].agg_kind == kAVG ? SQLTypeInfo(kDOUBLE, false)
279  : targets_[col_idx].sql_type;
280 }
const bool just_explain_
Definition: ResultSet.h:742
const std::vector< TargetInfo > targets_
Definition: ResultSet.h:703
#define CHECK_LT(x, y)
Definition: Logger.h:207
Definition: sqltypes.h:51
Definition: sqldefs.h:72

◆ getColumnarBaselineEntryAt() [1/2]

template<typename ENTRY_TYPE >
ENTRY_TYPE ResultSet::getColumnarBaselineEntryAt ( const size_t  row_idx,
const size_t  target_idx,
const size_t  slot_idx 
) const
private

◆ getColumnarBaselineEntryAt() [2/2]

template<typename ENTRY_TYPE >
ENTRY_TYPE ResultSet::getColumnarBaselineEntryAt ( const size_t  row_idx,
const size_t  target_idx,
const size_t  slot_idx 
) const

Directly accesses the result set's storage buffer for a particular data type (columnar output, baseline hash group by)

NOTE: Currently, only used in direct columnarization

Definition at line 1283 of file ResultSetIteration.cpp.

References CHECK_NE, and storage_.

1285  {
1286  CHECK_NE(storage_->query_mem_desc_.targetGroupbyIndicesSize(), size_t(0));
1287  const auto key_width = storage_->query_mem_desc_.getEffectiveKeyWidth();
1288  const auto column_offset =
1289  (storage_->query_mem_desc_.getTargetGroupbyIndex(target_idx) < 0)
1290  ? storage_->query_mem_desc_.getColOffInBytes(slot_idx)
1291  : storage_->query_mem_desc_.getTargetGroupbyIndex(target_idx) * key_width *
1292  storage_->query_mem_desc_.getEntryCount();
1293  const auto column_buffer = storage_->getUnderlyingBuffer() + column_offset;
1294  return reinterpret_cast<const ENTRY_TYPE*>(column_buffer)[row_idx];
1295 }
std::unique_ptr< ResultSetStorage > storage_
Definition: ResultSet.h:707
#define CHECK_NE(x, y)
Definition: Logger.h:206

◆ getColumnarBuffer()

const int8_t * ResultSet::getColumnarBuffer ( size_t  column_idx) const

Definition at line 998 of file ResultSet.cpp.

References CHECK, QueryMemoryDescriptor::getColOffInBytes(), isZeroCopyColumnarConversionPossible(), query_mem_desc_, and storage_.

998  {
1000  return storage_->getUnderlyingBuffer() + query_mem_desc_.getColOffInBytes(column_idx);
1001 }
QueryMemoryDescriptor query_mem_desc_
Definition: ResultSet.h:706
std::unique_ptr< ResultSetStorage > storage_
Definition: ResultSet.h:707
bool isZeroCopyColumnarConversionPossible(size_t column_idx) const
Definition: ResultSet.cpp:991
#define CHECK(condition)
Definition: Logger.h:197
size_t getColOffInBytes(const size_t col_idx) const
+ Here is the call graph for this function:

◆ getColumnarPerfectHashEntryAt() [1/2]

template<typename ENTRY_TYPE >
ENTRY_TYPE ResultSet::getColumnarPerfectHashEntryAt ( const size_t  row_idx,
const size_t  target_idx,
const size_t  slot_idx 
) const
private

◆ getColumnarPerfectHashEntryAt() [2/2]

template<typename ENTRY_TYPE >
ENTRY_TYPE ResultSet::getColumnarPerfectHashEntryAt ( const size_t  row_idx,
const size_t  target_idx,
const size_t  slot_idx 
) const

Directly accesses the result set's storage buffer for a particular data type (columnar output, perfect hash group by)

NOTE: Currently, only used in direct columnarization

Definition at line 1229 of file ResultSetIteration.cpp.

References storage_.

1231  {
1232  const size_t column_offset = storage_->query_mem_desc_.getColOffInBytes(slot_idx);
1233  const int8_t* storage_buffer = storage_->getUnderlyingBuffer() + column_offset;
1234  return reinterpret_cast<const ENTRY_TYPE*>(storage_buffer)[row_idx];
1235 }
std::unique_ptr< ResultSetStorage > storage_
Definition: ResultSet.h:707

◆ getColumnFrag()

const std::vector< const int8_t * > & ResultSet::getColumnFrag ( const size_t  storage_idx,
const size_t  col_logical_idx,
int64_t &  global_idx 
) const
private

Definition at line 1082 of file ResultSetIteration.cpp.

References CHECK_EQ, CHECK_GE, CHECK_LE, CHECK_LT, col_buffers_, consistent_frag_sizes_, frag_offsets_, and anonymous_namespace{ResultSetIteration.cpp}::get_frag_id_and_local_idx().

Referenced by lazyReadInt(), makeGeoTargetValue(), makeTargetValue(), and makeVarlenTargetValue().

1084  {
1085  CHECK_LT(static_cast<size_t>(storage_idx), col_buffers_.size());
1086  if (col_buffers_[storage_idx].size() > 1) {
1087  int64_t frag_id = 0;
1088  int64_t local_idx = global_idx;
1089  if (consistent_frag_sizes_[storage_idx][col_logical_idx] != -1) {
1090  frag_id = global_idx / consistent_frag_sizes_[storage_idx][col_logical_idx];
1091  local_idx = global_idx % consistent_frag_sizes_[storage_idx][col_logical_idx];
1092  } else {
1093  std::tie(frag_id, local_idx) = get_frag_id_and_local_idx(
1094  frag_offsets_[storage_idx], col_logical_idx, global_idx);
1095  CHECK_LE(local_idx, global_idx);
1096  }
1097  CHECK_GE(frag_id, int64_t(0));
1098  CHECK_LT(static_cast<size_t>(frag_id), col_buffers_[storage_idx].size());
1099  global_idx = local_idx;
1100  return col_buffers_[storage_idx][frag_id];
1101  } else {
1102  CHECK_EQ(size_t(1), col_buffers_[storage_idx].size());
1103  return col_buffers_[storage_idx][0];
1104  }
1105 }
#define CHECK_EQ(x, y)
Definition: Logger.h:205
#define CHECK_GE(x, y)
Definition: Logger.h:210
#define CHECK_LT(x, y)
Definition: Logger.h:207
#define CHECK_LE(x, y)
Definition: Logger.h:208
std::vector< std::vector< std::vector< const int8_t * > > > col_buffers_
Definition: ResultSet.h:727
std::vector< std::vector< int64_t > > consistent_frag_sizes_
Definition: ResultSet.h:729
std::vector< std::vector< std::vector< int64_t > > > frag_offsets_
Definition: ResultSet.h:728
std::pair< int64_t, int64_t > get_frag_id_and_local_idx(const std::vector< std::vector< T >> &frag_offsets, const size_t tab_or_col_idx, const int64_t global_idx)
+ Here is the call graph for this function:
+ Here is the caller graph for this function:

◆ getColumnInternal()

InternalTargetValue ResultSet::getColumnInternal ( const int8_t *  buff,
const size_t  entry_idx,
const size_t  target_logical_idx,
const StorageLookupResult storage_lookup_result 
) const
private

◆ getCurrentRowBufferIndex()

size_t ResultSet::getCurrentRowBufferIndex ( ) const

Definition at line 227 of file ResultSet.cpp.

References crt_row_buff_idx_.

227  {
228  if (crt_row_buff_idx_ == 0) {
229  throw std::runtime_error("current row buffer iteration index is undefined");
230  }
231  return crt_row_buff_idx_ - 1;
232 }
size_t crt_row_buff_idx_
Definition: ResultSet.h:709

◆ getDataManager()

Data_Namespace::DataMgr* ResultSet::getDataManager ( ) const
private

◆ getDeviceEstimatorBuffer()

int8_t * ResultSet::getDeviceEstimatorBuffer ( ) const

Definition at line 411 of file ResultSet.cpp.

References CHECK, device_estimator_buffer_, device_type_, Data_Namespace::AbstractBuffer::getMemoryPtr(), and GPU.

411  {
415 }
virtual int8_t * getMemoryPtr()=0
const ExecutorDeviceType device_type_
Definition: ResultSet.h:704
#define CHECK(condition)
Definition: Logger.h:197
Data_Namespace::AbstractBuffer * device_estimator_buffer_
Definition: ResultSet.h:732
+ Here is the call graph for this function:

◆ getDeviceId()

int ResultSet::getDeviceId ( ) const

Definition at line 478 of file ResultSet.cpp.

References device_id_.

478  {
479  return device_id_;
480 }
const int device_id_
Definition: ResultSet.h:705

◆ getDeviceType()

ExecutorDeviceType ResultSet::getDeviceType ( ) const

Definition at line 191 of file ResultSet.cpp.

References device_type_.

191  {
192  return device_type_;
193 }
const ExecutorDeviceType device_type_
Definition: ResultSet.h:704

◆ getDistinctBufferRefFromBufferRowwise()

int64_t ResultSet::getDistinctBufferRefFromBufferRowwise ( int8_t *  rowwise_target_ptr,
const TargetInfo target_info 
) const
private

◆ getEntryAt() [1/2]

template<typename ENTRY_TYPE , QueryDescriptionType QUERY_TYPE, bool COLUMNAR_FORMAT>
ENTRY_TYPE ResultSet::getEntryAt ( const size_t  row_idx,
const size_t  target_idx,
const size_t  slot_idx 
) const

◆ getEntryAt() [2/2]

template<typename ENTRY_TYPE , QueryDescriptionType QUERY_TYPE, bool COLUMNAR_FORMAT>
ENTRY_TYPE ResultSet::getEntryAt ( const size_t  row_idx,
const size_t  target_idx,
const size_t  slot_idx 
) const

Definition at line 1153 of file ResultSetIteration.cpp.

References GroupByBaselineHash, GroupByPerfectHash, and UNREACHABLE.

1155  {
1156  if constexpr (QUERY_TYPE == QueryDescriptionType::GroupByPerfectHash) { // NOLINT
1157  if constexpr (COLUMNAR_FORMAT) { // NOLINT
1158  return getColumnarPerfectHashEntryAt<ENTRY_TYPE>(row_idx, target_idx, slot_idx);
1159  } else {
1160  return getRowWisePerfectHashEntryAt<ENTRY_TYPE>(row_idx, target_idx, slot_idx);
1161  }
1162  } else if constexpr (QUERY_TYPE == QueryDescriptionType::GroupByBaselineHash) {
1163  if constexpr (COLUMNAR_FORMAT) { // NOLINT
1164  return getColumnarBaselineEntryAt<ENTRY_TYPE>(row_idx, target_idx, slot_idx);
1165  } else {
1166  return getRowWiseBaselineEntryAt<ENTRY_TYPE>(row_idx, target_idx, slot_idx);
1167  }
1168  } else {
1169  UNREACHABLE() << "Invalid query type is used";
1170  return 0;
1171  }
1172 }
#define UNREACHABLE()
Definition: Logger.h:241

◆ getGeoReturnType()

GeoReturnType ResultSet::getGeoReturnType ( ) const
inline

Definition at line 368 of file ResultSet.h.

368 { return geo_return_type_; }
GeoReturnType geo_return_type_
Definition: ResultSet.h:748

◆ getGpuCount()

int ResultSet::getGpuCount ( ) const
private

Referenced by sort().

+ Here is the caller graph for this function:

◆ getHostEstimatorBuffer()

int8_t * ResultSet::getHostEstimatorBuffer ( ) const

Definition at line 417 of file ResultSet.cpp.

References host_estimator_buffer_.

417  {
418  return host_estimator_buffer_;
419 }
int8_t * host_estimator_buffer_
Definition: ResultSet.h:733

◆ getLazyFetchInfo()

const std::vector<ColumnLazyFetchInfo>& ResultSet::getLazyFetchInfo ( ) const
inline

Definition at line 397 of file ResultSet.h.

397  {
398  return lazy_fetch_info_;
399  }
const std::vector< ColumnLazyFetchInfo > lazy_fetch_info_
Definition: ResultSet.h:726

◆ getLimit()

size_t ResultSet::getLimit ( ) const

Definition at line 954 of file ResultSet.cpp.

References keep_first_.

Referenced by binSearchRowCount(), and parallelRowCount().

954  {
955  return keep_first_;
956 }
size_t keep_first_
Definition: ResultSet.h:712
+ Here is the caller graph for this function:

◆ getNDVEstimator()

size_t ResultSet::getNDVEstimator ( ) const

Definition at line 33 of file CardinalityEstimator.cpp.

References bitmap_set_size(), CHECK, and CHECK_LE.

33  {
34  CHECK(dynamic_cast<const Analyzer::NDVEstimator*>(estimator_.get()));
36  auto bits_set = bitmap_set_size(host_estimator_buffer_, estimator_->getBufferSize());
37  const auto total_bits = estimator_->getBufferSize() * 8;
38  CHECK_LE(bits_set, total_bits);
39  const auto unset_bits = total_bits - bits_set;
40  const auto ratio = static_cast<double>(unset_bits) / total_bits;
41  if (ratio == 0.) {
42  throw std::runtime_error("Failed to get a high quality cardinality estimation");
43  }
44  return -static_cast<double>(total_bits) * log(ratio);
45 }
const std::shared_ptr< const Analyzer::Estimator > estimator_
Definition: ResultSet.h:731
#define CHECK_LE(x, y)
Definition: Logger.h:208
int8_t * host_estimator_buffer_
Definition: ResultSet.h:733
#define CHECK(condition)
Definition: Logger.h:197
size_t bitmap_set_size(const int8_t *bitmap, const size_t bitmap_byte_sz)
Definition: CountDistinct.h:37
+ Here is the call graph for this function:

◆ getNextRow()

std::vector< TargetValue > ResultSet::getNextRow ( const bool  translate_strings,
const bool  decimal_to_double 
) const

Definition at line 298 of file ResultSetIteration.cpp.

299  {
300  std::lock_guard<std::mutex> lock(row_iteration_mutex_);
301  if (!storage_ && !just_explain_) {
302  return {};
303  }
304  return getNextRowUnlocked(translate_strings, decimal_to_double);
305 }
std::mutex row_iteration_mutex_
Definition: ResultSet.h:745
double decimal_to_double(const SQLTypeInfo &otype, int64_t oval)
std::unique_ptr< ResultSetStorage > storage_
Definition: ResultSet.h:707
std::vector< TargetValue > getNextRowUnlocked(const bool translate_strings, const bool decimal_to_double) const
const bool just_explain_
Definition: ResultSet.h:742

◆ getNextRowImpl()

std::vector< TargetValue > ResultSet::getNextRowImpl ( const bool  translate_strings,
const bool  decimal_to_double 
) const
private

Definition at line 320 of file ResultSetIteration.cpp.

References CHECK, and CHECK_EQ.

321  {
322  size_t entry_buff_idx = 0;
323  do {
325  return {};
326  }
327 
328  entry_buff_idx = advanceCursorToNextEntry();
329 
330  if (crt_row_buff_idx_ >= entryCount()) {
332  return {};
333  }
335  ++fetched_so_far_;
336 
337  } while (drop_first_ && fetched_so_far_ <= drop_first_);
338 
339  auto row = getRowAt(entry_buff_idx, translate_strings, decimal_to_double, false);
340  CHECK(!row.empty());
341 
342  return row;
343 }
#define CHECK_EQ(x, y)
Definition: Logger.h:205
double decimal_to_double(const SQLTypeInfo &otype, int64_t oval)
size_t entryCount() const
size_t keep_first_
Definition: ResultSet.h:712
std::vector< TargetValue > getRowAt(const size_t index) const
size_t drop_first_
Definition: ResultSet.h:711
#define CHECK(condition)
Definition: Logger.h:197
size_t fetched_so_far_
Definition: ResultSet.h:710
size_t crt_row_buff_idx_
Definition: ResultSet.h:709
size_t advanceCursorToNextEntry() const

◆ getNextRowUnlocked()

std::vector< TargetValue > ResultSet::getNextRowUnlocked ( const bool  translate_strings,
const bool  decimal_to_double 
) const
private

Definition at line 307 of file ResultSetIteration.cpp.

Referenced by rowCount().

309  {
310  if (just_explain_) {
311  if (fetched_so_far_) {
312  return {};
313  }
314  fetched_so_far_ = 1;
315  return {explanation_};
316  }
317  return getNextRowImpl(translate_strings, decimal_to_double);
318 }
std::vector< TargetValue > getNextRowImpl(const bool translate_strings, const bool decimal_to_double) const
double decimal_to_double(const SQLTypeInfo &otype, int64_t oval)
const bool just_explain_
Definition: ResultSet.h:742
std::string explanation_
Definition: ResultSet.h:741
size_t fetched_so_far_
Definition: ResultSet.h:710
+ Here is the caller graph for this function:

◆ getOneColRow()

OneIntegerColumnRow ResultSet::getOneColRow ( const size_t  index) const

Definition at line 236 of file ResultSetIteration.cpp.

References align_to_int64(), CHECK, get_key_bytes_rowwise(), getRowAt(), and row_ptr_rowwise().

236  {
237  const auto storage_lookup_result = findStorage(global_entry_idx);
238  const auto storage = storage_lookup_result.storage_ptr;
239  const auto local_entry_idx = storage_lookup_result.fixedup_entry_idx;
240  if (storage->isEmptyEntry(local_entry_idx)) {
241  return {0, false};
242  }
243  const auto buff = storage->buff_;
244  CHECK(buff);
246  const auto keys_ptr = row_ptr_rowwise(buff, query_mem_desc_, local_entry_idx);
247  const auto key_bytes_with_padding =
249  const auto rowwise_target_ptr = keys_ptr + key_bytes_with_padding;
250  const auto tv = getTargetValueFromBufferRowwise(rowwise_target_ptr,
251  keys_ptr,
252  global_entry_idx,
253  targets_.front(),
254  0,
255  0,
256  false,
257  false,
258  false);
259  const auto scalar_tv = boost::get<ScalarTargetValue>(&tv);
260  CHECK(scalar_tv);
261  const auto ival_ptr = boost::get<int64_t>(scalar_tv);
262  CHECK(ival_ptr);
263  return {*ival_ptr, true};
264 }
QueryMemoryDescriptor query_mem_desc_
Definition: ResultSet.h:706
const std::vector< TargetInfo > targets_
Definition: ResultSet.h:703
StorageLookupResult findStorage(const size_t entry_idx) const
Definition: ResultSet.cpp:660
T row_ptr_rowwise(T buff, const QueryMemoryDescriptor &query_mem_desc, const size_t entry_idx)
TargetValue getTargetValueFromBufferRowwise(int8_t *rowwise_target_ptr, int8_t *keys_ptr, const size_t entry_buff_idx, const TargetInfo &target_info, const size_t target_logical_idx, const size_t slot_idx, const bool translate_strings, const bool decimal_to_double, const bool fixup_count_distinct_pointers) const
#define CHECK(condition)
Definition: Logger.h:197
size_t get_key_bytes_rowwise(const QueryMemoryDescriptor &query_mem_desc)
FORCE_INLINE HOST DEVICE T align_to_int64(T addr)
+ Here is the call graph for this function:

◆ getPaddedSlotWidthBytes()

const int8_t ResultSet::getPaddedSlotWidthBytes ( const size_t  slot_idx) const
inline

Definition at line 386 of file ResultSet.h.

386  {
387  return query_mem_desc_.getPaddedSlotWidthBytes(slot_idx);
388  }
QueryMemoryDescriptor query_mem_desc_
Definition: ResultSet.h:706
const int8_t getPaddedSlotWidthBytes(const size_t slot_idx) const

◆ getPermutationBuffer()

const std::vector< uint32_t > & ResultSet::getPermutationBuffer ( ) const

Definition at line 590 of file ResultSet.cpp.

References permutation_.

590  {
591  return permutation_;
592 }
std::vector< uint32_t > permutation_
Definition: ResultSet.h:714

◆ getQueryDescriptionType()

QueryDescriptionType ResultSet::getQueryDescriptionType ( ) const
inline

Definition at line 382 of file ResultSet.h.

382  {
384  }
QueryMemoryDescriptor query_mem_desc_
Definition: ResultSet.h:706
QueryDescriptionType getQueryDescriptionType() const

◆ getQueryMemDesc()

const QueryMemoryDescriptor & ResultSet::getQueryMemDesc ( ) const

Definition at line 397 of file ResultSet.cpp.

References CHECK, and storage_.

397  {
398  CHECK(storage_);
399  return storage_->query_mem_desc_;
400 }
std::unique_ptr< ResultSetStorage > storage_
Definition: ResultSet.h:707
#define CHECK(condition)
Definition: Logger.h:197

◆ getQueueTime()

◆ getRenderTime()

int64_t ResultSet::getRenderTime ( ) const

Definition at line 453 of file ResultSet.cpp.

References ResultSet::QueryExecutionTimings::render_time, and timings_.

453  {
454  return timings_.render_time;
455 }
QueryExecutionTimings timings_
Definition: ResultSet.h:719

◆ getRowAt() [1/3]

std::vector<TargetValue> ResultSet::getRowAt ( const size_t  index) const

Referenced by result_set::get_byteoff_of_slot(), and getOneColRow().

+ Here is the caller graph for this function:

◆ getRowAt() [2/3]

TargetValue ResultSet::getRowAt ( const size_t  row_idx,
const size_t  col_idx,
const bool  translate_strings,
const bool  decimal_to_double = true 
) const

◆ getRowAt() [3/3]

std::vector<TargetValue> ResultSet::getRowAt ( const size_t  index,
const bool  translate_strings,
const bool  decimal_to_double,
const bool  fixup_count_distinct_pointers,
const std::vector< bool > &  targets_to_skip = {} 
) const
private

◆ getRowAtNoTranslations()

std::vector< TargetValue > ResultSet::getRowAtNoTranslations ( const size_t  index,
const std::vector< bool > &  targets_to_skip = {} 
) const

Definition at line 275 of file ResultSetIteration.cpp.

277  {
278  if (logical_index >= entryCount()) {
279  return {};
280  }
281  const auto entry_idx =
282  permutation_.empty() ? logical_index : permutation_[logical_index];
283  return getRowAt(entry_idx, false, false, false, targets_to_skip);
284 }
size_t entryCount() const
std::vector< TargetValue > getRowAt(const size_t index) const
std::vector< uint32_t > permutation_
Definition: ResultSet.h:714

◆ getRowSetMemOwner()

std::shared_ptr<RowSetMemoryOwner> ResultSet::getRowSetMemOwner ( ) const
inline

Definition at line 342 of file ResultSet.h.

342  {
343  return row_set_mem_owner_;
344  }
std::shared_ptr< RowSetMemoryOwner > row_set_mem_owner_
Definition: ResultSet.h:713

◆ getRowWiseBaselineEntryAt() [1/2]

template<typename ENTRY_TYPE >
ENTRY_TYPE ResultSet::getRowWiseBaselineEntryAt ( const size_t  row_idx,
const size_t  target_idx,
const size_t  slot_idx 
) const
private

◆ getRowWiseBaselineEntryAt() [2/2]

template<typename ENTRY_TYPE >
ENTRY_TYPE ResultSet::getRowWiseBaselineEntryAt ( const size_t  row_idx,
const size_t  target_idx,
const size_t  slot_idx 
) const

Directly accesses the result set's storage buffer for a particular data type (row-wise output, baseline hash group by)

NOTE: Currently, only used in direct columnarization

Definition at line 1261 of file ResultSetIteration.cpp.

References CHECK_NE, row_ptr_rowwise(), and storage_.

1263  {
1264  CHECK_NE(storage_->query_mem_desc_.targetGroupbyIndicesSize(), size_t(0));
1265  const auto key_width = storage_->query_mem_desc_.getEffectiveKeyWidth();
1266  auto keys_ptr = row_ptr_rowwise(
1267  storage_->getUnderlyingBuffer(), storage_->query_mem_desc_, row_idx);
1268  const auto column_offset =
1269  (storage_->query_mem_desc_.getTargetGroupbyIndex(target_idx) < 0)
1270  ? storage_->query_mem_desc_.getColOffInBytes(slot_idx)
1271  : storage_->query_mem_desc_.getTargetGroupbyIndex(target_idx) * key_width;
1272  const auto storage_buffer = keys_ptr + column_offset;
1273  return *reinterpret_cast<const ENTRY_TYPE*>(storage_buffer);
1274 }
std::unique_ptr< ResultSetStorage > storage_
Definition: ResultSet.h:707
#define CHECK_NE(x, y)
Definition: Logger.h:206
T row_ptr_rowwise(T buff, const QueryMemoryDescriptor &query_mem_desc, const size_t entry_idx)
+ Here is the call graph for this function:

◆ getRowWisePerfectHashEntryAt() [1/2]

template<typename ENTRY_TYPE >
ENTRY_TYPE ResultSet::getRowWisePerfectHashEntryAt ( const size_t  row_idx,
const size_t  target_idx,
const size_t  slot_idx 
) const
private

◆ getRowWisePerfectHashEntryAt() [2/2]

template<typename ENTRY_TYPE >
ENTRY_TYPE ResultSet::getRowWisePerfectHashEntryAt ( const size_t  row_idx,
const size_t  target_idx,
const size_t  slot_idx 
) const

Directly accesses the result set's storage buffer for a particular data type (row-wise output, perfect hash group by)

NOTE: Currently, only used in direct columnarization

Definition at line 1244 of file ResultSetIteration.cpp.

References storage_.

1246  {
1247  const size_t row_offset = storage_->query_mem_desc_.getRowSize() * row_idx;
1248  const size_t column_offset = storage_->query_mem_desc_.getColOffInBytes(slot_idx);
1249  const int8_t* storage_buffer =
1250  storage_->getUnderlyingBuffer() + row_offset + column_offset;
1251  return *reinterpret_cast<const ENTRY_TYPE*>(storage_buffer);
1252 }
std::unique_ptr< ResultSetStorage > storage_
Definition: ResultSet.h:707

◆ getSingleSlotTargetBitmap()

std::tuple< std::vector< bool >, size_t > ResultSet::getSingleSlotTargetBitmap ( ) const

Definition at line 1004 of file ResultSet.cpp.

References anonymous_namespace{RelAlgExecutor.cpp}::is_agg(), kAVG, and targets_.

Referenced by getSupportedSingleSlotTargetBitmap().

1004  {
1005  std::vector<bool> target_bitmap(targets_.size(), true);
1006  size_t num_single_slot_targets = 0;
1007  for (size_t target_idx = 0; target_idx < targets_.size(); target_idx++) {
1008  const auto& sql_type = targets_[target_idx].sql_type;
1009  if (targets_[target_idx].is_agg && targets_[target_idx].agg_kind == kAVG) {
1010  target_bitmap[target_idx] = false;
1011  } else if (sql_type.is_varlen()) {
1012  target_bitmap[target_idx] = false;
1013  } else {
1014  num_single_slot_targets++;
1015  }
1016  }
1017  return std::make_tuple(std::move(target_bitmap), num_single_slot_targets);
1018 }
bool is_agg(const Analyzer::Expr *expr)
const std::vector< TargetInfo > targets_
Definition: ResultSet.h:703
Definition: sqldefs.h:72
+ Here is the call graph for this function:
+ Here is the caller graph for this function:

◆ getSlotIndicesForTargetIndices()

std::vector< size_t > ResultSet::getSlotIndicesForTargetIndices ( ) const

Definition at line 1047 of file ResultSet.cpp.

References advance_slot(), and targets_.

1047  {
1048  std::vector<size_t> slot_indices(targets_.size(), 0);
1049  size_t slot_index = 0;
1050  for (size_t target_idx = 0; target_idx < targets_.size(); target_idx++) {
1051  slot_indices[target_idx] = slot_index;
1052  slot_index = advance_slot(slot_index, targets_[target_idx], false);
1053  }
1054  return slot_indices;
1055 }
const std::vector< TargetInfo > targets_
Definition: ResultSet.h:703
size_t advance_slot(const size_t j, const TargetInfo &target_info, const bool separate_varlen_storage)
+ Here is the call graph for this function:

◆ getStorage()

const ResultSetStorage * ResultSet::getStorage ( ) const

Definition at line 265 of file ResultSet.cpp.

References storage_.

265  {
266  return storage_.get();
267 }
std::unique_ptr< ResultSetStorage > storage_
Definition: ResultSet.h:707

◆ getStorageIndex()

std::pair< size_t, size_t > ResultSet::getStorageIndex ( const size_t  entry_idx) const
private

Returns (storageIdx, entryIdx) pair, where: storageIdx : 0 is storage_, storageIdx-1 is index into appended_storage_. entryIdx : local index into the storage object.

Definition at line 635 of file ResultSet.cpp.

References appended_storage_, CHECK_NE, QueryMemoryDescriptor::getEntryCount(), query_mem_desc_, storage_, and UNREACHABLE.

Referenced by findStorage(), makeGeoTargetValue(), makeTargetValue(), and makeVarlenTargetValue().

635  {
636  size_t fixedup_entry_idx = entry_idx;
637  auto entry_count = storage_->query_mem_desc_.getEntryCount();
638  const bool is_rowwise_layout = !storage_->query_mem_desc_.didOutputColumnar();
639  if (fixedup_entry_idx < entry_count) {
640  return {0, fixedup_entry_idx};
641  }
642  fixedup_entry_idx -= entry_count;
643  for (size_t i = 0; i < appended_storage_.size(); ++i) {
644  const auto& desc = appended_storage_[i]->query_mem_desc_;
645  CHECK_NE(is_rowwise_layout, desc.didOutputColumnar());
646  entry_count = desc.getEntryCount();
647  if (fixedup_entry_idx < entry_count) {
648  return {i + 1, fixedup_entry_idx};
649  }
650  fixedup_entry_idx -= entry_count;
651  }
652  UNREACHABLE() << "entry_idx = " << entry_idx << ", query_mem_desc_.getEntryCount() = "
653                << query_mem_desc_.getEntryCount();
654  return {};
655 }
AppendedStorage appended_storage_
Definition: ResultSet.h:708
QueryMemoryDescriptor query_mem_desc_
Definition: ResultSet.h:706
#define UNREACHABLE()
Definition: Logger.h:241
std::unique_ptr< ResultSetStorage > storage_
Definition: ResultSet.h:707
#define CHECK_NE(x, y)
Definition: Logger.h:206
+ Here is the call graph for this function:
+ Here is the caller graph for this function:

◆ getStringDictionaryPayloadCopy()

std::shared_ptr< const std::vector< std::string > > ResultSet::getStringDictionaryPayloadCopy ( const int  dict_id) const

Definition at line 958 of file ResultSet.cpp.

References catalog_, CHECK, and row_set_mem_owner_.

959  {
960  const auto sdp = row_set_mem_owner_->getOrAddStringDictProxy(
961  dict_id, /*with_generation=*/false, catalog_);
962  CHECK(sdp);
963  return sdp->getDictionary()->copyStrings();
964 }
const Catalog_Namespace::Catalog * catalog_
Definition: ResultSet.h:716
std::shared_ptr< RowSetMemoryOwner > row_set_mem_owner_
Definition: ResultSet.h:713
#define CHECK(condition)
Definition: Logger.h:197

◆ getSupportedSingleSlotTargetBitmap()

std::tuple< std::vector< bool >, size_t > ResultSet::getSupportedSingleSlotTargetBitmap ( ) const

This function returns a bitmap and population count of it, where it denotes all supported single-column targets suitable for direct columnarization.

The final goal is to remove the need for such selection, but at the moment for any target that doesn't qualify for direct columnarization, we use the traditional result set's iteration to handle it (e.g., count distinct, approximate count distinct)

Definition at line 1028 of file ResultSet.cpp.

References CHECK, CHECK_GE, getSingleSlotTargetBitmap(), is_distinct_target(), isDirectColumnarConversionPossible(), kFLOAT, kSAMPLE, and targets_.

1029  {
1030  CHECK(isDirectColumnarConversionPossible());
1031  auto [single_slot_targets, num_single_slot_targets] = getSingleSlotTargetBitmap();
1032 
1033  for (size_t target_idx = 0; target_idx < single_slot_targets.size(); target_idx++) {
1034  const auto& target = targets_[target_idx];
1035  if (single_slot_targets[target_idx] &&
1036  (is_distinct_target(target) ||
1037  (target.is_agg && target.agg_kind == kSAMPLE && target.sql_type == kFLOAT))) {
1038  single_slot_targets[target_idx] = false;
1039  num_single_slot_targets--;
1040  }
1041  }
1042  CHECK_GE(num_single_slot_targets, size_t(0));
1043  return std::make_tuple(std::move(single_slot_targets), num_single_slot_targets);
1044 }
#define CHECK_GE(x, y)
Definition: Logger.h:210
bool isDirectColumnarConversionPossible() const
Definition: ResultSet.cpp:973
const std::vector< TargetInfo > targets_
Definition: ResultSet.h:703
bool is_distinct_target(const TargetInfo &target_info)
Definition: TargetInfo.h:130
std::tuple< std::vector< bool >, size_t > getSingleSlotTargetBitmap() const
Definition: ResultSet.cpp:1004
#define CHECK(condition)
Definition: Logger.h:197
+ Here is the call graph for this function:

◆ getTargetInfos()

const std::vector< TargetInfo > & ResultSet::getTargetInfos ( ) const

Definition at line 402 of file ResultSet.cpp.

References targets_.

402  {
403  return targets_;
404 }
const std::vector< TargetInfo > targets_
Definition: ResultSet.h:703

◆ getTargetInitVals()

const std::vector< int64_t > & ResultSet::getTargetInitVals ( ) const

Definition at line 406 of file ResultSet.cpp.

References CHECK, and storage_.

406  {
407  CHECK(storage_);
408  return storage_->target_init_vals_;
409 }
std::unique_ptr< ResultSetStorage > storage_
Definition: ResultSet.h:707
#define CHECK(condition)
Definition: Logger.h:197

◆ getTargetValueFromBufferColwise()

TargetValue ResultSet::getTargetValueFromBufferColwise ( const int8_t *  col_ptr,
const int8_t *  keys_ptr,
const QueryMemoryDescriptor query_mem_desc,
const size_t  local_entry_idx,
const size_t  global_entry_idx,
const TargetInfo target_info,
const size_t  target_logical_idx,
const size_t  slot_idx,
const bool  translate_strings,
const bool  decimal_to_double 
) const
private

Definition at line 1866 of file ResultSetIteration.cpp.

References advance_to_next_columnar_target_buff(), TargetInfo::agg_kind, anonymous_namespace{ResultSetIteration.cpp}::calculate_quantile(), CHECK, CHECK_GE, anonymous_namespace{ResultSetIteration.cpp}::columnar_elem_ptr(), QueryMemoryDescriptor::didOutputColumnar(), QueryMemoryDescriptor::getEffectiveKeyWidth(), QueryMemoryDescriptor::getEntryCount(), QueryMemoryDescriptor::getPaddedSlotWidthBytes(), QueryMemoryDescriptor::getTargetGroupbyIndex(), TargetInfo::is_agg, SQLTypeInfo::is_geometry(), is_real_str_or_array(), kAPPROX_MEDIAN, kAVG, anonymous_namespace{ResultSetIteration.cpp}::make_avg_target_value(), makeGeoTargetValue(), makeTargetValue(), makeVarlenTargetValue(), query_mem_desc_, TargetInfo::sql_type, and QueryMemoryDescriptor::targetGroupbyIndicesSize().

1876  {
1878  const auto col1_ptr = col_ptr;
1879  const auto compact_sz1 = query_mem_desc.getPaddedSlotWidthBytes(slot_idx);
1880  const auto next_col_ptr =
1881  advance_to_next_columnar_target_buff(col1_ptr, query_mem_desc, slot_idx);
1882  const auto col2_ptr = ((target_info.is_agg && target_info.agg_kind == kAVG) ||
1883  is_real_str_or_array(target_info))
1884  ? next_col_ptr
1885  : nullptr;
1886  const auto compact_sz2 = ((target_info.is_agg && target_info.agg_kind == kAVG) ||
1887  is_real_str_or_array(target_info))
1888  ? query_mem_desc.getPaddedSlotWidthBytes(slot_idx + 1)
1889  : 0;
1890 
1891  // TODO(Saman): add required logics for count distinct
1892  // geospatial target values:
1893  if (target_info.sql_type.is_geometry()) {
1894  return makeGeoTargetValue(
1895  col1_ptr, slot_idx, target_info, target_logical_idx, global_entry_idx);
1896  }
1897 
1898  const auto ptr1 = columnar_elem_ptr(local_entry_idx, col1_ptr, compact_sz1);
1899  if (target_info.agg_kind == kAVG || is_real_str_or_array(target_info)) {
1900  CHECK(col2_ptr);
1901  CHECK(compact_sz2);
1902  const auto ptr2 = columnar_elem_ptr(local_entry_idx, col2_ptr, compact_sz2);
1903  return target_info.agg_kind == kAVG
1904  ? make_avg_target_value(ptr1, compact_sz1, ptr2, compact_sz2, target_info)
1905  : makeVarlenTargetValue(ptr1,
1906  compact_sz1,
1907  ptr2,
1908  compact_sz2,
1909  target_info,
1910  target_logical_idx,
1911  translate_strings,
1912  global_entry_idx);
1913  } else if (target_info.agg_kind == kAPPROX_MEDIAN) {
1914  return calculate_quantile(ptr1, 0.5);
1915  }
1916  if (query_mem_desc_.targetGroupbyIndicesSize() == 0 ||
1917  query_mem_desc_.getTargetGroupbyIndex(target_logical_idx) < 0) {
1918  return makeTargetValue(ptr1,
1919  compact_sz1,
1920  target_info,
1921  target_logical_idx,
1922  translate_strings,
1923  decimal_to_double,
1924  global_entry_idx);
1925  }
1926  const auto key_width = query_mem_desc_.getEffectiveKeyWidth();
1927  const auto key_idx = query_mem_desc_.getTargetGroupbyIndex(target_logical_idx);
1928  CHECK_GE(key_idx, 0);
1929  auto key_col_ptr = keys_ptr + key_idx * query_mem_desc_.getEntryCount() * key_width;
1930  return makeTargetValue(columnar_elem_ptr(local_entry_idx, key_col_ptr, key_width),
1931  key_width,
1932  target_info,
1933  target_logical_idx,
1934  translate_strings,
1935  decimal_to_double,
1936  global_entry_idx);
1937 }
double decimal_to_double(const SQLTypeInfo &otype, int64_t oval)
T advance_to_next_columnar_target_buff(T target_ptr, const QueryMemoryDescriptor &query_mem_desc, const size_t target_slot_idx)
SQLTypeInfo sql_type
Definition: TargetInfo.h:42
QueryMemoryDescriptor query_mem_desc_
Definition: ResultSet.h:706
#define CHECK_GE(x, y)
Definition: Logger.h:210
TargetValue make_avg_target_value(const int8_t *ptr1, const int8_t compact_sz1, const int8_t *ptr2, const int8_t compact_sz2, const TargetInfo &target_info)
const int8_t getPaddedSlotWidthBytes(const size_t slot_idx) const
bool is_agg
Definition: TargetInfo.h:40
size_t targetGroupbyIndicesSize() const
SQLAgg agg_kind
Definition: TargetInfo.h:41
bool is_geometry() const
Definition: sqltypes.h:490
bool is_real_str_or_array(const TargetInfo &target_info)
int64_t getTargetGroupbyIndex(const size_t target_idx) const
TargetValue makeTargetValue(const int8_t *ptr, const int8_t compact_sz, const TargetInfo &target_info, const size_t target_logical_idx, const bool translate_strings, const bool decimal_to_double, const size_t entry_buff_idx) const
TargetValue makeVarlenTargetValue(const int8_t *ptr1, const int8_t compact_sz1, const int8_t *ptr2, const int8_t compact_sz2, const TargetInfo &target_info, const size_t target_logical_idx, const bool translate_strings, const size_t entry_buff_idx) const
#define CHECK(condition)
Definition: Logger.h:197
TargetValue makeGeoTargetValue(const int8_t *geo_target_ptr, const size_t slot_idx, const TargetInfo &target_info, const size_t target_logical_idx, const size_t entry_buff_idx) const
double calculate_quantile(int8_t const *ptr, double const q)
Definition: sqldefs.h:72
const int8_t * columnar_elem_ptr(const size_t entry_idx, const int8_t *col1_ptr, const int8_t compact_sz1)
size_t getEffectiveKeyWidth() const
+ Here is the call graph for this function:

◆ getTargetValueFromBufferRowwise()

TargetValue ResultSet::getTargetValueFromBufferRowwise ( int8_t *  rowwise_target_ptr,
int8_t *  keys_ptr,
const size_t  entry_buff_idx,
const TargetInfo target_info,
const size_t  target_logical_idx,
const size_t  slot_idx,
const bool  translate_strings,
const bool  decimal_to_double,
const bool  fixup_count_distinct_pointers 
) const
private

Definition at line 1941 of file ResultSetIteration.cpp.

References TargetInfo::agg_kind, anonymous_namespace{ResultSetIteration.cpp}::calculate_quantile(), CHECK, QueryMemoryDescriptor::count_distinct_descriptors_, SQLTypeInfo::get_compression(), QueryMemoryDescriptor::getEffectiveKeyWidth(), QueryMemoryDescriptor::getLogicalSlotWidthBytes(), QueryMemoryDescriptor::getPaddedSlotWidthBytes(), QueryMemoryDescriptor::getTargetGroupbyIndex(), QueryMemoryDescriptor::hasKeylessHash(), TargetInfo::is_agg, SQLTypeInfo::is_array(), is_distinct_target(), SQLTypeInfo::is_geometry(), is_real_str_or_array(), SQLTypeInfo::is_string(), QueryMemoryDescriptor::isSingleColumnGroupByWithPerfectHash(), kAPPROX_MEDIAN, kAVG, kENCODING_NONE, anonymous_namespace{ResultSetIteration.cpp}::make_avg_target_value(), makeGeoTargetValue(), makeTargetValue(), makeVarlenTargetValue(), query_mem_desc_, row_set_mem_owner_, separate_varlen_storage_valid_, TargetInfo::sql_type, storage_, QueryMemoryDescriptor::targetGroupbyIndicesSize(), and UNLIKELY.

1950  {
1951  if (UNLIKELY(fixup_count_distinct_pointers)) {
1952  if (is_distinct_target(target_info)) {
1953  auto count_distinct_ptr_ptr = reinterpret_cast<int64_t*>(rowwise_target_ptr);
1954  const auto remote_ptr = *count_distinct_ptr_ptr;
1955  if (remote_ptr) {
1956  const auto ptr = storage_->mappedPtr(remote_ptr);
1957  if (ptr) {
1958  *count_distinct_ptr_ptr = ptr;
1959  } else {
1960  // need to create a zero filled buffer for this remote_ptr
1961  const auto& count_distinct_desc =
1962  query_mem_desc_.count_distinct_descriptors_[target_logical_idx];
1963  const auto bitmap_byte_sz = count_distinct_desc.sub_bitmap_count == 1
1964  ? count_distinct_desc.bitmapSizeBytes()
1965  : count_distinct_desc.bitmapPaddedSizeBytes();
1966  auto count_distinct_buffer =
1967  row_set_mem_owner_->allocateCountDistinctBuffer(bitmap_byte_sz);
1968  *count_distinct_ptr_ptr = reinterpret_cast<int64_t>(count_distinct_buffer);
1969  }
1970  }
1971  }
1972  return int64_t(0);
1973  }
1974  if (target_info.sql_type.is_geometry()) {
1975  return makeGeoTargetValue(
1976  rowwise_target_ptr, slot_idx, target_info, target_logical_idx, entry_buff_idx);
1977  }
1978 
1979  auto ptr1 = rowwise_target_ptr;
1980  int8_t compact_sz1 = query_mem_desc_.getPaddedSlotWidthBytes(slot_idx);
1981  if (query_mem_desc_.isSingleColumnGroupByWithPerfectHash() &&
1982  !query_mem_desc_.hasKeylessHash() && !target_info.is_agg) {
1983  // Single column perfect hash group by can utilize one slot for both the key and the
1984  // target value if both values fit in 8 bytes. Use the target value actual size for
1985  // this case. If they don't, the target value should be 8 bytes, so we can still use
1986  // the actual size rather than the compact size.
1987  compact_sz1 = query_mem_desc_.getLogicalSlotWidthBytes(slot_idx);
1988  }
1989 
1990  // logic for deciding width of column
1991  if (target_info.agg_kind == kAVG || is_real_str_or_array(target_info)) {
1992  const auto ptr2 =
1993  rowwise_target_ptr + query_mem_desc_.getPaddedSlotWidthBytes(slot_idx);
1994  int8_t compact_sz2 = 0;
1995  // Skip reading the second slot if we have a none encoded string and are using
1996  // the none encoded strings buffer attached to ResultSetStorage
1997  if (!(separate_varlen_storage_valid_ &&
1998  (target_info.sql_type.is_array() ||
1999  (target_info.sql_type.is_string() &&
2000  target_info.sql_type.get_compression() == kENCODING_NONE)))) {
2001  compact_sz2 = query_mem_desc_.getPaddedSlotWidthBytes(slot_idx + 1);
2002  }
2003  if (separate_varlen_storage_valid_ && target_info.is_agg) {
2004  compact_sz2 = 8; // TODO(adb): is there a better way to do this?
2005  }
2006  CHECK(ptr2);
2007  return target_info.agg_kind == kAVG
2008  ? make_avg_target_value(ptr1, compact_sz1, ptr2, compact_sz2, target_info)
2009  : makeVarlenTargetValue(ptr1,
2010  compact_sz1,
2011  ptr2,
2012  compact_sz2,
2013  target_info,
2014  target_logical_idx,
2015  translate_strings,
2016  entry_buff_idx);
2017  } else if (target_info.agg_kind == kAPPROX_MEDIAN) {
2018  return calculate_quantile(rowwise_target_ptr, 0.5);
2019  }
2020  if (query_mem_desc_.targetGroupbyIndicesSize() == 0 ||
2021  query_mem_desc_.getTargetGroupbyIndex(target_logical_idx) < 0) {
2022  return makeTargetValue(ptr1,
2023  compact_sz1,
2024  target_info,
2025  target_logical_idx,
2026  translate_strings,
2027  decimal_to_double,
2028  entry_buff_idx);
2029  }
2030  const auto key_width = query_mem_desc_.getEffectiveKeyWidth();
2031  ptr1 = keys_ptr + query_mem_desc_.getTargetGroupbyIndex(target_logical_idx) * key_width;
2032  return makeTargetValue(ptr1,
2033  key_width,
2034  target_info,
2035  target_logical_idx,
2036  translate_strings,
2037  decimal_to_double,
2038  entry_buff_idx);
2039 }
bool is_array() const
Definition: sqltypes.h:486
bool is_string() const
Definition: sqltypes.h:478
double decimal_to_double(const SQLTypeInfo &otype, int64_t oval)
SQLTypeInfo sql_type
Definition: TargetInfo.h:42
QueryMemoryDescriptor query_mem_desc_
Definition: ResultSet.h:706
std::unique_ptr< ResultSetStorage > storage_
Definition: ResultSet.h:707
TargetValue make_avg_target_value(const int8_t *ptr1, const int8_t compact_sz1, const int8_t *ptr2, const int8_t compact_sz2, const TargetInfo &target_info)
HOST DEVICE EncodingType get_compression() const
Definition: sqltypes.h:319
const int8_t getPaddedSlotWidthBytes(const size_t slot_idx) const
std::shared_ptr< RowSetMemoryOwner > row_set_mem_owner_
Definition: ResultSet.h:713
bool is_agg
Definition: TargetInfo.h:40
CountDistinctDescriptors count_distinct_descriptors_
bool is_distinct_target(const TargetInfo &target_info)
Definition: TargetInfo.h:130
size_t targetGroupbyIndicesSize() const
SQLAgg agg_kind
Definition: TargetInfo.h:41
#define UNLIKELY(x)
Definition: likely.h:25
bool is_real_str_or_array(const TargetInfo &target_info)
bool is_geometry() const
Definition: sqltypes.h:490
int64_t getTargetGroupbyIndex(const size_t target_idx) const
TargetValue makeTargetValue(const int8_t *ptr, const int8_t compact_sz, const TargetInfo &target_info, const size_t target_logical_idx, const bool translate_strings, const bool decimal_to_double, const size_t entry_buff_idx) const
TargetValue makeVarlenTargetValue(const int8_t *ptr1, const int8_t compact_sz1, const int8_t *ptr2, const int8_t compact_sz2, const TargetInfo &target_info, const size_t target_logical_idx, const bool translate_strings, const size_t entry_buff_idx) const
#define CHECK(condition)
Definition: Logger.h:197
const int8_t getLogicalSlotWidthBytes(const size_t slot_idx) const
bool separate_varlen_storage_valid_
Definition: ResultSet.h:740
TargetValue makeGeoTargetValue(const int8_t *geo_target_ptr, const size_t slot_idx, const TargetInfo &target_info, const size_t target_logical_idx, const size_t entry_buff_idx) const
double calculate_quantile(int8_t const *ptr, double const q)
Definition: sqldefs.h:72
bool isSingleColumnGroupByWithPerfectHash() const
size_t getEffectiveKeyWidth() const
+ Here is the call graph for this function:

◆ getVarlenOrderEntry()

InternalTargetValue ResultSet::getVarlenOrderEntry ( const int64_t  str_ptr,
const size_t  str_len 
) const
private

Definition at line 627 of file ResultSetIteration.cpp.

References CHECK, copy_from_gpu(), CPU, device_id_, device_type_, QueryMemoryDescriptor::getExecutor(), GPU, query_mem_desc_, and row_set_mem_owner_.

628  {
629  char* host_str_ptr{nullptr};
630  std::vector<int8_t> cpu_buffer;
631  if (device_type_ == ExecutorDeviceType::GPU) {
632  cpu_buffer.resize(str_len);
633  const auto executor = query_mem_desc_.getExecutor();
634  CHECK(executor);
635  auto& data_mgr = executor->catalog_->getDataMgr();
636  copy_from_gpu(&data_mgr,
637  &cpu_buffer[0],
638  static_cast<CUdeviceptr>(str_ptr),
639  str_len,
640  device_id_);
641  host_str_ptr = reinterpret_cast<char*>(&cpu_buffer[0]);
642  } else {
644  host_str_ptr = reinterpret_cast<char*>(str_ptr);
645  }
646  std::string str(host_str_ptr, str_len);
647  return InternalTargetValue(row_set_mem_owner_->addString(str));
648 }
QueryMemoryDescriptor query_mem_desc_
Definition: ResultSet.h:706
const Executor * getExecutor() const
std::shared_ptr< RowSetMemoryOwner > row_set_mem_owner_
Definition: ResultSet.h:713
void copy_from_gpu(Data_Namespace::DataMgr *data_mgr, void *dst, const CUdeviceptr src, const size_t num_bytes, const int device_id)
const ExecutorDeviceType device_type_
Definition: ResultSet.h:704
#define CHECK(condition)
Definition: Logger.h:197
const int device_id_
Definition: ResultSet.h:705
+ Here is the call graph for this function:

◆ holdChunkIterators()

void ResultSet::holdChunkIterators ( const std::shared_ptr< std::list< ChunkIter >>  chunk_iters)
inline

Definition at line 335 of file ResultSet.h.

335  {
336  chunk_iters_.push_back(chunk_iters);
337  }
std::vector< std::shared_ptr< std::list< ChunkIter > > > chunk_iters_
Definition: ResultSet.h:722

◆ holdChunks()

void ResultSet::holdChunks ( const std::list< std::shared_ptr< Chunk_NS::Chunk >> &  chunks)
inline

Definition at line 332 of file ResultSet.h.

332  {
333  chunks_ = chunks;
334  }
std::list< std::shared_ptr< Chunk_NS::Chunk > > chunks_
Definition: ResultSet.h:721

◆ holdLiterals()

void ResultSet::holdLiterals ( std::vector< int8_t > &  literal_buff)
inline

Definition at line 338 of file ResultSet.h.

338  {
339  literal_buffers_.push_back(std::move(literal_buff));
340  }
std::vector< std::vector< int8_t > > literal_buffers_
Definition: ResultSet.h:725

◆ initializeStorage()

void ResultSet::initializeStorage ( ) const

Definition at line 1016 of file ResultSetReduction.cpp.

References QueryMemoryDescriptor::didOutputColumnar(), and ResultSetStorage::query_mem_desc_.

1016  {
1017  if (query_mem_desc_.didOutputColumnar()) {
1018  storage_->initializeColWise();
1019  } else {
1020  storage_->initializeRowWise();
1021  }
1022 }
QueryMemoryDescriptor query_mem_desc_
Definition: ResultSet.h:706
std::unique_ptr< ResultSetStorage > storage_
Definition: ResultSet.h:707
+ Here is the call graph for this function:

◆ initPermutationBuffer()

std::vector< uint32_t > ResultSet::initPermutationBuffer ( const size_t  start,
const size_t  step 
)
private

Definition at line 571 of file ResultSet.cpp.

References CHECK, CHECK_NE, DEBUG_TIMER, findStorage(), QueryMemoryDescriptor::getEntryCount(), and query_mem_desc_.

Referenced by parallelTop(), and sort().

572  {
573  auto timer = DEBUG_TIMER(__func__);
574  CHECK_NE(size_t(0), step);
575  std::vector<uint32_t> permutation;
576  const auto total_entries = query_mem_desc_.getEntryCount();
577  permutation.reserve(total_entries / step);
578  for (size_t i = start; i < total_entries; i += step) {
579  const auto storage_lookup_result = findStorage(i);
580  const auto lhs_storage = storage_lookup_result.storage_ptr;
581  const auto off = storage_lookup_result.fixedup_entry_idx;
582  CHECK(lhs_storage);
583  if (!lhs_storage->isEmptyEntry(off)) {
584  permutation.emplace_back(i);
585  }
586  }
587  return permutation;
588 }
QueryMemoryDescriptor query_mem_desc_
Definition: ResultSet.h:706
#define CHECK_NE(x, y)
Definition: Logger.h:206
StorageLookupResult findStorage(const size_t entry_idx) const
Definition: ResultSet.cpp:660
#define CHECK(condition)
Definition: Logger.h:197
#define DEBUG_TIMER(name)
Definition: Logger.h:313
+ Here is the call graph for this function:
+ Here is the caller graph for this function:

◆ isDirectColumnarConversionPossible()

bool ResultSet::isDirectColumnarConversionPossible ( ) const

Determines if it is possible to directly form a ColumnarResults class from this result set, bypassing the default columnarization.

NOTE: If there exists a permutation vector (i.e., in some ORDER BY queries), it becomes equivalent to the row-wise columnarization.

Definition at line 973 of file ResultSet.cpp.

References QueryMemoryDescriptor::didOutputColumnar(), g_enable_direct_columnarization, QueryMemoryDescriptor::getQueryDescriptionType(), GroupByBaselineHash, GroupByPerfectHash, permutation_, Projection, and query_mem_desc_.

Referenced by copyColumnIntoBuffer(), and getSupportedSingleSlotTargetBitmap().

+ Here is the call graph for this function:
+ Here is the caller graph for this function:

◆ isExplain()

bool ResultSet::isExplain ( ) const

Definition at line 466 of file ResultSet.cpp.

References just_explain_.

466  {
467  return just_explain_;
468 }
const bool just_explain_
Definition: ResultSet.h:742

◆ isGeoColOnGpu()

bool ResultSet::isGeoColOnGpu ( const size_t  col_idx) const

Definition at line 1424 of file ResultSetIteration.cpp.

References CHECK_LT, device_type_, GPU, IS_GEO, lazy_fetch_info_, separate_varlen_storage_valid_, targets_, and to_string().

1424  {
1425  // This should match the logic in makeGeoTargetValue which ultimately calls
1426  // fetch_data_from_gpu when the geo column is on the device.
1427  // TODO(croot): somehow find a way to refactor this and makeGeoTargetValue to use a
1428  // utility function that handles this logic in one place
1429  CHECK_LT(col_idx, targets_.size());
1430  if (!IS_GEO(targets_[col_idx].sql_type.get_type())) {
1431  throw std::runtime_error("Column target at index " + std::to_string(col_idx) +
1432  " is not a geo column. It is of type " +
1433  targets_[col_idx].sql_type.get_type_name() + ".");
1434  }
1435 
1436  const auto& target_info = targets_[col_idx];
1437  if (separate_varlen_storage_valid_ && !target_info.is_agg) {
1438  return false;
1439  }
1440 
1441  if (!lazy_fetch_info_.empty()) {
1442  CHECK_LT(col_idx, lazy_fetch_info_.size());
1443  if (lazy_fetch_info_[col_idx].is_lazily_fetched) {
1444  return false;
1445  }
1446  }
1447 
1448  return device_type_ == ExecutorDeviceType::GPU;
1449 }
std::string to_string(char const *&&v)
const std::vector< TargetInfo > targets_
Definition: ResultSet.h:703
const std::vector< ColumnLazyFetchInfo > lazy_fetch_info_
Definition: ResultSet.h:726
#define CHECK_LT(x, y)
Definition: Logger.h:207
const ExecutorDeviceType device_type_
Definition: ResultSet.h:704
bool separate_varlen_storage_valid_
Definition: ResultSet.h:740
#define IS_GEO(T)
Definition: sqltypes.h:242
+ Here is the call graph for this function:

◆ isNull()

bool ResultSet::isNull ( const SQLTypeInfo ti,
const InternalTargetValue val,
const bool  float_argument_input 
)
staticprivate

Definition at line 2178 of file ResultSetIteration.cpp.

References CHECK, SQLTypeInfo::get_notnull(), InternalTargetValue::i1, InternalTargetValue::i2, InternalTargetValue::isInt(), InternalTargetValue::isNull(), InternalTargetValue::isPair(), InternalTargetValue::isStr(), NULL_DOUBLE, null_val_bit_pattern(), and pair_to_double().

Referenced by ResultSet::ResultSetComparator< BUFFER_ITERATOR_TYPE >::operator()().

2180  {
2181  if (ti.get_notnull()) {
2182  return false;
2183  }
2184  if (val.isInt()) {
2185  return val.i1 == null_val_bit_pattern(ti, float_argument_input);
2186  }
2187  if (val.isPair()) {
2188  return !val.i2 ||
2189  pair_to_double({val.i1, val.i2}, ti, float_argument_input) == NULL_DOUBLE;
2190  }
2191  if (val.isStr()) {
2192  return !val.i1;
2193  }
2194  CHECK(val.isNull());
2195  return true;
2196 }
bool isNull() const
Definition: TargetValue.h:69
#define NULL_DOUBLE
bool isPair() const
Definition: TargetValue.h:67
double pair_to_double(const std::pair< int64_t, int64_t > &fp_pair, const SQLTypeInfo &ti, const bool float_argument_input)
int64_t null_val_bit_pattern(const SQLTypeInfo &ti, const bool float_argument_input)
bool isStr() const
Definition: TargetValue.h:71
HOST DEVICE bool get_notnull() const
Definition: sqltypes.h:318
bool isInt() const
Definition: TargetValue.h:65
#define CHECK(condition)
Definition: Logger.h:197
+ Here is the call graph for this function:
+ Here is the caller graph for this function:

◆ isPermutationBufferEmpty()

const bool ResultSet::isPermutationBufferEmpty ( ) const
inline

Definition at line 347 of file ResultSet.h.

347 { return permutation_.empty(); };
std::vector< uint32_t > permutation_
Definition: ResultSet.h:714

◆ isRowAtEmpty()

bool ResultSet::isRowAtEmpty ( const size_t  index) const

Definition at line 286 of file ResultSetIteration.cpp.

Referenced by parallelRowCount().

286  {
287  if (logical_index >= entryCount()) {
288  return true;
289  }
290  const auto entry_idx =
291  permutation_.empty() ? logical_index : permutation_[logical_index];
292  const auto storage_lookup_result = findStorage(entry_idx);
293  const auto storage = storage_lookup_result.storage_ptr;
294  const auto local_entry_idx = storage_lookup_result.fixedup_entry_idx;
295  return storage->isEmptyEntry(local_entry_idx);
296 }
size_t entryCount() const
std::vector< uint32_t > permutation_
Definition: ResultSet.h:714
StorageLookupResult findStorage(const size_t entry_idx) const
Definition: ResultSet.cpp:660
+ Here is the caller graph for this function:

◆ isTruncated()

bool ResultSet::isTruncated ( ) const

Definition at line 462 of file ResultSet.cpp.

References drop_first_, and keep_first_.

462  {
463  return keep_first_ + drop_first_;
464 }
size_t keep_first_
Definition: ResultSet.h:712
size_t drop_first_
Definition: ResultSet.h:711

◆ isValidationOnlyRes()

bool ResultSet::isValidationOnlyRes ( ) const

Definition at line 474 of file ResultSet.cpp.

References for_validation_only_.

474  {
475  return for_validation_only_;
476 }
bool for_validation_only_
Definition: ResultSet.h:743

◆ isZeroCopyColumnarConversionPossible()

bool ResultSet::isZeroCopyColumnarConversionPossible ( size_t  column_idx) const

Definition at line 991 of file ResultSet.cpp.

References appended_storage_, QueryMemoryDescriptor::didOutputColumnar(), QueryMemoryDescriptor::getQueryDescriptionType(), lazy_fetch_info_, Projection, query_mem_desc_, and storage_.

Referenced by getColumnarBuffer().

991  {
992  return query_mem_desc_.didOutputColumnar() &&
993  query_mem_desc_.getQueryDescriptionType() == QueryDescriptionType::Projection &&
994  appended_storage_.empty() && storage_ &&
995  (lazy_fetch_info_.empty() || !lazy_fetch_info_[column_idx].is_lazily_fetched);
996 }
AppendedStorage appended_storage_
Definition: ResultSet.h:708
QueryMemoryDescriptor query_mem_desc_
Definition: ResultSet.h:706
std::unique_ptr< ResultSetStorage > storage_
Definition: ResultSet.h:707
const std::vector< ColumnLazyFetchInfo > lazy_fetch_info_
Definition: ResultSet.h:726
QueryDescriptionType getQueryDescriptionType() const
+ Here is the call graph for this function:
+ Here is the caller graph for this function:

◆ keepFirstN()

void ResultSet::keepFirstN ( const size_t  n)

Definition at line 46 of file ResultSet.cpp.

References CHECK_EQ.

46  {
47  CHECK_EQ(-1, cached_row_count_);
48  keep_first_ = n;
49 }
#define CHECK_EQ(x, y)
Definition: Logger.h:205
size_t keep_first_
Definition: ResultSet.h:712
std::atomic< int64_t > cached_row_count_
Definition: ResultSet.h:744

◆ lazyReadInt()

int64_t ResultSet::lazyReadInt ( const int64_t  ival,
const size_t  target_logical_idx,
const StorageLookupResult storage_lookup_result 
) const
private

Definition at line 650 of file ResultSetIteration.cpp.

References CHECK, CHECK_LT, ChunkIter_get_nth(), col_buffers_, ResultSet::StorageLookupResult::fixedup_entry_idx, getColumnFrag(), VarlenDatum::is_null, kENCODING_NONE, result_set::lazy_decode(), lazy_fetch_info_, VarlenDatum::length, VarlenDatum::pointer, row_set_mem_owner_, ResultSet::StorageLookupResult::storage_idx, and targets_.

652  {
653  if (!lazy_fetch_info_.empty()) {
654  CHECK_LT(target_logical_idx, lazy_fetch_info_.size());
655  const auto& col_lazy_fetch = lazy_fetch_info_[target_logical_idx];
656  if (col_lazy_fetch.is_lazily_fetched) {
657  CHECK_LT(static_cast<size_t>(storage_lookup_result.storage_idx),
658  col_buffers_.size());
659  int64_t ival_copy = ival;
660  auto& frag_col_buffers =
661  getColumnFrag(static_cast<size_t>(storage_lookup_result.storage_idx),
662  target_logical_idx,
663  ival_copy);
664  auto& frag_col_buffer = frag_col_buffers[col_lazy_fetch.local_col_id];
665  CHECK_LT(target_logical_idx, targets_.size());
666  const TargetInfo& target_info = targets_[target_logical_idx];
667  CHECK(!target_info.is_agg);
668  if (target_info.sql_type.is_string() &&
669  target_info.sql_type.get_compression() == kENCODING_NONE) {
670  VarlenDatum vd;
671  bool is_end{false};
672  ChunkIter_get_nth(
673  reinterpret_cast<ChunkIter*>(const_cast<int8_t*>(frag_col_buffer)),
674  storage_lookup_result.fixedup_entry_idx,
675  false,
676  &vd,
677  &is_end);
678  CHECK(!is_end);
679  if (vd.is_null) {
680  return 0;
681  }
682  std::string fetched_str(reinterpret_cast<char*>(vd.pointer), vd.length);
683  return reinterpret_cast<int64_t>(row_set_mem_owner_->addString(fetched_str));
684  }
685  return result_set::lazy_decode(col_lazy_fetch, frag_col_buffer, ival_copy);
686  }
687  }
688  return ival;
689 }
const std::vector< const int8_t * > & getColumnFrag(const size_t storage_idx, const size_t col_logical_idx, int64_t &global_idx) const
bool is_null
Definition: sqltypes.h:144
DEVICE void ChunkIter_get_nth(ChunkIter *it, int n, bool uncompress, VarlenDatum *result, bool *is_end)
Definition: ChunkIter.cpp:181
int8_t * pointer
Definition: sqltypes.h:143
const std::vector< TargetInfo > targets_
Definition: ResultSet.h:703
std::shared_ptr< RowSetMemoryOwner > row_set_mem_owner_
Definition: ResultSet.h:713
int64_t lazy_decode(const ColumnLazyFetchInfo &col_lazy_fetch, const int8_t *byte_stream, const int64_t pos)
const std::vector< ColumnLazyFetchInfo > lazy_fetch_info_
Definition: ResultSet.h:726
#define CHECK_LT(x, y)
Definition: Logger.h:207
std::vector< std::vector< std::vector< const int8_t * > > > col_buffers_
Definition: ResultSet.h:727
#define CHECK(condition)
Definition: Logger.h:197
size_t length
Definition: sqltypes.h:142
+ Here is the call graph for this function:

◆ makeGeoTargetValue()

TargetValue ResultSet::makeGeoTargetValue ( const int8_t *  geo_target_ptr,
const size_t  slot_idx,
const TargetInfo target_info,
const size_t  target_logical_idx,
const size_t  entry_buff_idx 
) const
private

Definition at line 1455 of file ResultSetIteration.cpp.

References advance_to_next_columnar_target_buff(), CHECK, CHECK_EQ, CHECK_LT, col_buffers_, device_id_, device_type_, QueryMemoryDescriptor::didOutputColumnar(), findStorage(), geo_return_type_, SQLTypeInfo::get_type(), SQLTypeInfo::get_type_name(), getColumnFrag(), QueryMemoryDescriptor::getExecutor(), QueryMemoryDescriptor::getPaddedColWidthForRange(), QueryMemoryDescriptor::getPaddedSlotWidthBytes(), getStorageIndex(), GPU, TargetInfo::is_agg, SQLTypeInfo::is_geometry(), ColumnLazyFetchInfo::is_lazily_fetched, kLINESTRING, kMULTIPOLYGON, kPOINT, kPOLYGON, lazy_fetch_info_, ColumnLazyFetchInfo::local_col_id, query_mem_desc_, read_int_from_buff(), separate_varlen_storage_valid_, serialized_varlen_buffer_, TargetInfo::sql_type, and UNREACHABLE.

Referenced by getTargetValueFromBufferColwise(), and getTargetValueFromBufferRowwise().

1459  {
1460  CHECK(target_info.sql_type.is_geometry());
1461 
1462  auto getNextTargetBufferRowWise = [&](const size_t slot_idx, const size_t range) {
1463  return geo_target_ptr + query_mem_desc_.getPaddedColWidthForRange(slot_idx, range);
1464  };
1465 
1466  auto getNextTargetBufferColWise = [&](const size_t slot_idx, const size_t range) {
1467  const auto storage_info = findStorage(entry_buff_idx);
1468  auto crt_geo_col_ptr = geo_target_ptr;
1469  for (size_t i = slot_idx; i < slot_idx + range; i++) {
1470  crt_geo_col_ptr = advance_to_next_columnar_target_buff(
1471  crt_geo_col_ptr, storage_info.storage_ptr->query_mem_desc_, i);
1472  }
1473  // adjusting the column pointer to represent a pointer to the geo target value
1474  return crt_geo_col_ptr +
1475  storage_info.fixedup_entry_idx *
1476  storage_info.storage_ptr->query_mem_desc_.getPaddedSlotWidthBytes(
1477  slot_idx + range);
1478  };
1479 
1480  auto getNextTargetBuffer = [&](const size_t slot_idx, const size_t range) {
1482  ? getNextTargetBufferColWise(slot_idx, range)
1483  : getNextTargetBufferRowWise(slot_idx, range);
1484  };
1485 
1486  auto getCoordsDataPtr = [&](const int8_t* geo_target_ptr) {
1487  return read_int_from_buff(getNextTargetBuffer(slot_idx, 0),
1489  };
1490 
1491  auto getCoordsLength = [&](const int8_t* geo_target_ptr) {
1492  return read_int_from_buff(getNextTargetBuffer(slot_idx, 1),
1494  };
1495 
1496  auto getRingSizesPtr = [&](const int8_t* geo_target_ptr) {
1497  return read_int_from_buff(getNextTargetBuffer(slot_idx, 2),
1499  };
1500 
1501  auto getRingSizesLength = [&](const int8_t* geo_target_ptr) {
1502  return read_int_from_buff(getNextTargetBuffer(slot_idx, 3),
1504  };
1505 
1506  auto getPolyRingsPtr = [&](const int8_t* geo_target_ptr) {
1507  return read_int_from_buff(getNextTargetBuffer(slot_idx, 4),
1509  };
1510 
1511  auto getPolyRingsLength = [&](const int8_t* geo_target_ptr) {
1512  return read_int_from_buff(getNextTargetBuffer(slot_idx, 5),
1514  };
1515 
1516  auto getFragColBuffers = [&]() -> decltype(auto) {
1517  const auto storage_idx = getStorageIndex(entry_buff_idx);
1518  CHECK_LT(storage_idx.first, col_buffers_.size());
1519  auto global_idx = getCoordsDataPtr(geo_target_ptr);
1520  return getColumnFrag(storage_idx.first, target_logical_idx, global_idx);
1521  };
1522 
1523  const bool is_gpu_fetch = device_type_ == ExecutorDeviceType::GPU;
1524 
1525  auto getDataMgr = [&]() {
1526  auto executor = query_mem_desc_.getExecutor();
1527  CHECK(executor);
1528  auto& data_mgr = executor->catalog_->getDataMgr();
1529  return &data_mgr;
1530  };
1531 
1532  auto getSeparateVarlenStorage = [&]() -> decltype(auto) {
1533  const auto storage_idx = getStorageIndex(entry_buff_idx);
1534  CHECK_LT(storage_idx.first, serialized_varlen_buffer_.size());
1535  const auto& varlen_buffer = serialized_varlen_buffer_[storage_idx.first];
1536  return varlen_buffer;
1537  };
1538 
1539  if (separate_varlen_storage_valid_ && getCoordsDataPtr(geo_target_ptr) < 0) {
1540  CHECK_EQ(-1, getCoordsDataPtr(geo_target_ptr));
1541  return TargetValue(nullptr);
1542  }
1543 
1544  const ColumnLazyFetchInfo* col_lazy_fetch = nullptr;
1545  if (!lazy_fetch_info_.empty()) {
1546  CHECK_LT(target_logical_idx, lazy_fetch_info_.size());
1547  col_lazy_fetch = &lazy_fetch_info_[target_logical_idx];
1548  }
1549 
1550  switch (target_info.sql_type.get_type()) {
1551  case kPOINT: {
1552  if (separate_varlen_storage_valid_ && !target_info.is_agg) {
1553  const auto& varlen_buffer = getSeparateVarlenStorage();
1554  CHECK_LT(static_cast<size_t>(getCoordsDataPtr(geo_target_ptr)),
1555  varlen_buffer.size());
1556 
1557  return GeoTargetValueBuilder<kPOINT, GeoQueryOutputFetchHandler>::build(
1558  target_info.sql_type,
1560  nullptr,
1561  false,
1562  device_id_,
1563  reinterpret_cast<int64_t>(
1564  varlen_buffer[getCoordsDataPtr(geo_target_ptr)].data()),
1565  static_cast<int64_t>(varlen_buffer[getCoordsDataPtr(geo_target_ptr)].size()));
1566  } else if (col_lazy_fetch && col_lazy_fetch->is_lazily_fetched) {
1567  const auto& frag_col_buffers = getFragColBuffers();
1568  return GeoTargetValueBuilder<kPOINT, GeoLazyFetchHandler>::build(
1569  target_info.sql_type,
1571  frag_col_buffers[col_lazy_fetch->local_col_id],
1572  getCoordsDataPtr(geo_target_ptr));
1573  } else {
1574  return GeoTargetValueBuilder<kPOINT, GeoQueryOutputFetchHandler>::build(
1575  target_info.sql_type,
1577  is_gpu_fetch ? getDataMgr() : nullptr,
1578  is_gpu_fetch,
1579  device_id_,
1580  getCoordsDataPtr(geo_target_ptr),
1581  getCoordsLength(geo_target_ptr));
1582  }
1583  break;
1584  }
1585  case kLINESTRING: {
1586  if (separate_varlen_storage_valid_ && !target_info.is_agg) {
1587  const auto& varlen_buffer = getSeparateVarlenStorage();
1588  CHECK_LT(static_cast<size_t>(getCoordsDataPtr(geo_target_ptr)),
1589  varlen_buffer.size());
1590 
1591  return GeoTargetValueBuilder<kLINESTRING, GeoQueryOutputFetchHandler>::build(
1592  target_info.sql_type,
1594  nullptr,
1595  false,
1596  device_id_,
1597  reinterpret_cast<int64_t>(
1598  varlen_buffer[getCoordsDataPtr(geo_target_ptr)].data()),
1599  static_cast<int64_t>(varlen_buffer[getCoordsDataPtr(geo_target_ptr)].size()));
1600  } else if (col_lazy_fetch && col_lazy_fetch->is_lazily_fetched) {
1601  const auto& frag_col_buffers = getFragColBuffers();
1602  return GeoTargetValueBuilder<kLINESTRING, GeoLazyFetchHandler>::build(
1603  target_info.sql_type,
1605  frag_col_buffers[col_lazy_fetch->local_col_id],
1606  getCoordsDataPtr(geo_target_ptr));
1607  } else {
1608  return GeoTargetValueBuilder<kLINESTRING, GeoQueryOutputFetchHandler>::build(
1609  target_info.sql_type,
1611  is_gpu_fetch ? getDataMgr() : nullptr,
1612  is_gpu_fetch,
1613  device_id_,
1614  getCoordsDataPtr(geo_target_ptr),
1615  getCoordsLength(geo_target_ptr));
1616  }
1617  break;
1618  }
1619  case kPOLYGON: {
1620  if (separate_varlen_storage_valid_ && !target_info.is_agg) {
1621  const auto& varlen_buffer = getSeparateVarlenStorage();
1622  CHECK_LT(static_cast<size_t>(getCoordsDataPtr(geo_target_ptr) + 1),
1623  varlen_buffer.size());
1624 
1625  return GeoTargetValueBuilder<kPOLYGON, GeoQueryOutputFetchHandler>::build(
1626  target_info.sql_type,
1628  nullptr,
1629  false,
1630  device_id_,
1631  reinterpret_cast<int64_t>(
1632  varlen_buffer[getCoordsDataPtr(geo_target_ptr)].data()),
1633  static_cast<int64_t>(varlen_buffer[getCoordsDataPtr(geo_target_ptr)].size()),
1634  reinterpret_cast<int64_t>(
1635  varlen_buffer[getCoordsDataPtr(geo_target_ptr) + 1].data()),
1636  static_cast<int64_t>(
1637  varlen_buffer[getCoordsDataPtr(geo_target_ptr) + 1].size()));
1638  } else if (col_lazy_fetch && col_lazy_fetch->is_lazily_fetched) {
1639  const auto& frag_col_buffers = getFragColBuffers();
1640 
1641  return GeoTargetValueBuilder<kPOLYGON, GeoLazyFetchHandler>::build(
1642  target_info.sql_type,
1644  frag_col_buffers[col_lazy_fetch->local_col_id],
1645  getCoordsDataPtr(geo_target_ptr),
1646  frag_col_buffers[col_lazy_fetch->local_col_id + 1],
1647  getCoordsDataPtr(geo_target_ptr));
1648  } else {
1649  return GeoTargetValueBuilder<kPOLYGON, GeoQueryOutputFetchHandler>::build(
1650  target_info.sql_type,
1652  is_gpu_fetch ? getDataMgr() : nullptr,
1653  is_gpu_fetch,
1654  device_id_,
1655  getCoordsDataPtr(geo_target_ptr),
1656  getCoordsLength(geo_target_ptr),
1657  getRingSizesPtr(geo_target_ptr),
1658  getRingSizesLength(geo_target_ptr) * 4);
1659  }
1660  break;
1661  }
1662  case kMULTIPOLYGON: {
1663  if (separate_varlen_storage_valid_ && !target_info.is_agg) {
1664  const auto& varlen_buffer = getSeparateVarlenStorage();
1665  CHECK_LT(static_cast<size_t>(getCoordsDataPtr(geo_target_ptr) + 2),
1666  varlen_buffer.size());
1667 
1668  return GeoTargetValueBuilder<kMULTIPOLYGON, GeoQueryOutputFetchHandler>::build(
1669  target_info.sql_type,
1671  nullptr,
1672  false,
1673  device_id_,
1674  reinterpret_cast<int64_t>(
1675  varlen_buffer[getCoordsDataPtr(geo_target_ptr)].data()),
1676  static_cast<int64_t>(varlen_buffer[getCoordsDataPtr(geo_target_ptr)].size()),
1677  reinterpret_cast<int64_t>(
1678  varlen_buffer[getCoordsDataPtr(geo_target_ptr) + 1].data()),
1679  static_cast<int64_t>(
1680  varlen_buffer[getCoordsDataPtr(geo_target_ptr) + 1].size()),
1681  reinterpret_cast<int64_t>(
1682  varlen_buffer[getCoordsDataPtr(geo_target_ptr) + 2].data()),
1683  static_cast<int64_t>(
1684  varlen_buffer[getCoordsDataPtr(geo_target_ptr) + 2].size()));
1685  } else if (col_lazy_fetch && col_lazy_fetch->is_lazily_fetched) {
1686  const auto& frag_col_buffers = getFragColBuffers();
1687 
1688  return GeoTargetValueBuilder<kMULTIPOLYGON, GeoLazyFetchHandler>::build(
1689  target_info.sql_type,
1691  frag_col_buffers[col_lazy_fetch->local_col_id],
1692  getCoordsDataPtr(geo_target_ptr),
1693  frag_col_buffers[col_lazy_fetch->local_col_id + 1],
1694  getCoordsDataPtr(geo_target_ptr),
1695  frag_col_buffers[col_lazy_fetch->local_col_id + 2],
1696  getCoordsDataPtr(geo_target_ptr));
1697  } else {
1698  return GeoTargetValueBuilder<kMULTIPOLYGON, GeoQueryOutputFetchHandler>::build(
1699  target_info.sql_type,
1701  is_gpu_fetch ? getDataMgr() : nullptr,
1702  is_gpu_fetch,
1703  device_id_,
1704  getCoordsDataPtr(geo_target_ptr),
1705  getCoordsLength(geo_target_ptr),
1706  getRingSizesPtr(geo_target_ptr),
1707  getRingSizesLength(geo_target_ptr) * 4,
1708  getPolyRingsPtr(geo_target_ptr),
1709  getPolyRingsLength(geo_target_ptr) * 4);
1710  }
1711  break;
1712  }
1713  default:
1714  throw std::runtime_error("Unknown Geometry type encountered: " +
1715  target_info.sql_type.get_type_name());
1716  }
1717  UNREACHABLE();
1718  return TargetValue(nullptr);
1719 }
#define CHECK_EQ(x, y)
Definition: Logger.h:205
const std::vector< const int8_t * > & getColumnFrag(const size_t storage_idx, const size_t col_logical_idx, int64_t &global_idx) const
GeoReturnType geo_return_type_
Definition: ResultSet.h:748
T advance_to_next_columnar_target_buff(T target_ptr, const QueryMemoryDescriptor &query_mem_desc, const size_t target_slot_idx)
SQLTypeInfo sql_type
Definition: TargetInfo.h:42
QueryMemoryDescriptor query_mem_desc_
Definition: ResultSet.h:706
#define UNREACHABLE()
Definition: Logger.h:241
int64_t read_int_from_buff(const int8_t *ptr, const int8_t compact_sz)
std::vector< SerializedVarlenBufferStorage > serialized_varlen_buffer_
Definition: ResultSet.h:739
const int8_t getPaddedSlotWidthBytes(const size_t slot_idx) const
const Executor * getExecutor() const
bool is_agg
Definition: TargetInfo.h:40
std::pair< size_t, size_t > getStorageIndex(const size_t entry_idx) const
Definition: ResultSet.cpp:635
StorageLookupResult findStorage(const size_t entry_idx) const
Definition: ResultSet.cpp:660
const std::vector< ColumnLazyFetchInfo > lazy_fetch_info_
Definition: ResultSet.h:726
size_t getPaddedColWidthForRange(const size_t offset, const size_t range) const
#define CHECK_LT(x, y)
Definition: Logger.h:207
bool is_geometry() const
Definition: sqltypes.h:490
std::string get_type_name() const
Definition: sqltypes.h:414
std::vector< std::vector< std::vector< const int8_t * > > > col_buffers_
Definition: ResultSet.h:727
const bool is_lazily_fetched
const ExecutorDeviceType device_type_
Definition: ResultSet.h:704
#define CHECK(condition)
Definition: Logger.h:197
HOST DEVICE SQLTypes get_type() const
Definition: sqltypes.h:311
bool separate_varlen_storage_valid_
Definition: ResultSet.h:740
boost::variant< ScalarTargetValue, ArrayTargetValue, GeoTargetValue, GeoTargetValuePtr > TargetValue
Definition: TargetValue.h:167
const int device_id_
Definition: ResultSet.h:705
+ Here is the call graph for this function:
+ Here is the caller graph for this function:

◆ makeTargetValue()

TargetValue ResultSet::makeTargetValue ( const int8_t *  ptr,
const int8_t  compact_sz,
const TargetInfo target_info,
const size_t  target_logical_idx,
const bool  translate_strings,
const bool  decimal_to_double,
const size_t  entry_buff_idx 
) const
private

Definition at line 1722 of file ResultSetIteration.cpp.

References TargetInfo::agg_kind, catalog_, CHECK, CHECK_EQ, CHECK_GE, CHECK_LT, col_buffers_, count_distinct_set_size(), decimal_to_int_type(), exp_to_scale(), QueryMemoryDescriptor::forceFourByteFloat(), get_compact_type(), getColumnFrag(), QueryMemoryDescriptor::getCountDistinctDescriptor(), getStorageIndex(), inline_int_null_val(), anonymous_namespace{ResultSetIteration.cpp}::int_resize_cast(), TargetInfo::is_agg, SQLTypeInfo::is_date_in_days(), is_distinct_target(), QueryMemoryDescriptor::isLogicalSizedColumnsAllowed(), kAVG, kBIGINT, kENCODING_DICT, kFLOAT, kMAX, kMIN, kSINGLE_VALUE, kSUM, result_set::lazy_decode(), lazy_fetch_info_, NULL_DOUBLE, NULL_INT, query_mem_desc_, read_int_from_buff(), row_set_mem_owner_, and TargetInfo::sql_type.

Referenced by getTargetValueFromBufferColwise(), and getTargetValueFromBufferRowwise().

1728  {
1729  auto actual_compact_sz = compact_sz;
1730  const auto& type_info = target_info.sql_type;
1731  if (type_info.get_type() == kFLOAT && !query_mem_desc_.forceFourByteFloat()) {
1733  actual_compact_sz = sizeof(float);
1734  } else {
1735  actual_compact_sz = sizeof(double);
1736  }
1737  if (target_info.is_agg &&
1738  (target_info.agg_kind == kAVG || target_info.agg_kind == kSUM ||
1739  target_info.agg_kind == kMIN || target_info.agg_kind == kMAX ||
1740  target_info.agg_kind == kSINGLE_VALUE)) {
1741  // The above listed aggregates use two floats in a single 8-byte slot. Set the
1742  // padded size to 4 bytes to properly read each value.
1743  actual_compact_sz = sizeof(float);
1744  }
1745  }
1746  if (get_compact_type(target_info).is_date_in_days()) {
1747  // Dates encoded in days are converted to 8 byte values on read.
1748  actual_compact_sz = sizeof(int64_t);
1749  }
1750 
1751  // String dictionary keys are read as 32-bit values regardless of encoding
1752  if (type_info.is_string() && type_info.get_compression() == kENCODING_DICT &&
1753  type_info.get_comp_param()) {
1754  actual_compact_sz = sizeof(int32_t);
1755  }
1756 
1757  auto ival = read_int_from_buff(ptr, actual_compact_sz);
1758  const auto& chosen_type = get_compact_type(target_info);
1759  if (!lazy_fetch_info_.empty()) {
1760  CHECK_LT(target_logical_idx, lazy_fetch_info_.size());
1761  const auto& col_lazy_fetch = lazy_fetch_info_[target_logical_idx];
1762  if (col_lazy_fetch.is_lazily_fetched) {
1763  CHECK_GE(ival, 0);
1764  const auto storage_idx = getStorageIndex(entry_buff_idx);
1765  CHECK_LT(storage_idx.first, col_buffers_.size());
1766  auto& frag_col_buffers = getColumnFrag(storage_idx.first, target_logical_idx, ival);
1767  CHECK_LT(size_t(col_lazy_fetch.local_col_id), frag_col_buffers.size());
1768  ival = result_set::lazy_decode(
1769  col_lazy_fetch, frag_col_buffers[col_lazy_fetch.local_col_id], ival);
1770  if (chosen_type.is_fp()) {
1771  const auto dval = *reinterpret_cast<const double*>(may_alias_ptr(&ival));
1772  if (chosen_type.get_type() == kFLOAT) {
1773  return ScalarTargetValue(static_cast<float>(dval));
1774  } else {
1775  return ScalarTargetValue(dval);
1776  }
1777  }
1778  }
1779  }
1780  if (chosen_type.is_fp()) {
1781  switch (actual_compact_sz) {
1782  case 8: {
1783  const auto dval = *reinterpret_cast<const double*>(ptr);
1784  return chosen_type.get_type() == kFLOAT
1785  ? ScalarTargetValue(static_cast<const float>(dval))
1786  : ScalarTargetValue(dval);
1787  }
1788  case 4: {
1789  CHECK_EQ(kFLOAT, chosen_type.get_type());
1790  return *reinterpret_cast<const float*>(ptr);
1791  }
1792  default:
1793  CHECK(false);
1794  }
1795  }
1796  if (chosen_type.is_integer() | chosen_type.is_boolean() || chosen_type.is_time() ||
1797  chosen_type.is_timeinterval()) {
1798  if (is_distinct_target(target_info)) {
1800  ival, query_mem_desc_.getCountDistinctDescriptor(target_logical_idx)));
1801  }
1802  // TODO(alex): remove int_resize_cast, make read_int_from_buff return the
1803  // right type instead
1804  if (inline_int_null_val(chosen_type) ==
1805  int_resize_cast(ival, chosen_type.get_logical_size())) {
1806  return inline_int_null_val(type_info);
1807  }
1808  return ival;
1809  }
1810  if (chosen_type.is_string() && chosen_type.get_compression() == kENCODING_DICT) {
1811  if (translate_strings) {
1812  if (static_cast<int32_t>(ival) ==
1813  NULL_INT) { // TODO(alex): this isn't nice, fix it
1814  return NullableString(nullptr);
1815  }
1816  StringDictionaryProxy* sdp{nullptr};
1817  if (!chosen_type.get_comp_param()) {
1818  sdp = row_set_mem_owner_->getLiteralStringDictProxy();
1819  } else {
1820  sdp = catalog_
1821  ? row_set_mem_owner_->getOrAddStringDictProxy(
1822  chosen_type.get_comp_param(), /*with_generation=*/false, catalog_)
1823  : row_set_mem_owner_->getStringDictProxy(
1824  chosen_type.get_comp_param()); // unit tests bypass the catalog
1825  }
1826  return NullableString(sdp->getString(ival));
1827  } else {
1828  return static_cast<int64_t>(static_cast<int32_t>(ival));
1829  }
1830  }
1831  if (chosen_type.is_decimal()) {
1832  if (decimal_to_double) {
1833  if (target_info.is_agg &&
1834  (target_info.agg_kind == kAVG || target_info.agg_kind == kSUM ||
1835  target_info.agg_kind == kMIN || target_info.agg_kind == kMAX) &&
1836  ival == inline_int_null_val(SQLTypeInfo(kBIGINT, false))) {
1837  return NULL_DOUBLE;
1838  }
1839  if (ival ==
1840  inline_int_null_val(SQLTypeInfo(decimal_to_int_type(chosen_type), false))) {
1841  return NULL_DOUBLE;
1842  }
1843  return static_cast<double>(ival) / exp_to_scale(chosen_type.get_scale());
1844  }
1845  return ival;
1846  }
1847  CHECK(false);
1848  return TargetValue(int64_t(0));
1849 }
#define CHECK_EQ(x, y)
Definition: Logger.h:205
#define NULL_DOUBLE
double decimal_to_double(const SQLTypeInfo &otype, int64_t oval)
const std::vector< const int8_t * > & getColumnFrag(const size_t storage_idx, const size_t col_logical_idx, int64_t &global_idx) const
SQLTypeInfo sql_type
Definition: TargetInfo.h:42
const Catalog_Namespace::Catalog * catalog_
Definition: ResultSet.h:716
QueryMemoryDescriptor query_mem_desc_
Definition: ResultSet.h:706
#define CHECK_GE(x, y)
Definition: Logger.h:210
int64_t read_int_from_buff(const int8_t *ptr, const int8_t compact_sz)
#define NULL_INT
Definition: sqldefs.h:73
const SQLTypeInfo get_compact_type(const TargetInfo &target)
std::shared_ptr< RowSetMemoryOwner > row_set_mem_owner_
Definition: ResultSet.h:713
bool is_agg
Definition: TargetInfo.h:40
std::pair< size_t, size_t > getStorageIndex(const size_t entry_idx) const
Definition: ResultSet.cpp:635
int64_t count_distinct_set_size(const int64_t set_handle, const CountDistinctDescriptor &count_distinct_desc)
Definition: CountDistinct.h:75
int64_t lazy_decode(const ColumnLazyFetchInfo &col_lazy_fetch, const int8_t *byte_stream, const int64_t pos)
Definition: sqldefs.h:75
bool is_distinct_target(const TargetInfo &target_info)
Definition: TargetInfo.h:130
const std::vector< ColumnLazyFetchInfo > lazy_fetch_info_
Definition: ResultSet.h:726
SQLAgg agg_kind
Definition: TargetInfo.h:41
SQLTypes decimal_to_int_type(const SQLTypeInfo &ti)
Definition: Datum.cpp:303
#define CHECK_LT(x, y)
Definition: Logger.h:207
int64_t int_resize_cast(const int64_t ival, const size_t sz)
std::vector< std::vector< std::vector< const int8_t * > > > col_buffers_
Definition: ResultSet.h:727
boost::variant< std::string, void * > NullableString
Definition: TargetValue.h:155
#define CHECK(condition)
Definition: Logger.h:197
uint64_t exp_to_scale(const unsigned exp)
int64_t inline_int_null_val(const SQL_TYPE_INFO &ti)
boost::variant< ScalarTargetValue, ArrayTargetValue, GeoTargetValue, GeoTargetValuePtr > TargetValue
Definition: TargetValue.h:167
const CountDistinctDescriptor & getCountDistinctDescriptor(const size_t idx) const
Definition: sqldefs.h:74
Definition: sqldefs.h:72
bool is_date_in_days() const
Definition: sqltypes.h:705
boost::variant< int64_t, double, float, NullableString > ScalarTargetValue
Definition: TargetValue.h:156
+ Here is the call graph for this function:
+ Here is the caller graph for this function:

◆ makeVarlenTargetValue()

TargetValue ResultSet::makeVarlenTargetValue ( const int8_t *  ptr1,
const int8_t  compact_sz1,
const int8_t *  ptr2,
const int8_t  compact_sz2,
const TargetInfo target_info,
const size_t  target_logical_idx,
const bool  translate_strings,
const size_t  entry_buff_idx 
) const
private

Definition at line 1298 of file ResultSetIteration.cpp.

References anonymous_namespace{ResultSetIteration.cpp}::build_array_target_value(), catalog_, CHECK, CHECK_EQ, CHECK_GE, CHECK_GT, CHECK_LT, ChunkIter_get_nth(), col_buffers_, copy_from_gpu(), device_id_, device_type_, SQLTypeInfo::get_array_context_logical_size(), SQLTypeInfo::get_compression(), SQLTypeInfo::get_elem_type(), SQLTypeInfo::get_type(), getColumnFrag(), QueryMemoryDescriptor::getExecutor(), getStorageIndex(), GPU, TargetInfo::is_agg, SQLTypeInfo::is_array(), VarlenDatum::is_null, SQLTypeInfo::is_string(), kARRAY, kENCODING_NONE, lazy_fetch_info_, VarlenDatum::length, run_benchmark_import::optional, VarlenDatum::pointer, query_mem_desc_, read_int_from_buff(), row_set_mem_owner_, separate_varlen_storage_valid_, serialized_varlen_buffer_, and TargetInfo::sql_type.

Referenced by getTargetValueFromBufferColwise(), and getTargetValueFromBufferRowwise().

1305  {
1306  auto varlen_ptr = read_int_from_buff(ptr1, compact_sz1);
1307  if (separate_varlen_storage_valid_ && !target_info.is_agg) {
1308  if (varlen_ptr < 0) {
1309  CHECK_EQ(-1, varlen_ptr);
1310  if (target_info.sql_type.get_type() == kARRAY) {
1311  return ArrayTargetValue(boost::optional<std::vector<ScalarTargetValue>>{});
1312  }
1313  return TargetValue(nullptr);
1314  }
1315  const auto storage_idx = getStorageIndex(entry_buff_idx);
1316  if (target_info.sql_type.is_string()) {
1317  CHECK(target_info.sql_type.get_compression() == kENCODING_NONE);
1318  CHECK_LT(storage_idx.first, serialized_varlen_buffer_.size());
1319  const auto& varlen_buffer_for_storage =
1320  serialized_varlen_buffer_[storage_idx.first];
1321  CHECK_LT(static_cast<size_t>(varlen_ptr), varlen_buffer_for_storage.size());
1322  return varlen_buffer_for_storage[varlen_ptr];
1323  } else if (target_info.sql_type.get_type() == kARRAY) {
1324  CHECK_LT(storage_idx.first, serialized_varlen_buffer_.size());
1325  const auto& varlen_buffer = serialized_varlen_buffer_[storage_idx.first];
1326  CHECK_LT(static_cast<size_t>(varlen_ptr), varlen_buffer.size());
1327 
1328  return build_array_target_value(
1329  target_info.sql_type,
1330  reinterpret_cast<const int8_t*>(varlen_buffer[varlen_ptr].data()),
1331  varlen_buffer[varlen_ptr].size(),
1332  translate_strings,
1334  catalog_);
1335  } else {
1336  CHECK(false);
1337  }
1338  }
1339  if (!lazy_fetch_info_.empty()) {
1340  CHECK_LT(target_logical_idx, lazy_fetch_info_.size());
1341  const auto& col_lazy_fetch = lazy_fetch_info_[target_logical_idx];
1342  if (col_lazy_fetch.is_lazily_fetched) {
1343  const auto storage_idx = getStorageIndex(entry_buff_idx);
1344  CHECK_LT(storage_idx.first, col_buffers_.size());
1345  auto& frag_col_buffers =
1346  getColumnFrag(storage_idx.first, target_logical_idx, varlen_ptr);
1347  bool is_end{false};
1348  if (target_info.sql_type.is_string()) {
1349  VarlenDatum vd;
1350  ChunkIter_get_nth(reinterpret_cast<ChunkIter*>(const_cast<int8_t*>(
1351  frag_col_buffers[col_lazy_fetch.local_col_id])),
1352  varlen_ptr,
1353  false,
1354  &vd,
1355  &is_end);
1356  CHECK(!is_end);
1357  if (vd.is_null) {
1358  return TargetValue(nullptr);
1359  }
1360  CHECK(vd.pointer);
1361  CHECK_GT(vd.length, 0u);
1362  std::string fetched_str(reinterpret_cast<char*>(vd.pointer), vd.length);
1363  return fetched_str;
1364  } else {
1365  CHECK(target_info.sql_type.is_array());
1366  ArrayDatum ad;
1367  ChunkIter_get_nth(reinterpret_cast<ChunkIter*>(const_cast<int8_t*>(
1368  frag_col_buffers[col_lazy_fetch.local_col_id])),
1369  varlen_ptr,
1370  &ad,
1371  &is_end);
1372  CHECK(!is_end);
1373  if (ad.is_null) {
1374  return ArrayTargetValue(boost::optional<std::vector<ScalarTargetValue>>{});
1375  }
1376  CHECK_GE(ad.length, 0u);
1377  if (ad.length > 0) {
1378  CHECK(ad.pointer);
1379  }
1380  return build_array_target_value(target_info.sql_type,
1381  ad.pointer,
1382  ad.length,
1383  translate_strings,
1385  catalog_);
1386  }
1387  }
1388  }
1389  if (!varlen_ptr) {
1390  if (target_info.sql_type.is_array()) {
1391  return ArrayTargetValue(boost::optional<std::vector<ScalarTargetValue>>{});
1392  }
1393  return TargetValue(nullptr);
1394  }
1395  auto length = read_int_from_buff(ptr2, compact_sz2);
1396  if (target_info.sql_type.is_array()) {
1397  const auto& elem_ti = target_info.sql_type.get_elem_type();
1398  length *= elem_ti.get_array_context_logical_size();
1399  }
1400  std::vector<int8_t> cpu_buffer;
1401  if (varlen_ptr && device_type_ == ExecutorDeviceType::GPU) {
1402  cpu_buffer.resize(length);
1403  const auto executor = query_mem_desc_.getExecutor();
1404  CHECK(executor);
1405  auto& data_mgr = executor->catalog_->getDataMgr();
1406  copy_from_gpu(&data_mgr,
1407  &cpu_buffer[0],
1408  static_cast<CUdeviceptr>(varlen_ptr),
1409  length,
1410  device_id_);
1411  varlen_ptr = reinterpret_cast<int64_t>(&cpu_buffer[0]);
1412  }
1413  if (target_info.sql_type.is_array()) {
1414  return build_array_target_value(target_info.sql_type,
1415  reinterpret_cast<const int8_t*>(varlen_ptr),
1416  length,
1417  translate_strings,
1419  catalog_);
1420  }
1421  return std::string(reinterpret_cast<char*>(varlen_ptr), length);
1422 }
#define CHECK_EQ(x, y)
Definition: Logger.h:205
bool is_array() const
Definition: sqltypes.h:486
bool is_string() const
Definition: sqltypes.h:478
const std::vector< const int8_t * > & getColumnFrag(const size_t storage_idx, const size_t col_logical_idx, int64_t &global_idx) const
bool is_null
Definition: sqltypes.h:144
SQLTypeInfo sql_type
Definition: TargetInfo.h:42
int get_array_context_logical_size() const
Definition: sqltypes.h:538
const Catalog_Namespace::Catalog * catalog_
Definition: ResultSet.h:716
QueryMemoryDescriptor query_mem_desc_
Definition: ResultSet.h:706
#define CHECK_GE(x, y)
Definition: Logger.h:210
int64_t read_int_from_buff(const int8_t *ptr, const int8_t compact_sz)
std::vector< SerializedVarlenBufferStorage > serialized_varlen_buffer_
Definition: ResultSet.h:739
HOST DEVICE EncodingType get_compression() const
Definition: sqltypes.h:319
#define CHECK_GT(x, y)
Definition: Logger.h:209
DEVICE void ChunkIter_get_nth(ChunkIter *it, int n, bool uncompress, VarlenDatum *result, bool *is_end)
Definition: ChunkIter.cpp:181
int8_t * pointer
Definition: sqltypes.h:143
const Executor * getExecutor() const
std::conditional_t< is_cuda_compiler(), DeviceArrayDatum, HostArrayDatum > ArrayDatum
Definition: sqltypes.h:199
std::shared_ptr< RowSetMemoryOwner > row_set_mem_owner_
Definition: ResultSet.h:713
bool is_agg
Definition: TargetInfo.h:40
std::pair< size_t, size_t > getStorageIndex(const size_t entry_idx) const
Definition: ResultSet.cpp:635
void copy_from_gpu(Data_Namespace::DataMgr *data_mgr, void *dst, const CUdeviceptr src, const size_t num_bytes, const int device_id)
TargetValue build_array_target_value(const SQLTypeInfo &array_ti, const int8_t *buff, const size_t buff_sz, const bool translate_strings, std::shared_ptr< RowSetMemoryOwner > row_set_mem_owner, const Catalog_Namespace::Catalog *catalog)
boost::optional< std::vector< ScalarTargetValue > > ArrayTargetValue
Definition: TargetValue.h:157
const std::vector< ColumnLazyFetchInfo > lazy_fetch_info_
Definition: ResultSet.h:726
#define CHECK_LT(x, y)
Definition: Logger.h:207
std::vector< std::vector< std::vector< const int8_t * > > > col_buffers_
Definition: ResultSet.h:727
const ExecutorDeviceType device_type_
Definition: ResultSet.h:704
SQLTypeInfo get_elem_type() const
Definition: sqltypes.h:697
#define CHECK(condition)
Definition: Logger.h:197
HOST DEVICE SQLTypes get_type() const
Definition: sqltypes.h:311
bool separate_varlen_storage_valid_
Definition: ResultSet.h:740
boost::variant< ScalarTargetValue, ArrayTargetValue, GeoTargetValue, GeoTargetValuePtr > TargetValue
Definition: TargetValue.h:167
size_t length
Definition: sqltypes.h:142
const int device_id_
Definition: ResultSet.h:705
+ Here is the call graph for this function:
+ Here is the caller graph for this function:

◆ moveToBegin()

void ResultSet::moveToBegin ( ) const

Definition at line 457 of file ResultSet.cpp.

References crt_row_buff_idx_, and fetched_so_far_.

Referenced by rowCount().

457  {
458  crt_row_buff_idx_ = 0;
459  fetched_so_far_ = 0;
460 }
size_t fetched_so_far_
Definition: ResultSet.h:710
size_t crt_row_buff_idx_
Definition: ResultSet.h:709
+ Here is the caller graph for this function:

◆ parallelRowCount()

size_t ResultSet::parallelRowCount ( ) const
private

Definition at line 358 of file ResultSet.cpp.

References gpu_enabled::accumulate(), cpu_threads(), drop_first_, parse_ast::end, entryCount(), g_use_tbb_pool, anonymous_namespace{ResultSet.cpp}::get_truncated_row_count(), getLimit(), and isRowAtEmpty().

Referenced by rowCount().

358  {
359  auto execute_parallel_row_count = [this](auto counter_threads) -> size_t {
360  const size_t worker_count = cpu_threads();
361  for (size_t i = 0,
362  start_entry = 0,
363  stride = (entryCount() + worker_count - 1) / worker_count;
364  i < worker_count && start_entry < entryCount();
365  ++i, start_entry += stride) {
366  const auto end_entry = std::min(start_entry + stride, entryCount());
367  counter_threads.spawn(
368  [this](const size_t start, const size_t end) {
369  size_t row_count{0};
370  for (size_t i = start; i < end; ++i) {
371  if (!isRowAtEmpty(i)) {
372  ++row_count;
373  }
374  }
375  return row_count;
376  },
377  start_entry,
378  end_entry);
379  }
380  const auto row_counts = counter_threads.join();
381  const size_t row_count = std::accumulate(row_counts.begin(), row_counts.end(), 0);
382  return row_count;
383  };
384  // will fall back to futures threadpool if TBB is not enabled
385  const auto row_count =
387  ? execute_parallel_row_count(threadpool::ThreadPool<size_t>())
388  : execute_parallel_row_count(threadpool::FuturesThreadPool<size_t>());
389 
390  return get_truncated_row_count(row_count, getLimit(), drop_first_);
391 }
size_t entryCount() const
size_t get_truncated_row_count(size_t total_row_count, size_t limit, size_t offset)
Definition: ResultSet.cpp:284
size_t drop_first_
Definition: ResultSet.h:711
bool g_use_tbb_pool
Definition: Execute.cpp:78
bool isRowAtEmpty(const size_t index) const
size_t getLimit() const
Definition: ResultSet.cpp:954
int cpu_threads()
Definition: thread_count.h:24
DEVICE auto accumulate(ARGS &&... args)
Definition: gpu_enabled.h:42
+ Here is the call graph for this function:
+ Here is the caller graph for this function:

◆ parallelTop()

void ResultSet::parallelTop ( const std::list< Analyzer::OrderEntry > &  order_entries,
const size_t  top_n,
const Executor executor 
)
private

Definition at line 594 of file ResultSet.cpp.

References cpu_threads(), createComparator(), DEBUG_TIMER, initPermutationBuffer(), permutation_, and topPermutation().

Referenced by sort().

596  {
597  auto timer = DEBUG_TIMER(__func__);
598  const size_t step = cpu_threads();
599  std::vector<std::vector<uint32_t>> strided_permutations(step);
600  std::vector<std::future<void>> init_futures;
601  for (size_t start = 0; start < step; ++start) {
602  init_futures.emplace_back(
603  std::async(std::launch::async, [this, start, step, &strided_permutations] {
604  strided_permutations[start] = initPermutationBuffer(start, step);
605  }));
606  }
607  for (auto& init_future : init_futures) {
608  init_future.wait();
609  }
610  for (auto& init_future : init_futures) {
611  init_future.get();
612  }
613  auto compare = createComparator(order_entries, true, executor);
614  std::vector<std::future<void>> top_futures;
615  for (auto& strided_permutation : strided_permutations) {
616  top_futures.emplace_back(
617  std::async(std::launch::async, [&strided_permutation, &compare, top_n] {
618  topPermutation(strided_permutation, top_n, compare);
619  }));
620  }
621  for (auto& top_future : top_futures) {
622  top_future.wait();
623  }
624  for (auto& top_future : top_futures) {
625  top_future.get();
626  }
627  permutation_.reserve(strided_permutations.size() * top_n);
628  for (const auto& strided_permutation : strided_permutations) {
629  permutation_.insert(
630  permutation_.end(), strided_permutation.begin(), strided_permutation.end());
631  }
632  topPermutation(permutation_, top_n, compare);
633 }
std::vector< uint32_t > permutation_
Definition: ResultSet.h:714
std::vector< uint32_t > initPermutationBuffer(const size_t start, const size_t step)
Definition: ResultSet.cpp:571
static void topPermutation(std::vector< uint32_t > &to_sort, const size_t n, const std::function< bool(const uint32_t, const uint32_t)> compare)
Definition: ResultSet.cpp:855
#define DEBUG_TIMER(name)
Definition: Logger.h:313
int cpu_threads()
Definition: thread_count.h:24
std::function< bool(const uint32_t, const uint32_t)> createComparator(const std::list< Analyzer::OrderEntry > &order_entries, const bool use_heap, const Executor *executor)
Definition: ResultSet.h:637
+ Here is the call graph for this function:
+ Here is the caller graph for this function:

◆ radixSortOnCpu()

void ResultSet::radixSortOnCpu ( const std::list< Analyzer::OrderEntry > &  order_entries) const
private

Definition at line 914 of file ResultSet.cpp.

References apply_permutation_cpu(), CHECK, CHECK_EQ, DEBUG_TIMER, QueryMemoryDescriptor::getColOffInBytes(), QueryMemoryDescriptor::getEntryCount(), QueryMemoryDescriptor::getPaddedSlotWidthBytes(), QueryMemoryDescriptor::getSlotCount(), QueryMemoryDescriptor::hasKeylessHash(), query_mem_desc_, sort_groups_cpu(), and storage_.

Referenced by sort().

915  {
916  auto timer = DEBUG_TIMER(__func__);
918  std::vector<int64_t> tmp_buff(query_mem_desc_.getEntryCount());
919  std::vector<int32_t> idx_buff(query_mem_desc_.getEntryCount());
920  CHECK_EQ(size_t(1), order_entries.size());
921  auto buffer_ptr = storage_->getUnderlyingBuffer();
922  for (const auto& order_entry : order_entries) {
923  const auto target_idx = order_entry.tle_no - 1;
924  const auto sortkey_val_buff = reinterpret_cast<int64_t*>(
925  buffer_ptr + query_mem_desc_.getColOffInBytes(target_idx));
926  const auto chosen_bytes = query_mem_desc_.getPaddedSlotWidthBytes(target_idx);
927  sort_groups_cpu(sortkey_val_buff,
928  &idx_buff[0],
930  order_entry.is_desc,
931  chosen_bytes);
932  apply_permutation_cpu(reinterpret_cast<int64_t*>(buffer_ptr),
933  &idx_buff[0],
935  &tmp_buff[0],
936  sizeof(int64_t));
937  for (size_t target_idx = 0; target_idx < query_mem_desc_.getSlotCount();
938  ++target_idx) {
939  if (static_cast<int>(target_idx) == order_entry.tle_no - 1) {
940  continue;
941  }
942  const auto chosen_bytes = query_mem_desc_.getPaddedSlotWidthBytes(target_idx);
943  const auto satellite_val_buff = reinterpret_cast<int64_t*>(
944  buffer_ptr + query_mem_desc_.getColOffInBytes(target_idx));
945  apply_permutation_cpu(satellite_val_buff,
946  &idx_buff[0],
948  &tmp_buff[0],
949  chosen_bytes);
950  }
951  }
952 }
#define CHECK_EQ(x, y)
Definition: Logger.h:205
void sort_groups_cpu(int64_t *val_buff, int32_t *idx_buff, const uint64_t entry_count, const bool desc, const uint32_t chosen_bytes)
Definition: InPlaceSort.cpp:27
QueryMemoryDescriptor query_mem_desc_
Definition: ResultSet.h:706
std::unique_ptr< ResultSetStorage > storage_
Definition: ResultSet.h:707
void apply_permutation_cpu(int64_t *val_buff, int32_t *idx_buff, const uint64_t entry_count, int64_t *tmp_buff, const uint32_t chosen_bytes)
Definition: InPlaceSort.cpp:46
const int8_t getPaddedSlotWidthBytes(const size_t slot_idx) const
#define CHECK(condition)
Definition: Logger.h:197
#define DEBUG_TIMER(name)
Definition: Logger.h:313
size_t getColOffInBytes(const size_t col_idx) const
+ Here is the call graph for this function:
+ Here is the caller graph for this function:

◆ radixSortOnGpu()

void ResultSet::radixSortOnGpu ( const std::list< Analyzer::OrderEntry > &  order_entries) const
private

Definition at line 877 of file ResultSet.cpp.

References block_size_, catalog_, CHECK_GT, copy_group_by_buffers_from_gpu(), create_dev_group_by_buffers(), DEBUG_TIMER, QueryMemoryDescriptor::getBufferSizeBytes(), Catalog_Namespace::Catalog::getDataMgr(), GPU, grid_size_, inplace_sort_gpu(), KernelPerFragment, query_mem_desc_, and storage_.

Referenced by sort().

878  {
879  auto timer = DEBUG_TIMER(__func__);
880  auto data_mgr = &catalog_->getDataMgr();
881  const int device_id{0};
882  CudaAllocator cuda_allocator(data_mgr, device_id);
883  CHECK_GT(block_size_, 0);
884  CHECK_GT(grid_size_, 0);
885  std::vector<int64_t*> group_by_buffers(block_size_);
886  group_by_buffers[0] = reinterpret_cast<int64_t*>(storage_->getUnderlyingBuffer());
887  auto dev_group_by_buffers =
888  create_dev_group_by_buffers(&cuda_allocator,
889  group_by_buffers,
891  block_size_,
892  grid_size_,
893  device_id,
895  -1,
896  true,
897  true,
898  false,
899  nullptr);
 900  inplace_sort_gpu(
 901  order_entries, query_mem_desc_, dev_group_by_buffers, data_mgr, device_id);
 902  copy_group_by_buffers_from_gpu(
 903  data_mgr,
904  group_by_buffers,
906  dev_group_by_buffers.second,
908  block_size_,
909  grid_size_,
910  device_id,
911  false);
912 }
GpuGroupByBuffers create_dev_group_by_buffers(DeviceAllocator *cuda_allocator, const std::vector< int64_t *> &group_by_buffers, const QueryMemoryDescriptor &query_mem_desc, const unsigned block_size_x, const unsigned grid_size_x, const int device_id, const ExecutorDispatchMode dispatch_mode, const int64_t num_input_rows, const bool prepend_index_buffer, const bool always_init_group_by_on_host, const bool use_bump_allocator, Allocator *insitu_allocator)
Definition: GpuMemUtils.cpp:60
Data_Namespace::DataMgr & getDataMgr() const
Definition: Catalog.h:222
const Catalog_Namespace::Catalog * catalog_
Definition: ResultSet.h:716
QueryMemoryDescriptor query_mem_desc_
Definition: ResultSet.h:706
std::unique_ptr< ResultSetStorage > storage_
Definition: ResultSet.h:707
void inplace_sort_gpu(const std::list< Analyzer::OrderEntry > &order_entries, const QueryMemoryDescriptor &query_mem_desc, const GpuGroupByBuffers &group_by_buffers, Data_Namespace::DataMgr *data_mgr, const int device_id)
#define CHECK_GT(x, y)
Definition: Logger.h:209
unsigned block_size_
Definition: ResultSet.h:717
void copy_group_by_buffers_from_gpu(Data_Namespace::DataMgr *data_mgr, const std::vector< int64_t *> &group_by_buffers, const size_t groups_buffer_size, const CUdeviceptr group_by_dev_buffers_mem, const QueryMemoryDescriptor &query_mem_desc, const unsigned block_size_x, const unsigned grid_size_x, const int device_id, const bool prepend_index_buffer)
unsigned grid_size_
Definition: ResultSet.h:718
size_t getBufferSizeBytes(const RelAlgExecutionUnit &ra_exe_unit, const unsigned thread_count, const ExecutorDeviceType device_type) const
#define DEBUG_TIMER(name)
Definition: Logger.h:313
+ Here is the call graph for this function:
+ Here is the caller graph for this function:

◆ rowCount()

size_t ResultSet::rowCount ( const bool  force_parallel = false) const

Definition at line 300 of file ResultSet.cpp.

References binSearchRowCount(), cached_row_count_, CHECK_GE, drop_first_, entryCount(), getNextRowUnlocked(), QueryMemoryDescriptor::getQueryDescriptionType(), just_explain_, keep_first_, moveToBegin(), parallelRowCount(), permutation_, Projection, query_mem_desc_, row_iteration_mutex_, and storage_.

300  {
301  if (just_explain_) {
302  return 1;
303  }
304  if (!permutation_.empty()) {
305  if (drop_first_ > permutation_.size()) {
306  return 0;
307  }
308  const auto limited_row_count = keep_first_ + drop_first_;
309  return limited_row_count ? std::min(limited_row_count, permutation_.size())
310  : permutation_.size();
311  }
312  if (cached_row_count_ != -1) {
314  return cached_row_count_;
315  }
316  if (!storage_) {
317  return 0;
318  }
319  if (permutation_.empty() &&
321  return binSearchRowCount();
322  }
323  if (force_parallel || entryCount() > 20000) {
324  return parallelRowCount();
325  }
326  std::lock_guard<std::mutex> lock(row_iteration_mutex_);
327  moveToBegin();
328  size_t row_count{0};
329  while (true) {
330  auto crt_row = getNextRowUnlocked(false, false);
331  if (crt_row.empty()) {
332  break;
333  }
334  ++row_count;
335  }
336  moveToBegin();
337  return row_count;
338 }
std::mutex row_iteration_mutex_
Definition: ResultSet.h:745
size_t entryCount() const
QueryMemoryDescriptor query_mem_desc_
Definition: ResultSet.h:706
#define CHECK_GE(x, y)
Definition: Logger.h:210
std::unique_ptr< ResultSetStorage > storage_
Definition: ResultSet.h:707
size_t keep_first_
Definition: ResultSet.h:712
std::vector< TargetValue > getNextRowUnlocked(const bool translate_strings, const bool decimal_to_double) const
const bool just_explain_
Definition: ResultSet.h:742
std::vector< uint32_t > permutation_
Definition: ResultSet.h:714
std::atomic< int64_t > cached_row_count_
Definition: ResultSet.h:744
size_t drop_first_
Definition: ResultSet.h:711
size_t binSearchRowCount() const
Definition: ResultSet.cpp:345
size_t parallelRowCount() const
Definition: ResultSet.cpp:358
QueryDescriptionType getQueryDescriptionType() const
void moveToBegin() const
Definition: ResultSet.cpp:457
+ Here is the call graph for this function:

◆ rowIterator() [1/2]

ResultSetRowIterator ResultSet::rowIterator ( size_t  from_logical_index,
bool  translate_strings,
bool  decimal_to_double 
) const
inline

Definition at line 191 of file ResultSet.h.

193  {
194  ResultSetRowIterator rowIterator(this, translate_strings, decimal_to_double);
195 
196  // move to first logical position
197  ++rowIterator;
198 
199  for (size_t index = 0; index < from_logical_index; index++) {
200  ++rowIterator;
201  }
202 
203  return rowIterator;
204  }
double decimal_to_double(const SQLTypeInfo &otype, int64_t oval)
ResultSetRowIterator rowIterator(size_t from_logical_index, bool translate_strings, bool decimal_to_double) const
Definition: ResultSet.h:191

◆ rowIterator() [2/2]

ResultSetRowIterator ResultSet::rowIterator ( bool  translate_strings,
bool  decimal_to_double 
) const
inline

Definition at line 206 of file ResultSet.h.

207  {
208  return rowIterator(0, translate_strings, decimal_to_double);
209  }
double decimal_to_double(const SQLTypeInfo &otype, int64_t oval)
ResultSetRowIterator rowIterator(size_t from_logical_index, bool translate_strings, bool decimal_to_double) const
Definition: ResultSet.h:191

◆ serialize()

void ResultSet::serialize ( TSerializedRows &  serialized_rows) const

◆ serializeCountDistinctColumns()

void ResultSet::serializeCountDistinctColumns ( TSerializedRows &  ) const
private

◆ serializeProjection()

void ResultSet::serializeProjection ( TSerializedRows &  serialized_rows) const
private

◆ serializeVarlenAggColumn()

void ResultSet::serializeVarlenAggColumn ( int8_t *  buf,
std::vector< std::string > &  varlen_bufer 
) const
private

◆ setCachedRowCount()

void ResultSet::setCachedRowCount ( const size_t  row_count) const

Definition at line 340 of file ResultSet.cpp.

References cached_row_count_, and CHECK.

340  {
341  CHECK(cached_row_count_ == -1 || cached_row_count_ == static_cast<int64_t>(row_count));
342  cached_row_count_ = row_count;
343 }
std::atomic< int64_t > cached_row_count_
Definition: ResultSet.h:744
#define CHECK(condition)
Definition: Logger.h:197

◆ setGeoReturnType()

void ResultSet::setGeoReturnType ( const GeoReturnType  val)
inline

Definition at line 369 of file ResultSet.h.

369 { geo_return_type_ = val; }
GeoReturnType geo_return_type_
Definition: ResultSet.h:748

◆ setKernelQueueTime()

void ResultSet::setKernelQueueTime ( const int64_t  kernel_queue_time)

Definition at line 440 of file ResultSet.cpp.

References ResultSet::QueryExecutionTimings::kernel_queue_time, and timings_.

440  {
441  timings_.kernel_queue_time = kernel_queue_time;
442 }
QueryExecutionTimings timings_
Definition: ResultSet.h:719

◆ setQueueTime()

void ResultSet::setQueueTime ( const int64_t  queue_time)

Definition at line 436 of file ResultSet.cpp.

References ResultSet::QueryExecutionTimings::executor_queue_time, and timings_.

436  {
437  timings_.executor_queue_time = queue_time;
438 }
QueryExecutionTimings timings_
Definition: ResultSet.h:719

◆ setSeparateVarlenStorageValid()

void ResultSet::setSeparateVarlenStorageValid ( const bool  val)
inline

Definition at line 401 of file ResultSet.h.

 401  {
 402  separate_varlen_storage_valid_ = val;
 403  }
bool separate_varlen_storage_valid_
Definition: ResultSet.h:740

◆ setValidationOnlyRes()

void ResultSet::setValidationOnlyRes ( )

Definition at line 470 of file ResultSet.cpp.

References for_validation_only_.

470  {
471  for_validation_only_ = true;
472 }
bool for_validation_only_
Definition: ResultSet.h:743

◆ sort()

void ResultSet::sort ( const std::list< Analyzer::OrderEntry > &  order_entries,
const size_t  top_n,
const Executor executor 
)

Definition at line 494 of file ResultSet.cpp.

References Executor::baseline_threshold, baselineSort(), cached_row_count_, canUseFastBaselineSort(), CHECK, CHECK_EQ, CPU, createComparator(), DEBUG_TIMER, doBaselineSort(), entryCount(), g_enable_watchdog, QueryMemoryDescriptor::getEntryCount(), getGpuCount(), GPU, initPermutationBuffer(), LOG, parallelTop(), permutation_, query_mem_desc_, radixSortOnCpu(), radixSortOnGpu(), QueryMemoryDescriptor::sortOnGpu(), sortPermutation(), storage_, targets_, topPermutation(), and logger::WARNING.

Referenced by RelAlgExecutor::executeRelAlgQueryNoRetry(), RelAlgExecutor::executeRelAlgQuerySingleStep(), RelAlgExecutor::executeRelAlgStep(), and RelAlgExecutor::executeSort().

496  {
497  auto timer = DEBUG_TIMER(__func__);
498 
499  if (!storage_) {
500  return;
501  }
503  CHECK(!targets_.empty());
504 #ifdef HAVE_CUDA
505  if (canUseFastBaselineSort(order_entries, top_n)) {
506  baselineSort(order_entries, top_n, executor);
507  return;
508  }
509 #endif // HAVE_CUDA
510  if (query_mem_desc_.sortOnGpu()) {
511  try {
512  radixSortOnGpu(order_entries);
513  } catch (const OutOfMemory&) {
514  LOG(WARNING) << "Out of GPU memory during sort, finish on CPU";
515  radixSortOnCpu(order_entries);
516  } catch (const std::bad_alloc&) {
517  LOG(WARNING) << "Out of GPU memory during sort, finish on CPU";
518  radixSortOnCpu(order_entries);
519  }
520  return;
521  }
522  // This check isn't strictly required, but allows the index buffer to be 32-bit.
523  if (query_mem_desc_.getEntryCount() > std::numeric_limits<uint32_t>::max()) {
524  throw RowSortException("Sorting more than 4B elements not supported");
525  }
526 
527  CHECK(permutation_.empty());
528 
529  const bool use_heap{order_entries.size() == 1 && top_n};
530  if (use_heap && entryCount() > 100000) {
531  if (g_enable_watchdog && (entryCount() > 20000000)) {
532  throw WatchdogException("Sorting the result would be too slow");
533  }
534  parallelTop(order_entries, top_n, executor);
535  return;
536  }
537 
539  throw WatchdogException("Sorting the result would be too slow");
540  }
541 
543 
544  auto compare = createComparator(order_entries, use_heap, executor);
545 
546  if (use_heap) {
547  topPermutation(permutation_, top_n, compare);
548  } else {
549  sortPermutation(compare);
550  }
551 }
#define CHECK_EQ(x, y)
Definition: Logger.h:205
size_t entryCount() const
void radixSortOnCpu(const std::list< Analyzer::OrderEntry > &order_entries) const
Definition: ResultSet.cpp:914
#define LOG(tag)
Definition: Logger.h:188
static const size_t baseline_threshold
Definition: Execute.h:1021
QueryMemoryDescriptor query_mem_desc_
Definition: ResultSet.h:706
std::unique_ptr< ResultSetStorage > storage_
Definition: ResultSet.h:707
void radixSortOnGpu(const std::list< Analyzer::OrderEntry > &order_entries) const
Definition: ResultSet.cpp:877
void parallelTop(const std::list< Analyzer::OrderEntry > &order_entries, const size_t top_n, const Executor *executor)
Definition: ResultSet.cpp:594
std::vector< uint32_t > permutation_
Definition: ResultSet.h:714
std::atomic< int64_t > cached_row_count_
Definition: ResultSet.h:744
std::vector< uint32_t > initPermutationBuffer(const size_t start, const size_t step)
Definition: ResultSet.cpp:571
const std::vector< TargetInfo > targets_
Definition: ResultSet.h:703
bool canUseFastBaselineSort(const std::list< Analyzer::OrderEntry > &order_entries, const size_t top_n)
static void topPermutation(std::vector< uint32_t > &to_sort, const size_t n, const std::function< bool(const uint32_t, const uint32_t)> compare)
Definition: ResultSet.cpp:855
void sortPermutation(const std::function< bool(const uint32_t, const uint32_t)> compare)
Definition: ResultSet.cpp:871
void baselineSort(const std::list< Analyzer::OrderEntry > &order_entries, const size_t top_n, const Executor *executor)
#define CHECK(condition)
Definition: Logger.h:197
#define DEBUG_TIMER(name)
Definition: Logger.h:313
bool g_enable_watchdog
Definition: Execute.cpp:76
std::function< bool(const uint32_t, const uint32_t)> createComparator(const std::list< Analyzer::OrderEntry > &order_entries, const bool use_heap, const Executor *executor)
Definition: ResultSet.h:637
+ Here is the call graph for this function:
+ Here is the caller graph for this function:

◆ sortPermutation()

void ResultSet::sortPermutation ( const std::function< bool(const uint32_t, const uint32_t)>  compare)
private

Definition at line 871 of file ResultSet.cpp.

References DEBUG_TIMER, permutation_, and gpu_enabled::sort().

Referenced by sort().

872  {
873  auto timer = DEBUG_TIMER(__func__);
874  std::sort(permutation_.begin(), permutation_.end(), compare);
875 }
DEVICE void sort(ARGS &&... args)
Definition: gpu_enabled.h:105
std::vector< uint32_t > permutation_
Definition: ResultSet.h:714
#define DEBUG_TIMER(name)
Definition: Logger.h:313
+ Here is the call graph for this function:
+ Here is the caller graph for this function:

◆ syncEstimatorBuffer()

void ResultSet::syncEstimatorBuffer ( ) const

Definition at line 421 of file ResultSet.cpp.

References CHECK, CHECK_EQ, checked_calloc(), copy_from_gpu(), data_mgr_, device_estimator_buffer_, device_id_, device_type_, estimator_, Data_Namespace::AbstractBuffer::getMemoryPtr(), GPU, and host_estimator_buffer_.

421  {
424  CHECK_EQ(size_t(0), estimator_->getBufferSize() % sizeof(int64_t));
426  static_cast<int8_t*>(checked_calloc(estimator_->getBufferSize(), 1));
428  auto device_buffer_ptr = device_estimator_buffer_->getMemoryPtr();
431  reinterpret_cast<CUdeviceptr>(device_buffer_ptr),
432  estimator_->getBufferSize(),
433  device_id_);
434 }
#define CHECK_EQ(x, y)
Definition: Logger.h:205
virtual int8_t * getMemoryPtr()=0
void copy_from_gpu(Data_Namespace::DataMgr *data_mgr, void *dst, const CUdeviceptr src, const size_t num_bytes, const int device_id)
void * checked_calloc(const size_t nmemb, const size_t size)
Definition: checked_alloc.h:52
Data_Namespace::DataMgr * data_mgr_
Definition: ResultSet.h:734
const std::shared_ptr< const Analyzer::Estimator > estimator_
Definition: ResultSet.h:731
int8_t * host_estimator_buffer_
Definition: ResultSet.h:733
const ExecutorDeviceType device_type_
Definition: ResultSet.h:704
#define CHECK(condition)
Definition: Logger.h:197
Data_Namespace::AbstractBuffer * device_estimator_buffer_
Definition: ResultSet.h:732
const int device_id_
Definition: ResultSet.h:705
+ Here is the call graph for this function:

◆ topPermutation()

void ResultSet::topPermutation ( std::vector< uint32_t > &  to_sort,
const size_t  n,
const std::function< bool(const uint32_t, const uint32_t)>  compare 
)
staticprivate

Definition at line 855 of file ResultSet.cpp.

References DEBUG_TIMER.

Referenced by parallelTop(), and sort().

858  {
859  auto timer = DEBUG_TIMER(__func__);
860  std::make_heap(to_sort.begin(), to_sort.end(), compare);
861  std::vector<uint32_t> permutation_top;
862  permutation_top.reserve(n);
863  for (size_t i = 0; i < n && !to_sort.empty(); ++i) {
864  permutation_top.push_back(to_sort.front());
865  std::pop_heap(to_sort.begin(), to_sort.end(), compare);
866  to_sort.pop_back();
867  }
868  to_sort.swap(permutation_top);
869 }
#define DEBUG_TIMER(name)
Definition: Logger.h:313
+ Here is the caller graph for this function:

◆ unserialize()

static std::unique_ptr<ResultSet> ResultSet::unserialize ( const TSerializedRows &  serialized_rows,
const Executor  
)
static

◆ unserializeCountDistinctColumns()

void ResultSet::unserializeCountDistinctColumns ( const TSerializedRows &  )
private

◆ updateStorageEntryCount()

void ResultSet::updateStorageEntryCount ( const size_t  new_entry_count)
inline

Definition at line 219 of file ResultSet.h.

References File_Namespace::append(), CHECK, anonymous_namespace{TypedDataAccessors.h}::decimal_to_double(), Projection, and gpu_enabled::sort().

219  {
221  query_mem_desc_.setEntryCount(new_entry_count);
222  CHECK(storage_);
223  storage_->updateEntryCount(new_entry_count);
224  }
void setEntryCount(const size_t val)
QueryMemoryDescriptor query_mem_desc_
Definition: ResultSet.h:706
std::unique_ptr< ResultSetStorage > storage_
Definition: ResultSet.h:707
#define CHECK(condition)
Definition: Logger.h:197
QueryDescriptionType getQueryDescriptionType() const
+ Here is the call graph for this function:

Friends And Related Function Documentation

◆ ColumnarResults

friend class ColumnarResults
friend

Definition at line 757 of file ResultSet.h.

◆ ResultSetManager

friend class ResultSetManager
friend

Definition at line 755 of file ResultSet.h.

◆ ResultSetRowIterator

friend class ResultSetRowIterator
friend

Definition at line 756 of file ResultSet.h.

Member Data Documentation

◆ appended_storage_

◆ block_size_

unsigned ResultSet::block_size_ {0}
private

Definition at line 717 of file ResultSet.h.

Referenced by radixSortOnGpu(), and ResultSet().

◆ cached_row_count_

std::atomic<int64_t> ResultSet::cached_row_count_
mutableprivate

Definition at line 744 of file ResultSet.h.

Referenced by append(), ResultSet(), rowCount(), setCachedRowCount(), and sort().

◆ catalog_

const Catalog_Namespace::Catalog* ResultSet::catalog_
private

◆ chunk_iters_

std::vector<std::shared_ptr<std::list<ChunkIter> > > ResultSet::chunk_iters_
private

Definition at line 722 of file ResultSet.h.

Referenced by append().

◆ chunks_

std::list<std::shared_ptr<Chunk_NS::Chunk> > ResultSet::chunks_
private

Definition at line 721 of file ResultSet.h.

Referenced by append().

◆ col_buffers_

std::vector<std::vector<std::vector<const int8_t*> > > ResultSet::col_buffers_
private

◆ column_wise_comparator_

std::unique_ptr<ResultSetComparator<ColumnWiseTargetAccessor> > ResultSet::column_wise_comparator_
private

Definition at line 753 of file ResultSet.h.

◆ consistent_frag_sizes_

std::vector<std::vector<int64_t> > ResultSet::consistent_frag_sizes_
private

Definition at line 729 of file ResultSet.h.

Referenced by append(), getColumnFrag(), and ResultSet().

◆ crt_row_buff_idx_

size_t ResultSet::crt_row_buff_idx_
mutableprivate

◆ data_mgr_

Data_Namespace::DataMgr* ResultSet::data_mgr_
private

Definition at line 734 of file ResultSet.h.

Referenced by ResultSet(), syncEstimatorBuffer(), and ~ResultSet().

◆ device_estimator_buffer_

Data_Namespace::AbstractBuffer* ResultSet::device_estimator_buffer_ {nullptr}
private

Definition at line 732 of file ResultSet.h.

Referenced by getDeviceEstimatorBuffer(), ResultSet(), syncEstimatorBuffer(), and ~ResultSet().

◆ device_id_

const int ResultSet::device_id_
private

◆ device_type_

◆ drop_first_

size_t ResultSet::drop_first_
private

◆ estimator_

const std::shared_ptr<const Analyzer::Estimator> ResultSet::estimator_
private

Definition at line 731 of file ResultSet.h.

Referenced by definitelyHasNoRows(), ResultSet(), and syncEstimatorBuffer().

◆ explanation_

std::string ResultSet::explanation_
private

Definition at line 741 of file ResultSet.h.

◆ fetched_so_far_

size_t ResultSet::fetched_so_far_
mutableprivate

Definition at line 710 of file ResultSet.h.

Referenced by moveToBegin(), and ResultSet().

◆ for_validation_only_

bool ResultSet::for_validation_only_
private

Definition at line 743 of file ResultSet.h.

Referenced by isValidationOnlyRes(), ResultSet(), and setValidationOnlyRes().

◆ frag_offsets_

std::vector<std::vector<std::vector<int64_t> > > ResultSet::frag_offsets_
private

Definition at line 728 of file ResultSet.h.

Referenced by append(), getColumnFrag(), and ResultSet().

◆ geo_return_type_

GeoReturnType ResultSet::geo_return_type_
mutableprivate

Definition at line 748 of file ResultSet.h.

Referenced by makeGeoTargetValue(), and ResultSet().

◆ grid_size_

unsigned ResultSet::grid_size_ {0}
private

Definition at line 718 of file ResultSet.h.

Referenced by radixSortOnGpu(), and ResultSet().

◆ host_estimator_buffer_

int8_t* ResultSet::host_estimator_buffer_ {nullptr}
mutableprivate

Definition at line 733 of file ResultSet.h.

Referenced by getHostEstimatorBuffer(), ResultSet(), syncEstimatorBuffer(), and ~ResultSet().

◆ just_explain_

const bool ResultSet::just_explain_
private

Definition at line 742 of file ResultSet.h.

Referenced by colCount(), definitelyHasNoRows(), getColType(), isExplain(), ResultSet(), and rowCount().

◆ keep_first_

size_t ResultSet::keep_first_
private

Definition at line 712 of file ResultSet.h.

Referenced by advanceCursorToNextEntry(), getLimit(), isTruncated(), ResultSet(), and rowCount().

◆ lazy_fetch_info_

const std::vector<ColumnLazyFetchInfo> ResultSet::lazy_fetch_info_
private

◆ literal_buffers_

std::vector<std::vector<int8_t> > ResultSet::literal_buffers_
private

Definition at line 725 of file ResultSet.h.

Referenced by append().

◆ permutation_

std::vector<uint32_t> ResultSet::permutation_
private

◆ query_mem_desc_

◆ ResultSetBuilder

friend ResultSet::ResultSetBuilder

Definition at line 154 of file ResultSet.h.

◆ row_iteration_mutex_

std::mutex ResultSet::row_iteration_mutex_
mutableprivate

Definition at line 745 of file ResultSet.h.

Referenced by rowCount().

◆ row_set_mem_owner_

◆ row_wise_comparator_

std::unique_ptr<ResultSetComparator<RowWiseTargetAccessor> > ResultSet::row_wise_comparator_
private

Definition at line 752 of file ResultSet.h.

◆ separate_varlen_storage_valid_

bool ResultSet::separate_varlen_storage_valid_
private

◆ serialized_varlen_buffer_

std::vector<SerializedVarlenBufferStorage> ResultSet::serialized_varlen_buffer_
private

Definition at line 739 of file ResultSet.h.

Referenced by append(), makeGeoTargetValue(), and makeVarlenTargetValue().

◆ storage_

◆ targets_

◆ timings_

QueryExecutionTimings ResultSet::timings_
private

The documentation for this class was generated from the following files: