OmniSciDB  2e3a973ef4
ResultSet Class Reference

#include <ResultSet.h>

+ Collaboration diagram for ResultSet:

Classes

struct  ColumnWiseTargetAccessor
 
struct  QueryExecutionTimings
 
struct  ResultSetComparator
 
struct  RowWiseTargetAccessor
 
struct  StorageLookupResult
 
struct  TargetOffsets
 
struct  VarlenTargetPtrPair
 

Public Types

enum  GeoReturnType { GeoReturnType::GeoTargetValue, GeoReturnType::WktString, GeoReturnType::GeoTargetValuePtr, GeoReturnType::GeoTargetValueGpuPtr }
 

Public Member Functions

 ResultSet (const std::vector< TargetInfo > &targets, const ExecutorDeviceType device_type, const QueryMemoryDescriptor &query_mem_desc, const std::shared_ptr< RowSetMemoryOwner > row_set_mem_owner, const Executor *executor)
 
 ResultSet (const std::vector< TargetInfo > &targets, const std::vector< ColumnLazyFetchInfo > &lazy_fetch_info, const std::vector< std::vector< const int8_t *>> &col_buffers, const std::vector< std::vector< int64_t >> &frag_offsets, const std::vector< int64_t > &consistent_frag_sizes, const ExecutorDeviceType device_type, const int device_id, const QueryMemoryDescriptor &query_mem_desc, const std::shared_ptr< RowSetMemoryOwner > row_set_mem_owner, const Executor *executor)
 
 ResultSet (const std::shared_ptr< const Analyzer::Estimator >, const ExecutorDeviceType device_type, const int device_id, Data_Namespace::DataMgr *data_mgr)
 
 ResultSet (const std::string &explanation)
 
 ResultSet (int64_t queue_time_ms, int64_t render_time_ms, const std::shared_ptr< RowSetMemoryOwner > row_set_mem_owner)
 
 ~ResultSet ()
 
ResultSetRowIterator rowIterator (size_t from_logical_index, bool translate_strings, bool decimal_to_double) const
 
ResultSetRowIterator rowIterator (bool translate_strings, bool decimal_to_double) const
 
ExecutorDeviceType getDeviceType () const
 
const ResultSetStorage * allocateStorage () const
 
const ResultSetStorage * allocateStorage (int8_t *, const std::vector< int64_t > &) const
 
const ResultSetStorage * allocateStorage (const std::vector< int64_t > &) const
 
void updateStorageEntryCount (const size_t new_entry_count)
 
std::vector< TargetValue > getNextRow (const bool translate_strings, const bool decimal_to_double) const
 
size_t getCurrentRowBufferIndex () const
 
std::vector< TargetValue > getRowAt (const size_t index) const
 
TargetValue getRowAt (const size_t row_idx, const size_t col_idx, const bool translate_strings, const bool decimal_to_double=true) const
 
OneIntegerColumnRow getOneColRow (const size_t index) const
 
std::vector< TargetValue > getRowAtNoTranslations (const size_t index, const std::vector< bool > &targets_to_skip={}) const
 
bool isRowAtEmpty (const size_t index) const
 
void sort (const std::list< Analyzer::OrderEntry > &order_entries, const size_t top_n)
 
void keepFirstN (const size_t n)
 
void dropFirstN (const size_t n)
 
void append (ResultSet &that)
 
const ResultSetStorage * getStorage () const
 
size_t colCount () const
 
SQLTypeInfo getColType (const size_t col_idx) const
 
size_t rowCount (const bool force_parallel=false) const
 
void setCachedRowCount (const size_t row_count) const
 
size_t entryCount () const
 
size_t getBufferSizeBytes (const ExecutorDeviceType device_type) const
 
bool definitelyHasNoRows () const
 
const QueryMemoryDescriptor & getQueryMemDesc () const
 
const std::vector< TargetInfo > & getTargetInfos () const
 
const std::vector< int64_t > & getTargetInitVals () const
 
int8_t * getDeviceEstimatorBuffer () const
 
int8_t * getHostEstimatorBuffer () const
 
void syncEstimatorBuffer () const
 
size_t getNDVEstimator () const
 
void setQueueTime (const int64_t queue_time)
 
void setKernelQueueTime (const int64_t kernel_queue_time)
 
void addCompilationQueueTime (const int64_t compilation_queue_time)
 
int64_t getQueueTime () const
 
int64_t getRenderTime () const
 
void moveToBegin () const
 
bool isTruncated () const
 
bool isExplain () const
 
bool isGeoColOnGpu (const size_t col_idx) const
 
int getDeviceId () const
 
void fillOneEntry (const std::vector< int64_t > &entry)
 
void initializeStorage () const
 
void holdChunks (const std::list< std::shared_ptr< Chunk_NS::Chunk >> &chunks)
 
void holdChunkIterators (const std::shared_ptr< std::list< ChunkIter >> chunk_iters)
 
void holdLiterals (std::vector< int8_t > &literal_buff)
 
std::shared_ptr< RowSetMemoryOwner > getRowSetMemOwner () const
 
const std::vector< uint32_t > & getPermutationBuffer () const
 
const bool isPermutationBufferEmpty () const
 
void serialize (TSerializedRows &serialized_rows) const
 
size_t getLimit () const
 
GeoReturnType getGeoReturnType () const
 
void setGeoReturnType (const GeoReturnType val)
 
void copyColumnIntoBuffer (const size_t column_idx, int8_t *output_buffer, const size_t output_buffer_size) const
 
bool isDirectColumnarConversionPossible () const
 
bool didOutputColumnar () const
 
bool isZeroCopyColumnarConversionPossible (size_t column_idx) const
 
const int8_t * getColumnarBuffer (size_t column_idx) const
 
QueryDescriptionType getQueryDescriptionType () const
 
const int8_t getPaddedSlotWidthBytes (const size_t slot_idx) const
 
std::tuple< std::vector< bool >, size_t > getSingleSlotTargetBitmap () const
 
std::tuple< std::vector< bool >, size_t > getSupportedSingleSlotTargetBitmap () const
 
std::vector< size_t > getSlotIndicesForTargetIndices () const
 
const std::vector< ColumnLazyFetchInfo > & getLazyFetchInfo () const
 
void setSeparateVarlenStorageValid (const bool val)
 
std::shared_ptr< const std::vector< std::string > > getStringDictionaryPayloadCopy (const int dict_id) const
 
template<typename ENTRY_TYPE , QueryDescriptionType QUERY_TYPE, bool COLUMNAR_FORMAT>
ENTRY_TYPE getEntryAt (const size_t row_idx, const size_t target_idx, const size_t slot_idx) const
 
template<typename ENTRY_TYPE , QueryDescriptionType QUERY_TYPE, bool COLUMNAR_FORMAT>
ENTRY_TYPE getEntryAt (const size_t row_idx, const size_t target_idx, const size_t slot_idx) const
 
template<typename ENTRY_TYPE >
ENTRY_TYPE getColumnarPerfectHashEntryAt (const size_t row_idx, const size_t target_idx, const size_t slot_idx) const
 
template<typename ENTRY_TYPE >
ENTRY_TYPE getRowWisePerfectHashEntryAt (const size_t row_idx, const size_t target_idx, const size_t slot_idx) const
 
template<typename ENTRY_TYPE >
ENTRY_TYPE getRowWiseBaselineEntryAt (const size_t row_idx, const size_t target_idx, const size_t slot_idx) const
 
template<typename ENTRY_TYPE >
ENTRY_TYPE getColumnarBaselineEntryAt (const size_t row_idx, const size_t target_idx, const size_t slot_idx) const
 

Static Public Member Functions

static QueryMemoryDescriptor fixupQueryMemoryDescriptor (const QueryMemoryDescriptor &)
 
static std::unique_ptr< ResultSet > unserialize (const TSerializedRows &serialized_rows, const Executor *)
 

Private Types

using BufferSet = std::set< int64_t >
 
using SerializedVarlenBufferStorage = std::vector< std::string >
 

Private Member Functions

void advanceCursorToNextEntry (ResultSetRowIterator &iter) const
 
std::vector< TargetValue > getNextRowImpl (const bool translate_strings, const bool decimal_to_double) const
 
std::vector< TargetValue > getNextRowUnlocked (const bool translate_strings, const bool decimal_to_double) const
 
std::vector< TargetValue > getRowAt (const size_t index, const bool translate_strings, const bool decimal_to_double, const bool fixup_count_distinct_pointers, const std::vector< bool > &targets_to_skip={}) const
 
template<typename ENTRY_TYPE >
ENTRY_TYPE getColumnarPerfectHashEntryAt (const size_t row_idx, const size_t target_idx, const size_t slot_idx) const
 
template<typename ENTRY_TYPE >
ENTRY_TYPE getRowWisePerfectHashEntryAt (const size_t row_idx, const size_t target_idx, const size_t slot_idx) const
 
template<typename ENTRY_TYPE >
ENTRY_TYPE getRowWiseBaselineEntryAt (const size_t row_idx, const size_t target_idx, const size_t slot_idx) const
 
template<typename ENTRY_TYPE >
ENTRY_TYPE getColumnarBaselineEntryAt (const size_t row_idx, const size_t target_idx, const size_t slot_idx) const
 
size_t binSearchRowCount () const
 
size_t parallelRowCount () const
 
size_t advanceCursorToNextEntry () const
 
void radixSortOnGpu (const std::list< Analyzer::OrderEntry > &order_entries) const
 
void radixSortOnCpu (const std::list< Analyzer::OrderEntry > &order_entries) const
 
TargetValue getTargetValueFromBufferRowwise (int8_t *rowwise_target_ptr, int8_t *keys_ptr, const size_t entry_buff_idx, const TargetInfo &target_info, const size_t target_logical_idx, const size_t slot_idx, const bool translate_strings, const bool decimal_to_double, const bool fixup_count_distinct_pointers) const
 
TargetValue getTargetValueFromBufferColwise (const int8_t *col_ptr, const int8_t *keys_ptr, const QueryMemoryDescriptor &query_mem_desc, const size_t local_entry_idx, const size_t global_entry_idx, const TargetInfo &target_info, const size_t target_logical_idx, const size_t slot_idx, const bool translate_strings, const bool decimal_to_double) const
 
TargetValue makeTargetValue (const int8_t *ptr, const int8_t compact_sz, const TargetInfo &target_info, const size_t target_logical_idx, const bool translate_strings, const bool decimal_to_double, const size_t entry_buff_idx) const
 
TargetValue makeVarlenTargetValue (const int8_t *ptr1, const int8_t compact_sz1, const int8_t *ptr2, const int8_t compact_sz2, const TargetInfo &target_info, const size_t target_logical_idx, const bool translate_strings, const size_t entry_buff_idx) const
 
TargetValue makeGeoTargetValue (const int8_t *geo_target_ptr, const size_t slot_idx, const TargetInfo &target_info, const size_t target_logical_idx, const size_t entry_buff_idx) const
 
InternalTargetValue getColumnInternal (const int8_t *buff, const size_t entry_idx, const size_t target_logical_idx, const StorageLookupResult &storage_lookup_result) const
 
InternalTargetValue getVarlenOrderEntry (const int64_t str_ptr, const size_t str_len) const
 
int64_t lazyReadInt (const int64_t ival, const size_t target_logical_idx, const StorageLookupResult &storage_lookup_result) const
 
std::pair< size_t, size_t > getStorageIndex (const size_t entry_idx) const
 
const std::vector< const int8_t * > & getColumnFrag (const size_t storge_idx, const size_t col_logical_idx, int64_t &global_idx) const
 
StorageLookupResult findStorage (const size_t entry_idx) const
 
std::function< bool(const uint32_t, const uint32_t)> createComparator (const std::list< Analyzer::OrderEntry > &order_entries, const bool use_heap)
 
void sortPermutation (const std::function< bool(const uint32_t, const uint32_t)> compare)
 
std::vector< uint32_t > initPermutationBuffer (const size_t start, const size_t step)
 
void parallelTop (const std::list< Analyzer::OrderEntry > &order_entries, const size_t top_n)
 
void baselineSort (const std::list< Analyzer::OrderEntry > &order_entries, const size_t top_n)
 
void doBaselineSort (const ExecutorDeviceType device_type, const std::list< Analyzer::OrderEntry > &order_entries, const size_t top_n)
 
bool canUseFastBaselineSort (const std::list< Analyzer::OrderEntry > &order_entries, const size_t top_n)
 
Data_Namespace::DataMgr * getDataManager () const
 
int getGpuCount () const
 
void serializeProjection (TSerializedRows &serialized_rows) const
 
void serializeVarlenAggColumn (int8_t *buf, std::vector< std::string > &varlen_bufer) const
 
void serializeCountDistinctColumns (TSerializedRows &) const
 
void unserializeCountDistinctColumns (const TSerializedRows &)
 
void fixupCountDistinctPointers ()
 
void create_active_buffer_set (BufferSet &count_distinct_active_buffer_set) const
 
int64_t getDistinctBufferRefFromBufferRowwise (int8_t *rowwise_target_ptr, const TargetInfo &target_info) const
 

Static Private Member Functions

static bool isNull (const SQLTypeInfo &ti, const InternalTargetValue &val, const bool float_argument_input)
 
static void topPermutation (std::vector< uint32_t > &to_sort, const size_t n, const std::function< bool(const uint32_t, const uint32_t)> compare)
 

Private Attributes

const std::vector< TargetInfo > targets_
 
const ExecutorDeviceType device_type_
 
const int device_id_
 
QueryMemoryDescriptor query_mem_desc_
 
std::unique_ptr< ResultSetStorage > storage_
 
AppendedStorage appended_storage_
 
size_t crt_row_buff_idx_
 
size_t fetched_so_far_
 
size_t drop_first_
 
size_t keep_first_
 
std::shared_ptr< RowSetMemoryOwner > row_set_mem_owner_
 
std::vector< uint32_t > permutation_
 
QueryExecutionTimings timings_
 
const Executor * executor_
 
std::list< std::shared_ptr< Chunk_NS::Chunk > > chunks_
 
std::vector< std::shared_ptr< std::list< ChunkIter > > > chunk_iters_
 
std::vector< std::vector< int8_t > > literal_buffers_
 
const std::vector< ColumnLazyFetchInfo > lazy_fetch_info_
 
std::vector< std::vector< std::vector< const int8_t * > > > col_buffers_
 
std::vector< std::vector< std::vector< int64_t > > > frag_offsets_
 
std::vector< std::vector< int64_t > > consistent_frag_sizes_
 
const std::shared_ptr< const Analyzer::Estimator > estimator_
 
Data_Namespace::AbstractBuffer * device_estimator_buffer_ {nullptr}
 
int8_t * host_estimator_buffer_ {nullptr}
 
Data_Namespace::DataMgr * data_mgr_
 
std::vector< SerializedVarlenBufferStorage > serialized_varlen_buffer_
 
bool separate_varlen_storage_valid_
 
std::string explanation_
 
const bool just_explain_
 
std::atomic< int64_t > cached_row_count_
 
std::mutex row_iteration_mutex_
 
GeoReturnType geo_return_type_
 
std::unique_ptr< ResultSetComparator< RowWiseTargetAccessor > > row_wise_comparator_
 
std::unique_ptr< ResultSetComparator< ColumnWiseTargetAccessor > > column_wise_comparator_
 

Friends

class ResultSetManager
 
class ResultSetRowIterator
 
class ColumnarResults
 

Detailed Description

Definition at line 304 of file ResultSet.h.

Member Typedef Documentation

◆ BufferSet

using ResultSet::BufferSet = std::set<int64_t>
private

Definition at line 830 of file ResultSet.h.

◆ SerializedVarlenBufferStorage

using ResultSet::SerializedVarlenBufferStorage = std::vector<std::string>
private

Definition at line 868 of file ResultSet.h.

Member Enumeration Documentation

◆ GeoReturnType

Geo return type options when accessing geo columns from a result set.

Enumerator
GeoTargetValue 

Copies the geo data into a struct of vectors - coords are uncompressed

WktString 

Returns the geo data as a WKT string

GeoTargetValuePtr 

Returns only the pointers of the underlying buffers for the geo data.

GeoTargetValueGpuPtr 

If geo data is currently on a device, keep the data on the device and return the device ptrs

Definition at line 499 of file ResultSet.h.

499  {
502  WktString,
505  GeoTargetValueGpuPtr
507  };
boost::optional< boost::variant< GeoPointTargetValue, GeoLineStringTargetValue, GeoPolyTargetValue, GeoMultiPolyTargetValue > > GeoTargetValue
Definition: TargetValue.h:161
boost::variant< GeoPointTargetValuePtr, GeoLineStringTargetValuePtr, GeoPolyTargetValuePtr, GeoMultiPolyTargetValuePtr > GeoTargetValuePtr
Definition: TargetValue.h:165

Constructor & Destructor Documentation

◆ ResultSet() [1/5]

ResultSet::ResultSet ( const std::vector< TargetInfo > &  targets,
const ExecutorDeviceType  device_type,
const QueryMemoryDescriptor &  query_mem_desc,
const std::shared_ptr< RowSetMemoryOwner >  row_set_mem_owner,
const Executor *  executor 
)

Definition at line 104 of file ResultSet.cpp.

References CudaAllocator::allocGpuAbstractBuffer(), cached_row_count_, checked_calloc(), col_buffers_, consistent_frag_sizes_, crt_row_buff_idx_, data_mgr_, device_estimator_buffer_, device_id_, device_type_, drop_first_, estimator_, executor_, fetched_so_far_, frag_offsets_, geo_return_type_, Data_Namespace::DataMgr::getCudaMgr(), Data_Namespace::AbstractBuffer::getMemoryPtr(), GPU, host_estimator_buffer_, just_explain_, keep_first_, lazy_fetch_info_, query_mem_desc_, row_set_mem_owner_, separate_varlen_storage_valid_, targets_, WktString, and CudaMgr_Namespace::CudaMgr::zeroDeviceMem().

109  : targets_(targets)
110  , device_type_(device_type)
111  , device_id_(-1)
112  , query_mem_desc_(query_mem_desc)
113  , crt_row_buff_idx_(0)
114  , fetched_so_far_(0)
115  , drop_first_(0)
116  , keep_first_(0)
117  , row_set_mem_owner_(row_set_mem_owner)
118  , executor_(executor)
119  , data_mgr_(nullptr)
121  , just_explain_(false)
122  , cached_row_count_(-1)
GeoReturnType geo_return_type_
Definition: ResultSet.h:878
const Executor * executor_
Definition: ResultSet.h:850
QueryMemoryDescriptor query_mem_desc_
Definition: ResultSet.h:839
size_t keep_first_
Definition: ResultSet.h:845
const bool just_explain_
Definition: ResultSet.h:873
std::atomic< int64_t > cached_row_count_
Definition: ResultSet.h:874
const std::vector< TargetInfo > targets_
Definition: ResultSet.h:836
std::shared_ptr< RowSetMemoryOwner > row_set_mem_owner_
Definition: ResultSet.h:846
size_t drop_first_
Definition: ResultSet.h:844
Data_Namespace::DataMgr * data_mgr_
Definition: ResultSet.h:865
const ExecutorDeviceType device_type_
Definition: ResultSet.h:837
size_t fetched_so_far_
Definition: ResultSet.h:843
size_t crt_row_buff_idx_
Definition: ResultSet.h:842
bool separate_varlen_storage_valid_
Definition: ResultSet.h:871
const int device_id_
Definition: ResultSet.h:838
+ Here is the call graph for this function:

◆ ResultSet() [2/5]

ResultSet::ResultSet ( const std::vector< TargetInfo > &  targets,
const std::vector< ColumnLazyFetchInfo > &  lazy_fetch_info,
const std::vector< std::vector< const int8_t *>> &  col_buffers,
const std::vector< std::vector< int64_t >> &  frag_offsets,
const std::vector< int64_t > &  consistent_frag_sizes,
const ExecutorDeviceType  device_type,
const int  device_id,
const QueryMemoryDescriptor &  query_mem_desc,
const std::shared_ptr< RowSetMemoryOwner >  row_set_mem_owner,
const Executor *  executor 
)

◆ ResultSet() [3/5]

ResultSet::ResultSet ( const std::shared_ptr< const Analyzer::Estimator > ,
const ExecutorDeviceType  device_type,
const int  device_id,
Data_Namespace::DataMgr *  data_mgr 
)

◆ ResultSet() [4/5]

ResultSet::ResultSet ( const std::string &  explanation)

Definition at line 181 of file ResultSet.cpp.

183  , device_id_(-1)
184  , fetched_so_far_(0)
186  , explanation_(explanation)
187  , just_explain_(true)
188  , cached_row_count_(-1)
GeoReturnType geo_return_type_
Definition: ResultSet.h:878
const bool just_explain_
Definition: ResultSet.h:873
std::atomic< int64_t > cached_row_count_
Definition: ResultSet.h:874
std::string explanation_
Definition: ResultSet.h:872
const ExecutorDeviceType device_type_
Definition: ResultSet.h:837
size_t fetched_so_far_
Definition: ResultSet.h:843
bool separate_varlen_storage_valid_
Definition: ResultSet.h:871
const int device_id_
Definition: ResultSet.h:838

◆ ResultSet() [5/5]

ResultSet::ResultSet ( int64_t  queue_time_ms,
int64_t  render_time_ms,
const std::shared_ptr< RowSetMemoryOwner >  row_set_mem_owner 
)

Definition at line 191 of file ResultSet.cpp.

References cached_row_count_, geo_return_type_, just_explain_, separate_varlen_storage_valid_, and WktString.

195  , device_id_(-1)
196  , fetched_so_far_(0)
197  , row_set_mem_owner_(row_set_mem_owner)
198  , timings_(QueryExecutionTimings{queue_time_ms, render_time_ms, 0, 0})
200  , just_explain_(true)
201  , cached_row_count_(-1)
GeoReturnType geo_return_type_
Definition: ResultSet.h:878
const bool just_explain_
Definition: ResultSet.h:873
std::atomic< int64_t > cached_row_count_
Definition: ResultSet.h:874
std::shared_ptr< RowSetMemoryOwner > row_set_mem_owner_
Definition: ResultSet.h:846
QueryExecutionTimings timings_
Definition: ResultSet.h:849
const ExecutorDeviceType device_type_
Definition: ResultSet.h:837
size_t fetched_so_far_
Definition: ResultSet.h:843
bool separate_varlen_storage_valid_
Definition: ResultSet.h:871
const int device_id_
Definition: ResultSet.h:838

◆ ~ResultSet()

ResultSet::~ResultSet ( )

Definition at line 204 of file ResultSet.cpp.

References appended_storage_, CHECK, CPU, data_mgr_, device_estimator_buffer_, device_type_, Data_Namespace::DataMgr::free(), host_estimator_buffer_, and storage_.

204  {
205  if (storage_) {
206  if (!storage_->buff_is_provided_) {
207  CHECK(storage_->getUnderlyingBuffer());
208  free(storage_->getUnderlyingBuffer());
209  }
210  }
211  for (auto& storage : appended_storage_) {
212  if (storage && !storage->buff_is_provided_) {
213  free(storage->getUnderlyingBuffer());
214  }
215  }
219  }
221  CHECK(data_mgr_);
223  }
224 }
AppendedStorage appended_storage_
Definition: ResultSet.h:841
std::unique_ptr< ResultSetStorage > storage_
Definition: ResultSet.h:840
Data_Namespace::DataMgr * data_mgr_
Definition: ResultSet.h:865
int8_t * host_estimator_buffer_
Definition: ResultSet.h:864
const ExecutorDeviceType device_type_
Definition: ResultSet.h:837
#define CHECK(condition)
Definition: Logger.h:197
void free(AbstractBuffer *buffer)
Definition: DataMgr.cpp:461
Data_Namespace::AbstractBuffer * device_estimator_buffer_
Definition: ResultSet.h:863
+ Here is the call graph for this function:

Member Function Documentation

◆ addCompilationQueueTime()

void ResultSet::addCompilationQueueTime ( const int64_t  compilation_queue_time)

Definition at line 479 of file ResultSet.cpp.

References ResultSet::QueryExecutionTimings::compilation_queue_time, and timings_.

479  {
480  timings_.compilation_queue_time += compilation_queue_time;
481 }
QueryExecutionTimings timings_
Definition: ResultSet.h:849

◆ advanceCursorToNextEntry() [1/2]

void ResultSet::advanceCursorToNextEntry ( ResultSetRowIterator iter) const
private

Definition at line 693 of file ResultSetIteration.cpp.

References CHECK_LE, ResultSetRowIterator::crt_row_buff_idx_, drop_first_, entryCount(), ResultSetRowIterator::fetched_so_far_, findStorage(), ResultSetRowIterator::global_entry_idx_, ResultSetRowIterator::global_entry_idx_valid_, keep_first_, and permutation_.

693  {
695  iter.global_entry_idx_valid_ = false;
696  return;
697  }
698 
699  while (iter.crt_row_buff_idx_ < entryCount()) {
700  const auto entry_idx = permutation_.empty() ? iter.crt_row_buff_idx_
702  const auto storage_lookup_result = findStorage(entry_idx);
703  const auto storage = storage_lookup_result.storage_ptr;
704  const auto fixedup_entry_idx = storage_lookup_result.fixedup_entry_idx;
705  if (!storage->isEmptyEntry(fixedup_entry_idx)) {
706  if (iter.fetched_so_far_ < drop_first_) {
707  ++iter.fetched_so_far_;
708  } else {
709  break;
710  }
711  }
712  ++iter.crt_row_buff_idx_;
713  }
714  if (permutation_.empty()) {
716  } else {
718  iter.global_entry_idx_ = iter.crt_row_buff_idx_ == permutation_.size()
719  ? iter.crt_row_buff_idx_
721  }
722 
724 
725  if (iter.global_entry_idx_valid_) {
726  ++iter.crt_row_buff_idx_;
727  ++iter.fetched_so_far_;
728  }
729 }
size_t entryCount() const
size_t keep_first_
Definition: ResultSet.h:845
std::vector< uint32_t > permutation_
Definition: ResultSet.h:847
size_t global_entry_idx_
Definition: ResultSet.h:278
size_t drop_first_
Definition: ResultSet.h:844
StorageLookupResult findStorage(const size_t entry_idx) const
Definition: ResultSet.cpp:684
#define CHECK_LE(x, y)
Definition: Logger.h:208
size_t crt_row_buff_idx_
Definition: ResultSet.h:277
bool global_entry_idx_valid_
Definition: ResultSet.h:279
+ Here is the call graph for this function:

◆ advanceCursorToNextEntry() [2/2]

size_t ResultSet::advanceCursorToNextEntry ( ) const
private

Definition at line 733 of file ResultSetIteration.cpp.

References CHECK_LE, crt_row_buff_idx_, entryCount(), findStorage(), and permutation_.

733  {
734  while (crt_row_buff_idx_ < entryCount()) {
735  const auto entry_idx =
737  const auto storage_lookup_result = findStorage(entry_idx);
738  const auto storage = storage_lookup_result.storage_ptr;
739  const auto fixedup_entry_idx = storage_lookup_result.fixedup_entry_idx;
740  if (!storage->isEmptyEntry(fixedup_entry_idx)) {
741  break;
742  }
744  }
745  if (permutation_.empty()) {
746  return crt_row_buff_idx_;
747  }
751 }
size_t entryCount() const
std::vector< uint32_t > permutation_
Definition: ResultSet.h:847
StorageLookupResult findStorage(const size_t entry_idx) const
Definition: ResultSet.cpp:684
#define CHECK_LE(x, y)
Definition: Logger.h:208
size_t crt_row_buff_idx_
Definition: ResultSet.h:842
+ Here is the call graph for this function:

◆ allocateStorage() [1/3]

const ResultSetStorage * ResultSet::allocateStorage ( ) const

Definition at line 230 of file ResultSet.cpp.

References CHECK, device_type_, QueryMemoryDescriptor::getBufferSizeBytes(), query_mem_desc_, row_set_mem_owner_, storage_, and targets_.

230  {
231  CHECK(!storage_);
233  auto buff =
235  storage_.reset(
236  new ResultSetStorage(targets_, query_mem_desc_, buff, /*buff_is_provided=*/true));
237  return storage_.get();
238 }
QueryMemoryDescriptor query_mem_desc_
Definition: ResultSet.h:839
std::unique_ptr< ResultSetStorage > storage_
Definition: ResultSet.h:840
const std::vector< TargetInfo > targets_
Definition: ResultSet.h:836
std::shared_ptr< RowSetMemoryOwner > row_set_mem_owner_
Definition: ResultSet.h:846
size_t getBufferSizeBytes(const RelAlgExecutionUnit &ra_exe_unit, const unsigned thread_count, const ExecutorDeviceType device_type) const
const ExecutorDeviceType device_type_
Definition: ResultSet.h:837
#define CHECK(condition)
Definition: Logger.h:197
+ Here is the call graph for this function:

◆ allocateStorage() [2/3]

const ResultSetStorage* ResultSet::allocateStorage ( int8_t *  ,
const std::vector< int64_t > &   
) const

◆ allocateStorage() [3/3]

const ResultSetStorage* ResultSet::allocateStorage ( const std::vector< int64_t > &  ) const

◆ append()

void ResultSet::append ( ResultSet that)

Definition at line 270 of file ResultSet.cpp.

References appended_storage_, cached_row_count_, CHECK, CHECK_EQ, chunk_iters_, chunks_, col_buffers_, consistent_frag_sizes_, frag_offsets_, QueryMemoryDescriptor::getEntryCount(), literal_buffers_, query_mem_desc_, separate_varlen_storage_valid_, serialized_varlen_buffer_, and QueryMemoryDescriptor::setEntryCount().

270  {
272  if (!that.storage_) {
273  return;
274  }
275  appended_storage_.push_back(std::move(that.storage_));
278  appended_storage_.back()->query_mem_desc_.getEntryCount());
279  chunks_.insert(chunks_.end(), that.chunks_.begin(), that.chunks_.end());
280  col_buffers_.insert(
281  col_buffers_.end(), that.col_buffers_.begin(), that.col_buffers_.end());
282  frag_offsets_.insert(
283  frag_offsets_.end(), that.frag_offsets_.begin(), that.frag_offsets_.end());
285  that.consistent_frag_sizes_.begin(),
286  that.consistent_frag_sizes_.end());
287  chunk_iters_.insert(
288  chunk_iters_.end(), that.chunk_iters_.begin(), that.chunk_iters_.end());
290  CHECK(that.separate_varlen_storage_valid_);
292  that.serialized_varlen_buffer_.begin(),
293  that.serialized_varlen_buffer_.end());
294  }
295  for (auto& buff : that.literal_buffers_) {
296  literal_buffers_.push_back(std::move(buff));
297  }
298 }
#define CHECK_EQ(x, y)
Definition: Logger.h:205
void setEntryCount(const size_t val)
AppendedStorage appended_storage_
Definition: ResultSet.h:841
QueryMemoryDescriptor query_mem_desc_
Definition: ResultSet.h:839
std::vector< std::shared_ptr< std::list< ChunkIter > > > chunk_iters_
Definition: ResultSet.h:853
std::vector< SerializedVarlenBufferStorage > serialized_varlen_buffer_
Definition: ResultSet.h:870
std::atomic< int64_t > cached_row_count_
Definition: ResultSet.h:874
std::list< std::shared_ptr< Chunk_NS::Chunk > > chunks_
Definition: ResultSet.h:852
std::vector< std::vector< int8_t > > literal_buffers_
Definition: ResultSet.h:856
std::vector< std::vector< std::vector< const int8_t * > > > col_buffers_
Definition: ResultSet.h:858
std::vector< std::vector< int64_t > > consistent_frag_sizes_
Definition: ResultSet.h:860
#define CHECK(condition)
Definition: Logger.h:197
std::vector< std::vector< std::vector< int64_t > > > frag_offsets_
Definition: ResultSet.h:859
bool separate_varlen_storage_valid_
Definition: ResultSet.h:871
+ Here is the call graph for this function:

◆ baselineSort()

void ResultSet::baselineSort ( const std::list< Analyzer::OrderEntry > &  order_entries,
const size_t  top_n 
)
private

Referenced by sort().

+ Here is the caller graph for this function:

◆ binSearchRowCount()

size_t ResultSet::binSearchRowCount ( ) const
private

Definition at line 380 of file ResultSet.cpp.

References appended_storage_, drop_first_, anonymous_namespace{ResultSet.cpp}::get_truncated_row_count(), getLimit(), and storage_.

Referenced by rowCount().

380  {
381  if (!storage_) {
382  return 0;
383  }
384 
385  size_t row_count = storage_->binSearchRowCount();
386  for (auto& s : appended_storage_) {
387  row_count += s->binSearchRowCount();
388  }
389 
390  return get_truncated_row_count(row_count, getLimit(), drop_first_);
391 }
AppendedStorage appended_storage_
Definition: ResultSet.h:841
std::unique_ptr< ResultSetStorage > storage_
Definition: ResultSet.h:840
size_t get_truncated_row_count(size_t total_row_count, size_t limit, size_t offset)
Definition: ResultSet.cpp:319
size_t drop_first_
Definition: ResultSet.h:844
size_t getLimit() const
Definition: ResultSet.cpp:991
+ Here is the call graph for this function:
+ Here is the caller graph for this function:

◆ canUseFastBaselineSort()

bool ResultSet::canUseFastBaselineSort ( const std::list< Analyzer::OrderEntry > &  order_entries,
const size_t  top_n 
)
private

Referenced by sort().

+ Here is the caller graph for this function:

◆ colCount()

size_t ResultSet::colCount ( ) const

Definition at line 304 of file ResultSet.cpp.

References just_explain_, and targets_.

304  {
305  return just_explain_ ? 1 : targets_.size();
306 }
const bool just_explain_
Definition: ResultSet.h:873
const std::vector< TargetInfo > targets_
Definition: ResultSet.h:836

◆ copyColumnIntoBuffer()

void ResultSet::copyColumnIntoBuffer ( const size_t  column_idx,
int8_t *  output_buffer,
const size_t  output_buffer_size 
) const

For each specified column, this function goes through all available storages and copy its content into a contiguous output_buffer

Definition at line 1172 of file ResultSetIteration.cpp.

References appended_storage_, CHECK, CHECK_LT, QueryMemoryDescriptor::getPaddedSlotWidthBytes(), QueryMemoryDescriptor::getSlotCount(), isDirectColumnarConversionPossible(), query_mem_desc_, and storage_.

1174  {
1176  CHECK_LT(column_idx, query_mem_desc_.getSlotCount());
1177  CHECK(output_buffer_size > 0);
1178  CHECK(output_buffer);
1179  const auto column_width_size = query_mem_desc_.getPaddedSlotWidthBytes(column_idx);
1180  size_t out_buff_offset = 0;
1181 
1182  // the main storage:
1183  const size_t crt_storage_row_count = storage_->query_mem_desc_.getEntryCount();
1184  const size_t crt_buffer_size = crt_storage_row_count * column_width_size;
1185  const size_t column_offset = storage_->query_mem_desc_.getColOffInBytes(column_idx);
1186  const int8_t* storage_buffer = storage_->getUnderlyingBuffer() + column_offset;
1187  CHECK(crt_buffer_size <= output_buffer_size);
1188  std::memcpy(output_buffer, storage_buffer, crt_buffer_size);
1189 
1190  out_buff_offset += crt_buffer_size;
1191 
1192  // the appended storages:
1193  for (size_t i = 0; i < appended_storage_.size(); i++) {
1194  const size_t crt_storage_row_count =
1195  appended_storage_[i]->query_mem_desc_.getEntryCount();
1196  if (crt_storage_row_count == 0) {
1197  // skip an empty appended storage
1198  continue;
1199  }
1200  CHECK_LT(out_buff_offset, output_buffer_size);
1201  const size_t crt_buffer_size = crt_storage_row_count * column_width_size;
1202  const size_t column_offset =
1203  appended_storage_[i]->query_mem_desc_.getColOffInBytes(column_idx);
1204  const int8_t* storage_buffer =
1205  appended_storage_[i]->getUnderlyingBuffer() + column_offset;
1206  CHECK(out_buff_offset + crt_buffer_size <= output_buffer_size);
1207  std::memcpy(output_buffer + out_buff_offset, storage_buffer, crt_buffer_size);
1208 
1209  out_buff_offset += crt_buffer_size;
1210  }
1211 }
AppendedStorage appended_storage_
Definition: ResultSet.h:841
QueryMemoryDescriptor query_mem_desc_
Definition: ResultSet.h:839
std::unique_ptr< ResultSetStorage > storage_
Definition: ResultSet.h:840
bool isDirectColumnarConversionPossible() const
Definition: ResultSet.cpp:1018
const int8_t getPaddedSlotWidthBytes(const size_t slot_idx) const
#define CHECK_LT(x, y)
Definition: Logger.h:207
#define CHECK(condition)
Definition: Logger.h:197
+ Here is the call graph for this function:

◆ create_active_buffer_set()

void ResultSet::create_active_buffer_set ( BufferSet count_distinct_active_buffer_set) const
private

◆ createComparator()

std::function<bool(const uint32_t, const uint32_t)> ResultSet::createComparator ( const std::list< Analyzer::OrderEntry > &  order_entries,
const bool  use_heap 
)
inlineprivate

Definition at line 774 of file ResultSet.h.

References DEBUG_TIMER, QueryMemoryDescriptor::didOutputColumnar(), and ResultSetStorage::query_mem_desc_.

Referenced by parallelTop(), and sort().

776  {
777  auto timer = DEBUG_TIMER(__func__);
780  std::make_unique<ResultSetComparator<ColumnWiseTargetAccessor>>(
781  order_entries, use_heap, this);
782  return [this](const uint32_t lhs, const uint32_t rhs) -> bool {
783  return (*this->column_wise_comparator_)(lhs, rhs);
784  };
785  } else {
786  row_wise_comparator_ = std::make_unique<ResultSetComparator<RowWiseTargetAccessor>>(
787  order_entries, use_heap, this);
788  return [this](const uint32_t lhs, const uint32_t rhs) -> bool {
789  return (*this->row_wise_comparator_)(lhs, rhs);
790  };
791  }
792  }
std::unique_ptr< ResultSetComparator< ColumnWiseTargetAccessor > > column_wise_comparator_
Definition: ResultSet.h:883
QueryMemoryDescriptor query_mem_desc_
Definition: ResultSet.h:839
std::unique_ptr< ResultSetComparator< RowWiseTargetAccessor > > row_wise_comparator_
Definition: ResultSet.h:882
#define DEBUG_TIMER(name)
Definition: Logger.h:313
+ Here is the call graph for this function:
+ Here is the caller graph for this function:

◆ definitelyHasNoRows()

bool ResultSet::definitelyHasNoRows ( ) const

Definition at line 428 of file ResultSet.cpp.

References estimator_, just_explain_, and storage_.

428  {
429  return !storage_ && !estimator_ && !just_explain_;
430 }
std::unique_ptr< ResultSetStorage > storage_
Definition: ResultSet.h:840
const bool just_explain_
Definition: ResultSet.h:873
const std::shared_ptr< const Analyzer::Estimator > estimator_
Definition: ResultSet.h:862

◆ didOutputColumnar()

bool ResultSet::didOutputColumnar ( ) const
inline

Definition at line 517 of file ResultSet.h.

References QueryMemoryDescriptor::didOutputColumnar(), and ResultSetStorage::query_mem_desc_.

517 { return this->query_mem_desc_.didOutputColumnar(); }
QueryMemoryDescriptor query_mem_desc_
Definition: ResultSet.h:839
+ Here is the call graph for this function:

◆ doBaselineSort()

void ResultSet::doBaselineSort ( const ExecutorDeviceType  device_type,
const std::list< Analyzer::OrderEntry > &  order_entries,
const size_t  top_n 
)
private

Referenced by sort().

+ Here is the caller graph for this function:

◆ dropFirstN()

void ResultSet::dropFirstN ( const size_t  n)

Definition at line 99 of file ResultSet.cpp.

References CHECK_EQ.

99  {
101  drop_first_ = n;
102 }
#define CHECK_EQ(x, y)
Definition: Logger.h:205
std::atomic< int64_t > cached_row_count_
Definition: ResultSet.h:874
size_t drop_first_
Definition: ResultSet.h:844

◆ entryCount()

size_t ResultSet::entryCount ( ) const

Definition at line 753 of file ResultSetIteration.cpp.

References QueryMemoryDescriptor::getEntryCount(), permutation_, and query_mem_desc_.

Referenced by advanceCursorToNextEntry(), parallelRowCount(), rowCount(), and sort().

753  {
754  return permutation_.empty() ? query_mem_desc_.getEntryCount() : permutation_.size();
755 }
QueryMemoryDescriptor query_mem_desc_
Definition: ResultSet.h:839
std::vector< uint32_t > permutation_
Definition: ResultSet.h:847
+ Here is the call graph for this function:
+ Here is the caller graph for this function:

◆ fillOneEntry()

void ResultSet::fillOneEntry ( const std::vector< int64_t > &  entry)
inline

Definition at line 461 of file ResultSet.h.

References CHECK.

461  {
462  CHECK(storage_);
463  if (storage_->query_mem_desc_.didOutputColumnar()) {
464  storage_->fillOneEntryColWise(entry);
465  } else {
466  storage_->fillOneEntryRowWise(entry);
467  }
468  }
std::unique_ptr< ResultSetStorage > storage_
Definition: ResultSet.h:840
#define CHECK(condition)
Definition: Logger.h:197

◆ findStorage()

ResultSet::StorageLookupResult ResultSet::findStorage ( const size_t  entry_idx) const
private

Definition at line 684 of file ResultSet.cpp.

References appended_storage_, getStorageIndex(), and storage_.

Referenced by advanceCursorToNextEntry(), initPermutationBuffer(), and makeGeoTargetValue().

684  {
685  auto [stg_idx, fixedup_entry_idx] = getStorageIndex(entry_idx);
686  return {stg_idx ? appended_storage_[stg_idx - 1].get() : storage_.get(),
687  fixedup_entry_idx,
688  stg_idx};
689 }
AppendedStorage appended_storage_
Definition: ResultSet.h:841
std::unique_ptr< ResultSetStorage > storage_
Definition: ResultSet.h:840
std::pair< size_t, size_t > getStorageIndex(const size_t entry_idx) const
Definition: ResultSet.cpp:659
+ Here is the call graph for this function:
+ Here is the caller graph for this function:

◆ fixupCountDistinctPointers()

void ResultSet::fixupCountDistinctPointers ( )
private

◆ fixupQueryMemoryDescriptor()

QueryMemoryDescriptor ResultSet::fixupQueryMemoryDescriptor ( const QueryMemoryDescriptor query_mem_desc)
static

Definition at line 509 of file ResultSet.cpp.

References QueryMemoryDescriptor::didOutputColumnar(), and QueryMemoryDescriptor::resetGroupColWidths().

Referenced by GpuSharedMemCodeBuilder::codegenInitialization(), GpuSharedMemCodeBuilder::codegenReduction(), QueryExecutionContext::groupBufferToDeinterleavedResults(), QueryMemoryInitializer::initGroups(), QueryMemoryInitializer::QueryMemoryInitializer(), and Executor::reduceMultiDeviceResults().

510  {
511  auto query_mem_desc_copy = query_mem_desc;
512  query_mem_desc_copy.resetGroupColWidths(
513  std::vector<int8_t>(query_mem_desc_copy.getGroupbyColCount(), 8));
514  if (query_mem_desc.didOutputColumnar()) {
515  return query_mem_desc_copy;
516  }
517  query_mem_desc_copy.alignPaddedSlots();
518  return query_mem_desc_copy;
519 }
void resetGroupColWidths(const std::vector< int8_t > &new_group_col_widths)
+ Here is the call graph for this function:
+ Here is the caller graph for this function:

◆ getBufferSizeBytes()

size_t ResultSet::getBufferSizeBytes ( const ExecutorDeviceType  device_type) const

Definition at line 757 of file ResultSetIteration.cpp.

References CHECK, and storage_.

757  {
758  CHECK(storage_);
759  return storage_->query_mem_desc_.getBufferSizeBytes(device_type);
760 }
std::unique_ptr< ResultSetStorage > storage_
Definition: ResultSet.h:840
#define CHECK(condition)
Definition: Logger.h:197

◆ getColType()

SQLTypeInfo ResultSet::getColType ( const size_t  col_idx) const

Definition at line 308 of file ResultSet.cpp.

References CHECK_LT, just_explain_, kAVG, kDOUBLE, kTEXT, and targets_.

308  {
309  if (just_explain_) {
310  return SQLTypeInfo(kTEXT, false);
311  }
312  CHECK_LT(col_idx, targets_.size());
313  return targets_[col_idx].agg_kind == kAVG ? SQLTypeInfo(kDOUBLE, false)
314  : targets_[col_idx].sql_type;
315 }
const bool just_explain_
Definition: ResultSet.h:873
const std::vector< TargetInfo > targets_
Definition: ResultSet.h:836
#define CHECK_LT(x, y)
Definition: Logger.h:207
Definition: sqltypes.h:54
Definition: sqldefs.h:72

◆ getColumnarBaselineEntryAt() [1/2]

template<typename ENTRY_TYPE >
ENTRY_TYPE ResultSet::getColumnarBaselineEntryAt ( const size_t  row_idx,
const size_t  target_idx,
const size_t  slot_idx 
) const
private

◆ getColumnarBaselineEntryAt() [2/2]

template<typename ENTRY_TYPE >
ENTRY_TYPE ResultSet::getColumnarBaselineEntryAt ( const size_t  row_idx,
const size_t  target_idx,
const size_t  slot_idx 
) const

Directly accesses the result set's storage buffer for a particular data type (columnar output, baseline hash group by)

NOTE: Currently, only used in direct columnarization

Definition at line 1344 of file ResultSetIteration.cpp.

References CHECK_NE, and storage_.

1346  {
1347  CHECK_NE(storage_->query_mem_desc_.targetGroupbyIndicesSize(), size_t(0));
1348  const auto key_width = storage_->query_mem_desc_.getEffectiveKeyWidth();
1349  const auto column_offset =
1350  (storage_->query_mem_desc_.getTargetGroupbyIndex(target_idx) < 0)
1351  ? storage_->query_mem_desc_.getColOffInBytes(slot_idx)
1352  : storage_->query_mem_desc_.getTargetGroupbyIndex(target_idx) * key_width *
1353  storage_->query_mem_desc_.getEntryCount();
1354  const auto column_buffer = storage_->getUnderlyingBuffer() + column_offset;
1355  return reinterpret_cast<const ENTRY_TYPE*>(column_buffer)[row_idx];
1356 }
std::unique_ptr< ResultSetStorage > storage_
Definition: ResultSet.h:840
#define CHECK_NE(x, y)
Definition: Logger.h:206

◆ getColumnarBuffer()

const int8_t * ResultSet::getColumnarBuffer ( size_t  column_idx) const

Definition at line 1043 of file ResultSet.cpp.

References CHECK, QueryMemoryDescriptor::getColOffInBytes(), isZeroCopyColumnarConversionPossible(), query_mem_desc_, and storage_.

1043  {
1045  return storage_->getUnderlyingBuffer() + query_mem_desc_.getColOffInBytes(column_idx);
1046 }
QueryMemoryDescriptor query_mem_desc_
Definition: ResultSet.h:839
std::unique_ptr< ResultSetStorage > storage_
Definition: ResultSet.h:840
bool isZeroCopyColumnarConversionPossible(size_t column_idx) const
Definition: ResultSet.cpp:1036
#define CHECK(condition)
Definition: Logger.h:197
size_t getColOffInBytes(const size_t col_idx) const
+ Here is the call graph for this function:

◆ getColumnarPerfectHashEntryAt() [1/2]

template<typename ENTRY_TYPE >
ENTRY_TYPE ResultSet::getColumnarPerfectHashEntryAt ( const size_t  row_idx,
const size_t  target_idx,
const size_t  slot_idx 
) const
private

◆ getColumnarPerfectHashEntryAt() [2/2]

template<typename ENTRY_TYPE >
ENTRY_TYPE ResultSet::getColumnarPerfectHashEntryAt ( const size_t  row_idx,
const size_t  target_idx,
const size_t  slot_idx 
) const

Directly accesses the result set's storage buffer for a particular data type (columnar output, perfect hash group by)

NOTE: Currently, only used in direct columnarization

Definition at line 1290 of file ResultSetIteration.cpp.

References storage_.

1292  {
1293  const size_t column_offset = storage_->query_mem_desc_.getColOffInBytes(slot_idx);
1294  const int8_t* storage_buffer = storage_->getUnderlyingBuffer() + column_offset;
1295  return reinterpret_cast<const ENTRY_TYPE*>(storage_buffer)[row_idx];
1296 }
std::unique_ptr< ResultSetStorage > storage_
Definition: ResultSet.h:840

◆ getColumnFrag()

const std::vector< const int8_t * > & ResultSet::getColumnFrag ( const size_t  storage_idx,
const size_t  col_logical_idx,
int64_t &  global_idx 
) const
private

Definition at line 1143 of file ResultSetIteration.cpp.

References CHECK_EQ, CHECK_GE, CHECK_LE, CHECK_LT, col_buffers_, consistent_frag_sizes_, frag_offsets_, and anonymous_namespace{ResultSetIteration.cpp}::get_frag_id_and_local_idx().

Referenced by lazyReadInt(), makeGeoTargetValue(), makeTargetValue(), and makeVarlenTargetValue().

1145  {
1146  CHECK_LT(static_cast<size_t>(storage_idx), col_buffers_.size());
1147  if (col_buffers_[storage_idx].size() > 1) {
1148  int64_t frag_id = 0;
1149  int64_t local_idx = global_idx;
1150  if (consistent_frag_sizes_[storage_idx][col_logical_idx] != -1) {
1151  frag_id = global_idx / consistent_frag_sizes_[storage_idx][col_logical_idx];
1152  local_idx = global_idx % consistent_frag_sizes_[storage_idx][col_logical_idx];
1153  } else {
1154  std::tie(frag_id, local_idx) = get_frag_id_and_local_idx(
1155  frag_offsets_[storage_idx], col_logical_idx, global_idx);
1156  CHECK_LE(local_idx, global_idx);
1157  }
1158  CHECK_GE(frag_id, int64_t(0));
1159  CHECK_LT(static_cast<size_t>(frag_id), col_buffers_[storage_idx].size());
1160  global_idx = local_idx;
1161  return col_buffers_[storage_idx][frag_id];
1162  } else {
1163  CHECK_EQ(size_t(1), col_buffers_[storage_idx].size());
1164  return col_buffers_[storage_idx][0];
1165  }
1166 }
#define CHECK_EQ(x, y)
Definition: Logger.h:205
#define CHECK_GE(x, y)
Definition: Logger.h:210
#define CHECK_LT(x, y)
Definition: Logger.h:207
#define CHECK_LE(x, y)
Definition: Logger.h:208
std::vector< std::vector< std::vector< const int8_t * > > > col_buffers_
Definition: ResultSet.h:858
std::vector< std::vector< int64_t > > consistent_frag_sizes_
Definition: ResultSet.h:860
std::vector< std::vector< std::vector< int64_t > > > frag_offsets_
Definition: ResultSet.h:859
std::pair< int64_t, int64_t > get_frag_id_and_local_idx(const std::vector< std::vector< T >> &frag_offsets, const size_t tab_or_col_idx, const int64_t global_idx)
+ Here is the call graph for this function:
+ Here is the caller graph for this function:

◆ getColumnInternal()

InternalTargetValue ResultSet::getColumnInternal ( const int8_t *  buff,
const size_t  entry_idx,
const size_t  target_logical_idx,
const StorageLookupResult storage_lookup_result 
) const
private

◆ getCurrentRowBufferIndex()

size_t ResultSet::getCurrentRowBufferIndex ( ) const

Definition at line 262 of file ResultSet.cpp.

References crt_row_buff_idx_.

262  {
263  if (crt_row_buff_idx_ == 0) {
264  throw std::runtime_error("current row buffer iteration index is undefined");
265  }
266  return crt_row_buff_idx_ - 1;
267 }
size_t crt_row_buff_idx_
Definition: ResultSet.h:842

◆ getDataManager()

Data_Namespace::DataMgr* ResultSet::getDataManager ( ) const
private

◆ getDeviceEstimatorBuffer()

int8_t * ResultSet::getDeviceEstimatorBuffer ( ) const

Definition at line 446 of file ResultSet.cpp.

References CHECK, device_estimator_buffer_, device_type_, Data_Namespace::AbstractBuffer::getMemoryPtr(), and GPU.

446  {
450 }
virtual int8_t * getMemoryPtr()=0
const ExecutorDeviceType device_type_
Definition: ResultSet.h:837
#define CHECK(condition)
Definition: Logger.h:197
Data_Namespace::AbstractBuffer * device_estimator_buffer_
Definition: ResultSet.h:863
+ Here is the call graph for this function:

◆ getDeviceId()

int ResultSet::getDeviceId ( ) const

Definition at line 505 of file ResultSet.cpp.

References device_id_.

505  {
506  return device_id_;
507 }
const int device_id_
Definition: ResultSet.h:838

◆ getDeviceType()

ExecutorDeviceType ResultSet::getDeviceType ( ) const

Definition at line 226 of file ResultSet.cpp.

References device_type_.

226  {
227  return device_type_;
228 }
const ExecutorDeviceType device_type_
Definition: ResultSet.h:837

◆ getDistinctBufferRefFromBufferRowwise()

int64_t ResultSet::getDistinctBufferRefFromBufferRowwise ( int8_t *  rowwise_target_ptr,
const TargetInfo target_info 
) const
private

◆ getEntryAt() [1/2]

template<typename ENTRY_TYPE , QueryDescriptionType QUERY_TYPE, bool COLUMNAR_FORMAT>
ENTRY_TYPE ResultSet::getEntryAt ( const size_t  row_idx,
const size_t  target_idx,
const size_t  slot_idx 
) const

◆ getEntryAt() [2/2]

template<typename ENTRY_TYPE , QueryDescriptionType QUERY_TYPE, bool COLUMNAR_FORMAT>
ENTRY_TYPE ResultSet::getEntryAt ( const size_t  row_idx,
const size_t  target_idx,
const size_t  slot_idx 
) const

Definition at line 1214 of file ResultSetIteration.cpp.

References GroupByBaselineHash, GroupByPerfectHash, and UNREACHABLE.

1216  {
1217  if constexpr (QUERY_TYPE == QueryDescriptionType::GroupByPerfectHash) { // NOLINT
1218  if constexpr (COLUMNAR_FORMAT) { // NOLINT
1219  return getColumnarPerfectHashEntryAt<ENTRY_TYPE>(row_idx, target_idx, slot_idx);
1220  } else {
1221  return getRowWisePerfectHashEntryAt<ENTRY_TYPE>(row_idx, target_idx, slot_idx);
1222  }
1223  } else if constexpr (QUERY_TYPE == QueryDescriptionType::GroupByBaselineHash) {
1224  if constexpr (COLUMNAR_FORMAT) { // NOLINT
1225  return getColumnarBaselineEntryAt<ENTRY_TYPE>(row_idx, target_idx, slot_idx);
1226  } else {
1227  return getRowWiseBaselineEntryAt<ENTRY_TYPE>(row_idx, target_idx, slot_idx);
1228  }
1229  } else {
1230  UNREACHABLE() << "Invalid query type is used";
1231  return 0;
1232  }
1233 }
#define UNREACHABLE()
Definition: Logger.h:241

◆ getGeoReturnType()

GeoReturnType ResultSet::getGeoReturnType ( ) const
inline

Definition at line 508 of file ResultSet.h.

508 { return geo_return_type_; }
GeoReturnType geo_return_type_
Definition: ResultSet.h:878

◆ getGpuCount()

int ResultSet::getGpuCount ( ) const
private

Referenced by sort().

+ Here is the caller graph for this function:

◆ getHostEstimatorBuffer()

int8_t * ResultSet::getHostEstimatorBuffer ( ) const

Definition at line 452 of file ResultSet.cpp.

References host_estimator_buffer_.

452  {
453  return host_estimator_buffer_;
454 }
int8_t * host_estimator_buffer_
Definition: ResultSet.h:864

◆ getLazyFetchInfo()

const std::vector<ColumnLazyFetchInfo>& ResultSet::getLazyFetchInfo ( ) const
inline

Definition at line 537 of file ResultSet.h.

537  {
538  return lazy_fetch_info_;
539  }
const std::vector< ColumnLazyFetchInfo > lazy_fetch_info_
Definition: ResultSet.h:857

◆ getLimit()

size_t ResultSet::getLimit ( ) const

Definition at line 991 of file ResultSet.cpp.

References keep_first_.

Referenced by binSearchRowCount(), and parallelRowCount().

991  {
992  return keep_first_;
993 }
size_t keep_first_
Definition: ResultSet.h:845
+ Here is the caller graph for this function:

◆ getNDVEstimator()

size_t ResultSet::getNDVEstimator ( ) const

Definition at line 33 of file CardinalityEstimator.cpp.

References bitmap_set_size(), CHECK, and CHECK_LE.

33  {
34  CHECK(dynamic_cast<const Analyzer::NDVEstimator*>(estimator_.get()));
36  auto bits_set = bitmap_set_size(host_estimator_buffer_, estimator_->getBufferSize());
37  const auto total_bits = estimator_->getBufferSize() * 8;
38  CHECK_LE(bits_set, total_bits);
39  const auto unset_bits = total_bits - bits_set;
40  const auto ratio = static_cast<double>(unset_bits) / total_bits;
41  if (ratio == 0.) {
42  throw std::runtime_error("Failed to get a high quality cardinality estimation");
43  }
44  return -static_cast<double>(total_bits) * log(ratio);
45 }
const std::shared_ptr< const Analyzer::Estimator > estimator_
Definition: ResultSet.h:862
#define CHECK_LE(x, y)
Definition: Logger.h:208
int8_t * host_estimator_buffer_
Definition: ResultSet.h:864
#define CHECK(condition)
Definition: Logger.h:197
size_t bitmap_set_size(const int8_t *bitmap, const size_t bitmap_byte_sz)
Definition: CountDistinct.h:37
+ Here is the call graph for this function:

◆ getNextRow()

std::vector< TargetValue > ResultSet::getNextRow ( const bool  translate_strings,
const bool  decimal_to_double 
) const

Definition at line 294 of file ResultSetIteration.cpp.

295  {
296  std::lock_guard<std::mutex> lock(row_iteration_mutex_);
297  if (!storage_ && !just_explain_) {
298  return {};
299  }
300  return getNextRowUnlocked(translate_strings, decimal_to_double);
301 }
std::mutex row_iteration_mutex_
Definition: ResultSet.h:875
double decimal_to_double(const SQLTypeInfo &otype, int64_t oval)
std::unique_ptr< ResultSetStorage > storage_
Definition: ResultSet.h:840
std::vector< TargetValue > getNextRowUnlocked(const bool translate_strings, const bool decimal_to_double) const
const bool just_explain_
Definition: ResultSet.h:873

◆ getNextRowImpl()

std::vector< TargetValue > ResultSet::getNextRowImpl ( const bool  translate_strings,
const bool  decimal_to_double 
) const
private

Definition at line 316 of file ResultSetIteration.cpp.

References CHECK, and CHECK_EQ.

317  {
318  size_t entry_buff_idx = 0;
319  do {
321  return {};
322  }
323 
324  entry_buff_idx = advanceCursorToNextEntry();
325 
326  if (crt_row_buff_idx_ >= entryCount()) {
328  return {};
329  }
331  ++fetched_so_far_;
332 
333  } while (drop_first_ && fetched_so_far_ <= drop_first_);
334 
335  auto row = getRowAt(entry_buff_idx, translate_strings, decimal_to_double, false);
336  CHECK(!row.empty());
337 
338  return row;
339 }
#define CHECK_EQ(x, y)
Definition: Logger.h:205
double decimal_to_double(const SQLTypeInfo &otype, int64_t oval)
size_t entryCount() const
size_t keep_first_
Definition: ResultSet.h:845
std::vector< TargetValue > getRowAt(const size_t index) const
size_t drop_first_
Definition: ResultSet.h:844
#define CHECK(condition)
Definition: Logger.h:197
size_t fetched_so_far_
Definition: ResultSet.h:843
size_t crt_row_buff_idx_
Definition: ResultSet.h:842
size_t advanceCursorToNextEntry() const

◆ getNextRowUnlocked()

std::vector< TargetValue > ResultSet::getNextRowUnlocked ( const bool  translate_strings,
const bool  decimal_to_double 
) const
private

Definition at line 303 of file ResultSetIteration.cpp.

Referenced by rowCount().

305  {
306  if (just_explain_) {
307  if (fetched_so_far_) {
308  return {};
309  }
310  fetched_so_far_ = 1;
311  return {explanation_};
312  }
313  return getNextRowImpl(translate_strings, decimal_to_double);
314 }
std::vector< TargetValue > getNextRowImpl(const bool translate_strings, const bool decimal_to_double) const
double decimal_to_double(const SQLTypeInfo &otype, int64_t oval)
const bool just_explain_
Definition: ResultSet.h:873
std::string explanation_
Definition: ResultSet.h:872
size_t fetched_so_far_
Definition: ResultSet.h:843
+ Here is the caller graph for this function:

◆ getOneColRow()

OneIntegerColumnRow ResultSet::getOneColRow ( const size_t  index) const

Definition at line 232 of file ResultSetIteration.cpp.

References align_to_int64(), CHECK, get_key_bytes_rowwise(), getRowAt(), and row_ptr_rowwise().

232  {
233  const auto storage_lookup_result = findStorage(global_entry_idx);
234  const auto storage = storage_lookup_result.storage_ptr;
235  const auto local_entry_idx = storage_lookup_result.fixedup_entry_idx;
236  if (storage->isEmptyEntry(local_entry_idx)) {
237  return {0, false};
238  }
239  const auto buff = storage->buff_;
240  CHECK(buff);
242  const auto keys_ptr = row_ptr_rowwise(buff, query_mem_desc_, local_entry_idx);
243  const auto key_bytes_with_padding =
245  const auto rowwise_target_ptr = keys_ptr + key_bytes_with_padding;
246  const auto tv = getTargetValueFromBufferRowwise(rowwise_target_ptr,
247  keys_ptr,
248  global_entry_idx,
249  targets_.front(),
250  0,
251  0,
252  false,
253  false,
254  false);
255  const auto scalar_tv = boost::get<ScalarTargetValue>(&tv);
256  CHECK(scalar_tv);
257  const auto ival_ptr = boost::get<int64_t>(scalar_tv);
258  CHECK(ival_ptr);
259  return {*ival_ptr, true};
260 }
QueryMemoryDescriptor query_mem_desc_
Definition: ResultSet.h:839
const std::vector< TargetInfo > targets_
Definition: ResultSet.h:836
StorageLookupResult findStorage(const size_t entry_idx) const
Definition: ResultSet.cpp:684
T row_ptr_rowwise(T buff, const QueryMemoryDescriptor &query_mem_desc, const size_t entry_idx)
TargetValue getTargetValueFromBufferRowwise(int8_t *rowwise_target_ptr, int8_t *keys_ptr, const size_t entry_buff_idx, const TargetInfo &target_info, const size_t target_logical_idx, const size_t slot_idx, const bool translate_strings, const bool decimal_to_double, const bool fixup_count_distinct_pointers) const
#define CHECK(condition)
Definition: Logger.h:197
size_t get_key_bytes_rowwise(const QueryMemoryDescriptor &query_mem_desc)
FORCE_INLINE HOST DEVICE T align_to_int64(T addr)
+ Here is the call graph for this function:

◆ getPaddedSlotWidthBytes()

const int8_t ResultSet::getPaddedSlotWidthBytes ( const size_t  slot_idx) const
inline

Definition at line 526 of file ResultSet.h.

References QueryMemoryDescriptor::getPaddedSlotWidthBytes(), and ResultSetStorage::query_mem_desc_.

526  {
527  return query_mem_desc_.getPaddedSlotWidthBytes(slot_idx);
528  }
QueryMemoryDescriptor query_mem_desc_
Definition: ResultSet.h:839
const int8_t getPaddedSlotWidthBytes(const size_t slot_idx) const
+ Here is the call graph for this function:

◆ getPermutationBuffer()

const std::vector< uint32_t > & ResultSet::getPermutationBuffer ( ) const

Definition at line 615 of file ResultSet.cpp.

References permutation_.

615  {
616  return permutation_;
617 }
std::vector< uint32_t > permutation_
Definition: ResultSet.h:847

◆ getQueryDescriptionType()

QueryDescriptionType ResultSet::getQueryDescriptionType ( ) const
inline

Definition at line 522 of file ResultSet.h.

References QueryMemoryDescriptor::getQueryDescriptionType(), and ResultSetStorage::query_mem_desc_.

522  {
524  }
QueryMemoryDescriptor query_mem_desc_
Definition: ResultSet.h:839
QueryDescriptionType getQueryDescriptionType() const
+ Here is the call graph for this function:

◆ getQueryMemDesc()

const QueryMemoryDescriptor & ResultSet::getQueryMemDesc ( ) const

Definition at line 432 of file ResultSet.cpp.

References CHECK, and storage_.

432  {
433  CHECK(storage_);
434  return storage_->query_mem_desc_;
435 }
std::unique_ptr< ResultSetStorage > storage_
Definition: ResultSet.h:840
#define CHECK(condition)
Definition: Logger.h:197

◆ getQueueTime()

◆ getRenderTime()

int64_t ResultSet::getRenderTime ( ) const

Definition at line 488 of file ResultSet.cpp.

References ResultSet::QueryExecutionTimings::render_time, and timings_.

488  {
489  return timings_.render_time;
490 }
QueryExecutionTimings timings_
Definition: ResultSet.h:849

◆ getRowAt() [1/3]

std::vector<TargetValue> ResultSet::getRowAt ( const size_t  index) const

Referenced by get_byteoff_of_slot(), and getOneColRow().

+ Here is the caller graph for this function:

◆ getRowAt() [2/3]

TargetValue ResultSet::getRowAt ( const size_t  row_idx,
const size_t  col_idx,
const bool  translate_strings,
const bool  decimal_to_double = true 
) const

◆ getRowAt() [3/3]

std::vector<TargetValue> ResultSet::getRowAt ( const size_t  index,
const bool  translate_strings,
const bool  decimal_to_double,
const bool  fixup_count_distinct_pointers,
const std::vector< bool > &  targets_to_skip = {} 
) const
private

◆ getRowAtNoTranslations()

std::vector< TargetValue > ResultSet::getRowAtNoTranslations ( const size_t  index,
const std::vector< bool > &  targets_to_skip = {} 
) const

Definition at line 271 of file ResultSetIteration.cpp.

273  {
274  if (logical_index >= entryCount()) {
275  return {};
276  }
277  const auto entry_idx =
278  permutation_.empty() ? logical_index : permutation_[logical_index];
279  return getRowAt(entry_idx, false, false, false, targets_to_skip);
280 }
size_t entryCount() const
std::vector< TargetValue > getRowAt(const size_t index) const
std::vector< uint32_t > permutation_
Definition: ResultSet.h:847

◆ getRowSetMemOwner()

std::shared_ptr<RowSetMemoryOwner> ResultSet::getRowSetMemOwner ( ) const
inline

Definition at line 482 of file ResultSet.h.

482  {
483  return row_set_mem_owner_;
484  }
std::shared_ptr< RowSetMemoryOwner > row_set_mem_owner_
Definition: ResultSet.h:846

◆ getRowWiseBaselineEntryAt() [1/2]

template<typename ENTRY_TYPE >
ENTRY_TYPE ResultSet::getRowWiseBaselineEntryAt ( const size_t  row_idx,
const size_t  target_idx,
const size_t  slot_idx 
) const
private

◆ getRowWiseBaselineEntryAt() [2/2]

template<typename ENTRY_TYPE >
ENTRY_TYPE ResultSet::getRowWiseBaselineEntryAt ( const size_t  row_idx,
const size_t  target_idx,
const size_t  slot_idx 
) const

Directly accesses the result set's storage buffer for a particular data type (row-wise output, baseline hash group by)

NOTE: Currently, only used in direct columnarization

Definition at line 1322 of file ResultSetIteration.cpp.

References CHECK_NE, row_ptr_rowwise(), and storage_.

1324  {
1325  CHECK_NE(storage_->query_mem_desc_.targetGroupbyIndicesSize(), size_t(0));
1326  const auto key_width = storage_->query_mem_desc_.getEffectiveKeyWidth();
1327  auto keys_ptr = row_ptr_rowwise(
1328  storage_->getUnderlyingBuffer(), storage_->query_mem_desc_, row_idx);
1329  const auto column_offset =
1330  (storage_->query_mem_desc_.getTargetGroupbyIndex(target_idx) < 0)
1331  ? storage_->query_mem_desc_.getColOffInBytes(slot_idx)
1332  : storage_->query_mem_desc_.getTargetGroupbyIndex(target_idx) * key_width;
1333  const auto storage_buffer = keys_ptr + column_offset;
1334  return *reinterpret_cast<const ENTRY_TYPE*>(storage_buffer);
1335 }
std::unique_ptr< ResultSetStorage > storage_
Definition: ResultSet.h:840
#define CHECK_NE(x, y)
Definition: Logger.h:206
T row_ptr_rowwise(T buff, const QueryMemoryDescriptor &query_mem_desc, const size_t entry_idx)
+ Here is the call graph for this function:

◆ getRowWisePerfectHashEntryAt() [1/2]

template<typename ENTRY_TYPE >
ENTRY_TYPE ResultSet::getRowWisePerfectHashEntryAt ( const size_t  row_idx,
const size_t  target_idx,
const size_t  slot_idx 
) const
private

◆ getRowWisePerfectHashEntryAt() [2/2]

template<typename ENTRY_TYPE >
ENTRY_TYPE ResultSet::getRowWisePerfectHashEntryAt ( const size_t  row_idx,
const size_t  target_idx,
const size_t  slot_idx 
) const

Directly accesses the result set's storage buffer for a particular data type (row-wise output, perfect hash group by)

NOTE: Currently, only used in direct columnarization

Definition at line 1305 of file ResultSetIteration.cpp.

References storage_.

1307  {
1308  const size_t row_offset = storage_->query_mem_desc_.getRowSize() * row_idx;
1309  const size_t column_offset = storage_->query_mem_desc_.getColOffInBytes(slot_idx);
1310  const int8_t* storage_buffer =
1311  storage_->getUnderlyingBuffer() + row_offset + column_offset;
1312  return *reinterpret_cast<const ENTRY_TYPE*>(storage_buffer);
1313 }
std::unique_ptr< ResultSetStorage > storage_
Definition: ResultSet.h:840

◆ getSingleSlotTargetBitmap()

std::tuple< std::vector< bool >, size_t > ResultSet::getSingleSlotTargetBitmap ( ) const

Definition at line 1049 of file ResultSet.cpp.

References anonymous_namespace{RelAlgExecutor.cpp}::is_agg(), kAVG, and targets_.

Referenced by getSupportedSingleSlotTargetBitmap().

1049  {
1050  std::vector<bool> target_bitmap(targets_.size(), true);
1051  size_t num_single_slot_targets = 0;
1052  for (size_t target_idx = 0; target_idx < targets_.size(); target_idx++) {
1053  const auto& sql_type = targets_[target_idx].sql_type;
1054  if (targets_[target_idx].is_agg && targets_[target_idx].agg_kind == kAVG) {
1055  target_bitmap[target_idx] = false;
1056  } else if (sql_type.is_varlen()) {
1057  target_bitmap[target_idx] = false;
1058  } else {
1059  num_single_slot_targets++;
1060  }
1061  }
1062  return std::make_tuple(std::move(target_bitmap), num_single_slot_targets);
1063 }
bool is_agg(const Analyzer::Expr *expr)
const std::vector< TargetInfo > targets_
Definition: ResultSet.h:836
Definition: sqldefs.h:72
+ Here is the call graph for this function:
+ Here is the caller graph for this function:

◆ getSlotIndicesForTargetIndices()

std::vector< size_t > ResultSet::getSlotIndicesForTargetIndices ( ) const

Definition at line 1092 of file ResultSet.cpp.

References advance_slot(), and targets_.

1092  {
1093  std::vector<size_t> slot_indices(targets_.size(), 0);
1094  size_t slot_index = 0;
1095  for (size_t target_idx = 0; target_idx < targets_.size(); target_idx++) {
1096  slot_indices[target_idx] = slot_index;
1097  slot_index = advance_slot(slot_index, targets_[target_idx], false);
1098  }
1099  return slot_indices;
1100 }
const std::vector< TargetInfo > targets_
Definition: ResultSet.h:836
size_t advance_slot(const size_t j, const TargetInfo &target_info, const bool separate_varlen_storage)
+ Here is the call graph for this function:

◆ getStorage()

const ResultSetStorage * ResultSet::getStorage ( ) const

Definition at line 300 of file ResultSet.cpp.

References storage_.

300  {
301  return storage_.get();
302 }
std::unique_ptr< ResultSetStorage > storage_
Definition: ResultSet.h:840

◆ getStorageIndex()

std::pair< size_t, size_t > ResultSet::getStorageIndex ( const size_t  entry_idx) const
private

Returns a (storageIdx, entryIdx) pair, where: storageIdx : 0 refers to storage_; any other value i refers to appended_storage_[i - 1]. entryIdx : the local index into the selected storage object.

Definition at line 659 of file ResultSet.cpp.

References appended_storage_, CHECK_NE, QueryMemoryDescriptor::getEntryCount(), query_mem_desc_, storage_, and UNREACHABLE.

Referenced by findStorage(), makeGeoTargetValue(), makeTargetValue(), and makeVarlenTargetValue().

659  {
660  size_t fixedup_entry_idx = entry_idx;
661  auto entry_count = storage_->query_mem_desc_.getEntryCount();
662  const bool is_rowwise_layout = !storage_->query_mem_desc_.didOutputColumnar();
663  if (fixedup_entry_idx < entry_count) {
664  return {0, fixedup_entry_idx};
665  }
666  fixedup_entry_idx -= entry_count;
667  for (size_t i = 0; i < appended_storage_.size(); ++i) {
668  const auto& desc = appended_storage_[i]->query_mem_desc_;
669  CHECK_NE(is_rowwise_layout, desc.didOutputColumnar());
670  entry_count = desc.getEntryCount();
671  if (fixedup_entry_idx < entry_count) {
672  return {i + 1, fixedup_entry_idx};
673  }
674  fixedup_entry_idx -= entry_count;
675  }
676  UNREACHABLE() << "entry_idx = " << entry_idx << ", query_mem_desc_.getEntryCount() = "
678  return {};
679 }
AppendedStorage appended_storage_
Definition: ResultSet.h:841
QueryMemoryDescriptor query_mem_desc_
Definition: ResultSet.h:839
#define UNREACHABLE()
Definition: Logger.h:241
std::unique_ptr< ResultSetStorage > storage_
Definition: ResultSet.h:840
#define CHECK_NE(x, y)
Definition: Logger.h:206
+ Here is the call graph for this function:
+ Here is the caller graph for this function:

◆ getStringDictionaryPayloadCopy()

std::shared_ptr< const std::vector< std::string > > ResultSet::getStringDictionaryPayloadCopy ( const int  dict_id) const

Definition at line 995 of file ResultSet.cpp.

References CHECK, executor_, and row_set_mem_owner_.

996  {
997  CHECK(executor_);
998  const auto sdp =
999  executor_->getStringDictionaryProxy(dict_id, row_set_mem_owner_, false);
1000  return sdp->getDictionary()->copyStrings();
1001 }
const Executor * executor_
Definition: ResultSet.h:850
std::shared_ptr< RowSetMemoryOwner > row_set_mem_owner_
Definition: ResultSet.h:846
#define CHECK(condition)
Definition: Logger.h:197

◆ getSupportedSingleSlotTargetBitmap()

std::tuple< std::vector< bool >, size_t > ResultSet::getSupportedSingleSlotTargetBitmap ( ) const

This function returns a bitmap, along with its population count, denoting all supported single-column targets that are suitable for direct columnarization.

The final goal is to remove the need for such a selection altogether; at the moment, however, any target that does not qualify for direct columnarization (e.g., count distinct, approximate count distinct) is handled through the traditional result set iteration instead.

Definition at line 1073 of file ResultSet.cpp.

References CHECK, CHECK_GE, getSingleSlotTargetBitmap(), is_distinct_target(), isDirectColumnarConversionPossible(), kFLOAT, kSAMPLE, and targets_.

1074  {
1076  auto [single_slot_targets, num_single_slot_targets] = getSingleSlotTargetBitmap();
1077 
1078  for (size_t target_idx = 0; target_idx < single_slot_targets.size(); target_idx++) {
1079  const auto& target = targets_[target_idx];
1080  if (single_slot_targets[target_idx] &&
1081  (is_distinct_target(target) ||
1082  (target.is_agg && target.agg_kind == kSAMPLE && target.sql_type == kFLOAT))) {
1083  single_slot_targets[target_idx] = false;
1084  num_single_slot_targets--;
1085  }
1086  }
1087  CHECK_GE(num_single_slot_targets, size_t(0));
1088  return std::make_tuple(std::move(single_slot_targets), num_single_slot_targets);
1089 }
#define CHECK_GE(x, y)
Definition: Logger.h:210
bool isDirectColumnarConversionPossible() const
Definition: ResultSet.cpp:1018
const std::vector< TargetInfo > targets_
Definition: ResultSet.h:836
bool is_distinct_target(const TargetInfo &target_info)
Definition: TargetInfo.h:129
std::tuple< std::vector< bool >, size_t > getSingleSlotTargetBitmap() const
Definition: ResultSet.cpp:1049
#define CHECK(condition)
Definition: Logger.h:197
+ Here is the call graph for this function:

◆ getTargetInfos()

const std::vector< TargetInfo > & ResultSet::getTargetInfos ( ) const

Definition at line 437 of file ResultSet.cpp.

References targets_.

437  {
438  return targets_;
439 }
const std::vector< TargetInfo > targets_
Definition: ResultSet.h:836

◆ getTargetInitVals()

const std::vector< int64_t > & ResultSet::getTargetInitVals ( ) const

Definition at line 441 of file ResultSet.cpp.

References CHECK, and storage_.

441  {
442  CHECK(storage_);
443  return storage_->target_init_vals_;
444 }
std::unique_ptr< ResultSetStorage > storage_
Definition: ResultSet.h:840
#define CHECK(condition)
Definition: Logger.h:197

◆ getTargetValueFromBufferColwise()

TargetValue ResultSet::getTargetValueFromBufferColwise ( const int8_t *  col_ptr,
const int8_t *  keys_ptr,
const QueryMemoryDescriptor query_mem_desc,
const size_t  local_entry_idx,
const size_t  global_entry_idx,
const TargetInfo target_info,
const size_t  target_logical_idx,
const size_t  slot_idx,
const bool  translate_strings,
const bool  decimal_to_double 
) const
private

Definition at line 1917 of file ResultSetIteration.cpp.

References advance_to_next_columnar_target_buff(), TargetInfo::agg_kind, CHECK, CHECK_GE, anonymous_namespace{ResultSetIteration.cpp}::columnar_elem_ptr(), QueryMemoryDescriptor::didOutputColumnar(), QueryMemoryDescriptor::getEffectiveKeyWidth(), QueryMemoryDescriptor::getEntryCount(), QueryMemoryDescriptor::getPaddedSlotWidthBytes(), QueryMemoryDescriptor::getTargetGroupbyIndex(), TargetInfo::is_agg, SQLTypeInfo::is_geometry(), is_real_str_or_array(), kAVG, anonymous_namespace{ResultSetIteration.cpp}::make_avg_target_value(), makeGeoTargetValue(), makeTargetValue(), makeVarlenTargetValue(), query_mem_desc_, TargetInfo::sql_type, and QueryMemoryDescriptor::targetGroupbyIndicesSize().

1927  {
1929  const auto col1_ptr = col_ptr;
1930  const auto compact_sz1 = query_mem_desc.getPaddedSlotWidthBytes(slot_idx);
1931  const auto next_col_ptr =
1932  advance_to_next_columnar_target_buff(col1_ptr, query_mem_desc, slot_idx);
1933  const auto col2_ptr = ((target_info.is_agg && target_info.agg_kind == kAVG) ||
1934  is_real_str_or_array(target_info))
1935  ? next_col_ptr
1936  : nullptr;
1937  const auto compact_sz2 = ((target_info.is_agg && target_info.agg_kind == kAVG) ||
1938  is_real_str_or_array(target_info))
1939  ? query_mem_desc.getPaddedSlotWidthBytes(slot_idx + 1)
1940  : 0;
1941 
1942  // TODO(Saman): add required logics for count distinct
1943  // geospatial target values:
1944  if (target_info.sql_type.is_geometry()) {
1945  return makeGeoTargetValue(
1946  col1_ptr, slot_idx, target_info, target_logical_idx, global_entry_idx);
1947  }
1948 
1949  const auto ptr1 = columnar_elem_ptr(local_entry_idx, col1_ptr, compact_sz1);
1950  if (target_info.agg_kind == kAVG || is_real_str_or_array(target_info)) {
1951  CHECK(col2_ptr);
1952  CHECK(compact_sz2);
1953  const auto ptr2 = columnar_elem_ptr(local_entry_idx, col2_ptr, compact_sz2);
1954  return target_info.agg_kind == kAVG
1955  ? make_avg_target_value(ptr1, compact_sz1, ptr2, compact_sz2, target_info)
1956  : makeVarlenTargetValue(ptr1,
1957  compact_sz1,
1958  ptr2,
1959  compact_sz2,
1960  target_info,
1961  target_logical_idx,
1962  translate_strings,
1963  global_entry_idx);
1964  }
1966  query_mem_desc_.getTargetGroupbyIndex(target_logical_idx) < 0) {
1967  return makeTargetValue(ptr1,
1968  compact_sz1,
1969  target_info,
1970  target_logical_idx,
1971  translate_strings,
1973  global_entry_idx);
1974  }
1975  const auto key_width = query_mem_desc_.getEffectiveKeyWidth();
1976  const auto key_idx = query_mem_desc_.getTargetGroupbyIndex(target_logical_idx);
1977  CHECK_GE(key_idx, 0);
1978  auto key_col_ptr = keys_ptr + key_idx * query_mem_desc_.getEntryCount() * key_width;
1979  return makeTargetValue(columnar_elem_ptr(local_entry_idx, key_col_ptr, key_width),
1980  key_width,
1981  target_info,
1982  target_logical_idx,
1983  translate_strings,
1985  global_entry_idx);
1986 }
double decimal_to_double(const SQLTypeInfo &otype, int64_t oval)
T advance_to_next_columnar_target_buff(T target_ptr, const QueryMemoryDescriptor &query_mem_desc, const size_t target_slot_idx)
SQLTypeInfo sql_type
Definition: TargetInfo.h:42
QueryMemoryDescriptor query_mem_desc_
Definition: ResultSet.h:839
#define CHECK_GE(x, y)
Definition: Logger.h:210
TargetValue make_avg_target_value(const int8_t *ptr1, const int8_t compact_sz1, const int8_t *ptr2, const int8_t compact_sz2, const TargetInfo &target_info)
const int8_t getPaddedSlotWidthBytes(const size_t slot_idx) const
bool is_agg
Definition: TargetInfo.h:40
size_t targetGroupbyIndicesSize() const
SQLAgg agg_kind
Definition: TargetInfo.h:41
bool is_geometry() const
Definition: sqltypes.h:429
bool is_real_str_or_array(const TargetInfo &target_info)
int64_t getTargetGroupbyIndex(const size_t target_idx) const
TargetValue makeTargetValue(const int8_t *ptr, const int8_t compact_sz, const TargetInfo &target_info, const size_t target_logical_idx, const bool translate_strings, const bool decimal_to_double, const size_t entry_buff_idx) const
TargetValue makeVarlenTargetValue(const int8_t *ptr1, const int8_t compact_sz1, const int8_t *ptr2, const int8_t compact_sz2, const TargetInfo &target_info, const size_t target_logical_idx, const bool translate_strings, const size_t entry_buff_idx) const
#define CHECK(condition)
Definition: Logger.h:197
TargetValue makeGeoTargetValue(const int8_t *geo_target_ptr, const size_t slot_idx, const TargetInfo &target_info, const size_t target_logical_idx, const size_t entry_buff_idx) const
Definition: sqldefs.h:72
const int8_t * columnar_elem_ptr(const size_t entry_idx, const int8_t *col1_ptr, const int8_t compact_sz1)
size_t getEffectiveKeyWidth() const
+ Here is the call graph for this function:

◆ getTargetValueFromBufferRowwise()

TargetValue ResultSet::getTargetValueFromBufferRowwise ( int8_t *  rowwise_target_ptr,
int8_t *  keys_ptr,
const size_t  entry_buff_idx,
const TargetInfo target_info,
const size_t  target_logical_idx,
const size_t  slot_idx,
const bool  translate_strings,
const bool  decimal_to_double,
const bool  fixup_count_distinct_pointers 
) const
private

Definition at line 1990 of file ResultSetIteration.cpp.

References TargetInfo::agg_kind, CHECK, QueryMemoryDescriptor::count_distinct_descriptors_, SQLTypeInfo::get_compression(), QueryMemoryDescriptor::getEffectiveKeyWidth(), QueryMemoryDescriptor::getLogicalSlotWidthBytes(), QueryMemoryDescriptor::getPaddedSlotWidthBytes(), QueryMemoryDescriptor::getTargetGroupbyIndex(), QueryMemoryDescriptor::hasKeylessHash(), TargetInfo::is_agg, SQLTypeInfo::is_array(), is_distinct_target(), SQLTypeInfo::is_geometry(), is_real_str_or_array(), SQLTypeInfo::is_string(), QueryMemoryDescriptor::isSingleColumnGroupByWithPerfectHash(), kAVG, kENCODING_NONE, anonymous_namespace{ResultSetIteration.cpp}::make_avg_target_value(), makeGeoTargetValue(), makeTargetValue(), makeVarlenTargetValue(), query_mem_desc_, row_set_mem_owner_, separate_varlen_storage_valid_, TargetInfo::sql_type, storage_, QueryMemoryDescriptor::targetGroupbyIndicesSize(), and UNLIKELY.

1999  {
2000  if (UNLIKELY(fixup_count_distinct_pointers)) {
2001  if (is_distinct_target(target_info)) {
2002  auto count_distinct_ptr_ptr = reinterpret_cast<int64_t*>(rowwise_target_ptr);
2003  const auto remote_ptr = *count_distinct_ptr_ptr;
2004  if (remote_ptr) {
2005  const auto ptr = storage_->mappedPtr(remote_ptr);
2006  if (ptr) {
2007  *count_distinct_ptr_ptr = ptr;
2008  } else {
2009  // need to create a zero filled buffer for this remote_ptr
2010  const auto& count_distinct_desc =
2011  query_mem_desc_.count_distinct_descriptors_[target_logical_idx];
2012  const auto bitmap_byte_sz = count_distinct_desc.sub_bitmap_count == 1
2013  ? count_distinct_desc.bitmapSizeBytes()
2014  : count_distinct_desc.bitmapPaddedSizeBytes();
2015  auto count_distinct_buffer =
2016  row_set_mem_owner_->allocateCountDistinctBuffer(bitmap_byte_sz);
2017  *count_distinct_ptr_ptr = reinterpret_cast<int64_t>(count_distinct_buffer);
2018  }
2019  }
2020  }
2021  return int64_t(0);
2022  }
2023  if (target_info.sql_type.is_geometry()) {
2024  return makeGeoTargetValue(
2025  rowwise_target_ptr, slot_idx, target_info, target_logical_idx, entry_buff_idx);
2026  }
2027 
2028  auto ptr1 = rowwise_target_ptr;
2029  int8_t compact_sz1 = query_mem_desc_.getPaddedSlotWidthBytes(slot_idx);
2031  !query_mem_desc_.hasKeylessHash() && !target_info.is_agg) {
2032  // Single column perfect hash group by can utilize one slot for both the key and the
2033  // target value if both values fit in 8 bytes. Use the target value actual size for
2034  // this case. If they don't, the target value should be 8 bytes, so we can still use
2035  // the actual size rather than the compact size.
2036  compact_sz1 = query_mem_desc_.getLogicalSlotWidthBytes(slot_idx);
2037  }
2038 
2039  // logic for deciding width of column
2040  if (target_info.agg_kind == kAVG || is_real_str_or_array(target_info)) {
2041  const auto ptr2 =
2042  rowwise_target_ptr + query_mem_desc_.getPaddedSlotWidthBytes(slot_idx);
2043  int8_t compact_sz2 = 0;
2044  // Skip reading the second slot if we have a none encoded string and are using
2045  // the none encoded strings buffer attached to ResultSetStorage
2047  (target_info.sql_type.is_array() ||
2048  (target_info.sql_type.is_string() &&
2049  target_info.sql_type.get_compression() == kENCODING_NONE)))) {
2050  compact_sz2 = query_mem_desc_.getPaddedSlotWidthBytes(slot_idx + 1);
2051  }
2052  if (separate_varlen_storage_valid_ && target_info.is_agg) {
2053  compact_sz2 = 8; // TODO(adb): is there a better way to do this?
2054  }
2055  CHECK(ptr2);
2056  return target_info.agg_kind == kAVG
2057  ? make_avg_target_value(ptr1, compact_sz1, ptr2, compact_sz2, target_info)
2058  : makeVarlenTargetValue(ptr1,
2059  compact_sz1,
2060  ptr2,
2061  compact_sz2,
2062  target_info,
2063  target_logical_idx,
2064  translate_strings,
2065  entry_buff_idx);
2066  }
2068  query_mem_desc_.getTargetGroupbyIndex(target_logical_idx) < 0) {
2069  return makeTargetValue(ptr1,
2070  compact_sz1,
2071  target_info,
2072  target_logical_idx,
2073  translate_strings,
2075  entry_buff_idx);
2076  }
2077  const auto key_width = query_mem_desc_.getEffectiveKeyWidth();
2078  ptr1 = keys_ptr + query_mem_desc_.getTargetGroupbyIndex(target_logical_idx) * key_width;
2079  return makeTargetValue(ptr1,
2080  key_width,
2081  target_info,
2082  target_logical_idx,
2083  translate_strings,
2085  entry_buff_idx);
2086 }
bool is_array() const
Definition: sqltypes.h:425
bool is_string() const
Definition: sqltypes.h:417
double decimal_to_double(const SQLTypeInfo &otype, int64_t oval)
SQLTypeInfo sql_type
Definition: TargetInfo.h:42
QueryMemoryDescriptor query_mem_desc_
Definition: ResultSet.h:839
std::unique_ptr< ResultSetStorage > storage_
Definition: ResultSet.h:840
TargetValue make_avg_target_value(const int8_t *ptr1, const int8_t compact_sz1, const int8_t *ptr2, const int8_t compact_sz2, const TargetInfo &target_info)
HOST DEVICE EncodingType get_compression() const
Definition: sqltypes.h:267
const int8_t getPaddedSlotWidthBytes(const size_t slot_idx) const
std::shared_ptr< RowSetMemoryOwner > row_set_mem_owner_
Definition: ResultSet.h:846
bool is_agg
Definition: TargetInfo.h:40
CountDistinctDescriptors count_distinct_descriptors_
bool is_distinct_target(const TargetInfo &target_info)
Definition: TargetInfo.h:129
size_t targetGroupbyIndicesSize() const
SQLAgg agg_kind
Definition: TargetInfo.h:41
#define UNLIKELY(x)
Definition: likely.h:25
bool is_real_str_or_array(const TargetInfo &target_info)
bool is_geometry() const
Definition: sqltypes.h:429
int64_t getTargetGroupbyIndex(const size_t target_idx) const
TargetValue makeTargetValue(const int8_t *ptr, const int8_t compact_sz, const TargetInfo &target_info, const size_t target_logical_idx, const bool translate_strings, const bool decimal_to_double, const size_t entry_buff_idx) const
TargetValue makeVarlenTargetValue(const int8_t *ptr1, const int8_t compact_sz1, const int8_t *ptr2, const int8_t compact_sz2, const TargetInfo &target_info, const size_t target_logical_idx, const bool translate_strings, const size_t entry_buff_idx) const
#define CHECK(condition)
Definition: Logger.h:197
const int8_t getLogicalSlotWidthBytes(const size_t slot_idx) const
bool separate_varlen_storage_valid_
Definition: ResultSet.h:871
TargetValue makeGeoTargetValue(const int8_t *geo_target_ptr, const size_t slot_idx, const TargetInfo &target_info, const size_t target_logical_idx, const size_t entry_buff_idx) const
Definition: sqldefs.h:72
bool isSingleColumnGroupByWithPerfectHash() const
size_t getEffectiveKeyWidth() const
+ Here is the call graph for this function:

◆ getVarlenOrderEntry()

InternalTargetValue ResultSet::getVarlenOrderEntry ( const int64_t  str_ptr,
const size_t  str_len 
) const
private

Definition at line 627 of file ResultSetIteration.cpp.

References CHECK, copy_from_gpu(), CPU, device_id_, device_type_, QueryMemoryDescriptor::getExecutor(), GPU, query_mem_desc_, and row_set_mem_owner_.

628  {
629  char* host_str_ptr{nullptr};
630  std::vector<int8_t> cpu_buffer;
632  cpu_buffer.resize(str_len);
633  const auto executor = query_mem_desc_.getExecutor();
634  CHECK(executor);
635  auto& data_mgr = executor->catalog_->getDataMgr();
636  copy_from_gpu(&data_mgr,
637  &cpu_buffer[0],
638  static_cast<CUdeviceptr>(str_ptr),
639  str_len,
640  device_id_);
641  host_str_ptr = reinterpret_cast<char*>(&cpu_buffer[0]);
642  } else {
644  host_str_ptr = reinterpret_cast<char*>(str_ptr);
645  }
646  std::string str(host_str_ptr, str_len);
647  return InternalTargetValue(row_set_mem_owner_->addString(str));
648 }
QueryMemoryDescriptor query_mem_desc_
Definition: ResultSet.h:839
const Executor * getExecutor() const
std::shared_ptr< RowSetMemoryOwner > row_set_mem_owner_
Definition: ResultSet.h:846
void copy_from_gpu(Data_Namespace::DataMgr *data_mgr, void *dst, const CUdeviceptr src, const size_t num_bytes, const int device_id)
const ExecutorDeviceType device_type_
Definition: ResultSet.h:837
#define CHECK(condition)
Definition: Logger.h:197
const int device_id_
Definition: ResultSet.h:838
+ Here is the call graph for this function:

◆ holdChunkIterators()

void ResultSet::holdChunkIterators ( const std::shared_ptr< std::list< ChunkIter >>  chunk_iters)
inline

Definition at line 475 of file ResultSet.h.

475  {
476  chunk_iters_.push_back(chunk_iters);
477  }
std::vector< std::shared_ptr< std::list< ChunkIter > > > chunk_iters_
Definition: ResultSet.h:853

◆ holdChunks()

void ResultSet::holdChunks ( const std::list< std::shared_ptr< Chunk_NS::Chunk >> &  chunks)
inline

Definition at line 472 of file ResultSet.h.

472  {
473  chunks_ = chunks;
474  }
std::list< std::shared_ptr< Chunk_NS::Chunk > > chunks_
Definition: ResultSet.h:852

◆ holdLiterals()

void ResultSet::holdLiterals ( std::vector< int8_t > &  literal_buff)
inline

Definition at line 478 of file ResultSet.h.

478  {
479  literal_buffers_.push_back(std::move(literal_buff));
480  }
std::vector< std::vector< int8_t > > literal_buffers_
Definition: ResultSet.h:856

◆ initializeStorage()

void ResultSet::initializeStorage ( ) const

Definition at line 992 of file ResultSetReduction.cpp.

References QueryMemoryDescriptor::didOutputColumnar(), and ResultSetStorage::query_mem_desc_.

992  {
994  storage_->initializeColWise();
995  } else {
996  storage_->initializeRowWise();
997  }
998 }
QueryMemoryDescriptor query_mem_desc_
Definition: ResultSet.h:839
std::unique_ptr< ResultSetStorage > storage_
Definition: ResultSet.h:840
+ Here is the call graph for this function:

◆ initPermutationBuffer()

std::vector< uint32_t > ResultSet::initPermutationBuffer ( const size_t  start,
const size_t  step 
)
private

Definition at line 596 of file ResultSet.cpp.

References CHECK, CHECK_NE, DEBUG_TIMER, findStorage(), QueryMemoryDescriptor::getEntryCount(), and query_mem_desc_.

Referenced by parallelTop(), and sort().

597  {
598  auto timer = DEBUG_TIMER(__func__);
599  CHECK_NE(size_t(0), step);
600  std::vector<uint32_t> permutation;
601  const auto total_entries = query_mem_desc_.getEntryCount();
602  permutation.reserve(total_entries / step);
603  for (size_t i = start; i < total_entries; i += step) {
604  const auto storage_lookup_result = findStorage(i);
605  const auto lhs_storage = storage_lookup_result.storage_ptr;
606  const auto off = storage_lookup_result.fixedup_entry_idx;
607  CHECK(lhs_storage);
608  if (!lhs_storage->isEmptyEntry(off)) {
609  permutation.emplace_back(i);
610  }
611  }
612  return permutation;
613 }
QueryMemoryDescriptor query_mem_desc_
Definition: ResultSet.h:839
#define CHECK_NE(x, y)
Definition: Logger.h:206
StorageLookupResult findStorage(const size_t entry_idx) const
Definition: ResultSet.cpp:684
#define CHECK(condition)
Definition: Logger.h:197
#define DEBUG_TIMER(name)
Definition: Logger.h:313
+ Here is the call graph for this function:
+ Here is the caller graph for this function:

◆ isDirectColumnarConversionPossible()

bool ResultSet::isDirectColumnarConversionPossible ( ) const

Determines if it is possible to directly form a ColumnarResults class from this result set, bypassing the default columnarization.

NOTE: If a permutation vector exists (e.g., for some ORDER BY queries), this conversion becomes equivalent to row-wise columnarization.

Definition at line 1018 of file ResultSet.cpp.

References QueryMemoryDescriptor::didOutputColumnar(), g_enable_direct_columnarization, QueryMemoryDescriptor::getQueryDescriptionType(), GroupByBaselineHash, GroupByPerfectHash, permutation_, Projection, and query_mem_desc_.

Referenced by copyColumnIntoBuffer(), and getSupportedSingleSlotTargetBitmap().

+ Here is the call graph for this function:
+ Here is the caller graph for this function:

◆ isExplain()

bool ResultSet::isExplain ( ) const

Definition at line 501 of file ResultSet.cpp.

References just_explain_.

501  {
502  return just_explain_;
503 }
const bool just_explain_
Definition: ResultSet.h:873

◆ isGeoColOnGpu()

bool ResultSet::isGeoColOnGpu ( const size_t  col_idx) const

Definition at line 1485 of file ResultSetIteration.cpp.

References CHECK_LT, device_type_, GPU, IS_GEO, lazy_fetch_info_, separate_varlen_storage_valid_, targets_, and to_string().

1485  {
1486  // This should match the logic in makeGeoTargetValue which ultimately calls
1487  // fetch_data_from_gpu when the geo column is on the device.
1488  // TODO(croot): somehow find a way to refactor this and makeGeoTargetValue to use a
1489  // utility function that handles this logic in one place
1490  CHECK_LT(col_idx, targets_.size());
1491  if (!IS_GEO(targets_[col_idx].sql_type.get_type())) {
1492  throw std::runtime_error("Column target at index " + std::to_string(col_idx) +
1493  " is not a geo column. It is of type " +
1494  targets_[col_idx].sql_type.get_type_name() + ".");
1495  }
1496 
1497  const auto& target_info = targets_[col_idx];
1498  if (separate_varlen_storage_valid_ && !target_info.is_agg) {
1499  return false;
1500  }
1501 
1502  if (!lazy_fetch_info_.empty()) {
1503  CHECK_LT(col_idx, lazy_fetch_info_.size());
1504  if (lazy_fetch_info_[col_idx].is_lazily_fetched) {
1505  return false;
1506  }
1507  }
1508 
1510 }
std::string to_string(char const *&&v)
const std::vector< TargetInfo > targets_
Definition: ResultSet.h:836
const std::vector< ColumnLazyFetchInfo > lazy_fetch_info_
Definition: ResultSet.h:857
#define CHECK_LT(x, y)
Definition: Logger.h:207
const ExecutorDeviceType device_type_
Definition: ResultSet.h:837
bool separate_varlen_storage_valid_
Definition: ResultSet.h:871
#define IS_GEO(T)
Definition: sqltypes.h:174
+ Here is the call graph for this function:

◆ isNull()

bool ResultSet::isNull ( const SQLTypeInfo ti,
const InternalTargetValue val,
const bool  float_argument_input 
)
staticprivate

Definition at line 2225 of file ResultSetIteration.cpp.

References CHECK, SQLTypeInfo::get_elem_type(), InternalTargetValue::i1, InternalTargetValue::i2, SQLTypeInfo::is_column(), InternalTargetValue::isInt(), InternalTargetValue::isNull(), InternalTargetValue::isPair(), InternalTargetValue::isStr(), NULL_DOUBLE, null_val_bit_pattern(), and pair_to_double().

Referenced by ResultSet::ResultSetComparator< BUFFER_ITERATOR_TYPE >::operator()().

2227  {
2228  const auto& ti_ = (ti.is_column() ? ti.get_elem_type() : ti);
2229  if (ti_.get_notnull()) {
2230  return false;
2231  }
2232  if (val.isInt()) {
2233  return val.i1 == null_val_bit_pattern(ti_, float_argument_input);
2234  }
2235  if (val.isPair()) {
2236  return !val.i2 ||
2237  pair_to_double({val.i1, val.i2}, ti_, float_argument_input) == NULL_DOUBLE;
2238  }
2239  if (val.isStr()) {
2240  return !val.i1;
2241  }
2242  CHECK(val.isNull());
2243  return true;
2244 }
#define NULL_DOUBLE
Definition: sqltypes.h:186
bool isNull() const
Definition: TargetValue.h:69
bool isPair() const
Definition: TargetValue.h:67
double pair_to_double(const std::pair< int64_t, int64_t > &fp_pair, const SQLTypeInfo &ti, const bool float_argument_input)
int64_t null_val_bit_pattern(const SQLTypeInfo &ti, const bool float_argument_input)
bool isStr() const
Definition: TargetValue.h:71
bool isInt() const
Definition: TargetValue.h:65
bool is_column() const
Definition: sqltypes.h:430
SQLTypeInfo get_elem_type() const
Definition: sqltypes.h:624
#define CHECK(condition)
Definition: Logger.h:197
+ Here is the call graph for this function:
+ Here is the caller graph for this function:

◆ isPermutationBufferEmpty()

const bool ResultSet::isPermutationBufferEmpty ( ) const
inline

Definition at line 487 of file ResultSet.h.

487 { return permutation_.empty(); };
std::vector< uint32_t > permutation_
Definition: ResultSet.h:847

◆ isRowAtEmpty()

bool ResultSet::isRowAtEmpty ( const size_t  index) const

Definition at line 282 of file ResultSetIteration.cpp.

Referenced by parallelRowCount().

282  {
283  if (logical_index >= entryCount()) {
284  return true;
285  }
286  const auto entry_idx =
287  permutation_.empty() ? logical_index : permutation_[logical_index];
288  const auto storage_lookup_result = findStorage(entry_idx);
289  const auto storage = storage_lookup_result.storage_ptr;
290  const auto local_entry_idx = storage_lookup_result.fixedup_entry_idx;
291  return storage->isEmptyEntry(local_entry_idx);
292 }
size_t entryCount() const
std::vector< uint32_t > permutation_
Definition: ResultSet.h:847
StorageLookupResult findStorage(const size_t entry_idx) const
Definition: ResultSet.cpp:684
+ Here is the caller graph for this function:

◆ isTruncated()

bool ResultSet::isTruncated ( ) const

Definition at line 497 of file ResultSet.cpp.

References drop_first_, and keep_first_.

497  {
498  return keep_first_ + drop_first_;
499 }
size_t keep_first_
Definition: ResultSet.h:845
size_t drop_first_
Definition: ResultSet.h:844

◆ isZeroCopyColumnarConversionPossible()

bool ResultSet::isZeroCopyColumnarConversionPossible ( size_t  column_idx) const

Definition at line 1036 of file ResultSet.cpp.

References appended_storage_, QueryMemoryDescriptor::didOutputColumnar(), QueryMemoryDescriptor::getQueryDescriptionType(), lazy_fetch_info_, Projection, query_mem_desc_, and storage_.

Referenced by getColumnarBuffer().

1036  {
1039  appended_storage_.empty() && storage_ &&
1040  (lazy_fetch_info_.empty() || !lazy_fetch_info_[column_idx].is_lazily_fetched);
1041 }
AppendedStorage appended_storage_
Definition: ResultSet.h:841
QueryMemoryDescriptor query_mem_desc_
Definition: ResultSet.h:839
std::unique_ptr< ResultSetStorage > storage_
Definition: ResultSet.h:840
const std::vector< ColumnLazyFetchInfo > lazy_fetch_info_
Definition: ResultSet.h:857
QueryDescriptionType getQueryDescriptionType() const
+ Here is the call graph for this function:
+ Here is the caller graph for this function:

◆ keepFirstN()

void ResultSet::keepFirstN ( const size_t  n)

Definition at line 94 of file ResultSet.cpp.

References CHECK_EQ.

94  {
96  keep_first_ = n;
97 }
#define CHECK_EQ(x, y)
Definition: Logger.h:205
size_t keep_first_
Definition: ResultSet.h:845
std::atomic< int64_t > cached_row_count_
Definition: ResultSet.h:874

◆ lazyReadInt()

int64_t ResultSet::lazyReadInt ( const int64_t  ival,
const size_t  target_logical_idx,
const StorageLookupResult storage_lookup_result 
) const
private

Definition at line 650 of file ResultSetIteration.cpp.

References CHECK, CHECK_LT, ChunkIter_get_nth(), col_buffers_, ResultSet::StorageLookupResult::fixedup_entry_idx, getColumnFrag(), VarlenDatum::is_null, kENCODING_NONE, lazy_decode(), lazy_fetch_info_, VarlenDatum::length, VarlenDatum::pointer, row_set_mem_owner_, ResultSet::StorageLookupResult::storage_idx, and targets_.

652  {
653  if (!lazy_fetch_info_.empty()) {
654  CHECK_LT(target_logical_idx, lazy_fetch_info_.size());
655  const auto& col_lazy_fetch = lazy_fetch_info_[target_logical_idx];
656  if (col_lazy_fetch.is_lazily_fetched) {
657  CHECK_LT(static_cast<size_t>(storage_lookup_result.storage_idx),
658  col_buffers_.size());
659  int64_t ival_copy = ival;
660  auto& frag_col_buffers =
661  getColumnFrag(static_cast<size_t>(storage_lookup_result.storage_idx),
662  target_logical_idx,
663  ival_copy);
664  auto& frag_col_buffer = frag_col_buffers[col_lazy_fetch.local_col_id];
665  CHECK_LT(target_logical_idx, targets_.size());
666  const TargetInfo& target_info = targets_[target_logical_idx];
667  CHECK(!target_info.is_agg);
668  if (target_info.sql_type.is_string() &&
669  target_info.sql_type.get_compression() == kENCODING_NONE) {
670  VarlenDatum vd;
671  bool is_end{false};
673  reinterpret_cast<ChunkIter*>(const_cast<int8_t*>(frag_col_buffer)),
674  storage_lookup_result.fixedup_entry_idx,
675  false,
676  &vd,
677  &is_end);
678  CHECK(!is_end);
679  if (vd.is_null) {
680  return 0;
681  }
682  std::string fetched_str(reinterpret_cast<char*>(vd.pointer), vd.length);
683  return reinterpret_cast<int64_t>(row_set_mem_owner_->addString(fetched_str));
684  }
685  return lazy_decode(col_lazy_fetch, frag_col_buffer, ival_copy);
686  }
687  }
688  return ival;
689 }
const std::vector< const int8_t * > & getColumnFrag(const size_t storge_idx, const size_t col_logical_idx, int64_t &global_idx) const
bool is_null
Definition: sqltypes.h:76
int64_t lazy_decode(const ColumnLazyFetchInfo &col_lazy_fetch, const int8_t *byte_stream, const int64_t pos)
DEVICE void ChunkIter_get_nth(ChunkIter *it, int n, bool uncompress, VarlenDatum *result, bool *is_end)
Definition: ChunkIter.cpp:181
int8_t * pointer
Definition: sqltypes.h:75
const std::vector< TargetInfo > targets_
Definition: ResultSet.h:836
std::shared_ptr< RowSetMemoryOwner > row_set_mem_owner_
Definition: ResultSet.h:846
const std::vector< ColumnLazyFetchInfo > lazy_fetch_info_
Definition: ResultSet.h:857
#define CHECK_LT(x, y)
Definition: Logger.h:207
std::vector< std::vector< std::vector< const int8_t * > > > col_buffers_
Definition: ResultSet.h:858
#define CHECK(condition)
Definition: Logger.h:197
size_t length
Definition: sqltypes.h:74
+ Here is the call graph for this function:

◆ makeGeoTargetValue()

TargetValue ResultSet::makeGeoTargetValue ( const int8_t *  geo_target_ptr,
const size_t  slot_idx,
const TargetInfo target_info,
const size_t  target_logical_idx,
const size_t  entry_buff_idx 
) const
private

Definition at line 1516 of file ResultSetIteration.cpp.

References advance_to_next_columnar_target_buff(), CHECK, CHECK_EQ, CHECK_LT, col_buffers_, device_id_, device_type_, QueryMemoryDescriptor::didOutputColumnar(), findStorage(), geo_return_type_, SQLTypeInfo::get_type(), SQLTypeInfo::get_type_name(), getColumnFrag(), QueryMemoryDescriptor::getExecutor(), QueryMemoryDescriptor::getPaddedColWidthForRange(), QueryMemoryDescriptor::getPaddedSlotWidthBytes(), getStorageIndex(), GPU, TargetInfo::is_agg, SQLTypeInfo::is_geometry(), ColumnLazyFetchInfo::is_lazily_fetched, kLINESTRING, kMULTIPOLYGON, kPOINT, kPOLYGON, lazy_fetch_info_, ColumnLazyFetchInfo::local_col_id, query_mem_desc_, read_int_from_buff(), separate_varlen_storage_valid_, serialized_varlen_buffer_, TargetInfo::sql_type, and UNREACHABLE.

Referenced by getTargetValueFromBufferColwise(), and getTargetValueFromBufferRowwise().

1520  {
1521  CHECK(target_info.sql_type.is_geometry());
1522 
1523  auto getNextTargetBufferRowWise = [&](const size_t slot_idx, const size_t range) {
1524  return geo_target_ptr + query_mem_desc_.getPaddedColWidthForRange(slot_idx, range);
1525  };
1526 
1527  auto getNextTargetBufferColWise = [&](const size_t slot_idx, const size_t range) {
1528  const auto storage_info = findStorage(entry_buff_idx);
1529  auto crt_geo_col_ptr = geo_target_ptr;
1530  for (size_t i = slot_idx; i < slot_idx + range; i++) {
1531  crt_geo_col_ptr = advance_to_next_columnar_target_buff(
1532  crt_geo_col_ptr, storage_info.storage_ptr->query_mem_desc_, i);
1533  }
1534  // adjusting the column pointer to represent a pointer to the geo target value
1535  return crt_geo_col_ptr +
1536  storage_info.fixedup_entry_idx *
1537  storage_info.storage_ptr->query_mem_desc_.getPaddedSlotWidthBytes(
1538  slot_idx + range);
1539  };
1540 
1541  auto getNextTargetBuffer = [&](const size_t slot_idx, const size_t range) {
1542  return query_mem_desc_.didOutputColumnar()
1543  ? getNextTargetBufferColWise(slot_idx, range)
1544  : getNextTargetBufferRowWise(slot_idx, range);
1545  };
1546 
1547  auto getCoordsDataPtr = [&](const int8_t* geo_target_ptr) {
1548  return read_int_from_buff(getNextTargetBuffer(slot_idx, 0),
1549  query_mem_desc_.getPaddedSlotWidthBytes(slot_idx));
1550  };
1551 
1552  auto getCoordsLength = [&](const int8_t* geo_target_ptr) {
1553  return read_int_from_buff(getNextTargetBuffer(slot_idx, 1),
1554  query_mem_desc_.getPaddedSlotWidthBytes(slot_idx + 1));
1555  };
1556 
1557  auto getRingSizesPtr = [&](const int8_t* geo_target_ptr) {
1558  return read_int_from_buff(getNextTargetBuffer(slot_idx, 2),
1559  query_mem_desc_.getPaddedSlotWidthBytes(slot_idx + 2));
1560  };
1561 
1562  auto getRingSizesLength = [&](const int8_t* geo_target_ptr) {
1563  return read_int_from_buff(getNextTargetBuffer(slot_idx, 3),
1564  query_mem_desc_.getPaddedSlotWidthBytes(slot_idx + 3));
1565  };
1566 
1567  auto getPolyRingsPtr = [&](const int8_t* geo_target_ptr) {
1568  return read_int_from_buff(getNextTargetBuffer(slot_idx, 4),
1569  query_mem_desc_.getPaddedSlotWidthBytes(slot_idx + 4));
1570  };
1571 
1572  auto getPolyRingsLength = [&](const int8_t* geo_target_ptr) {
1573  return read_int_from_buff(getNextTargetBuffer(slot_idx, 5),
1574  query_mem_desc_.getPaddedSlotWidthBytes(slot_idx + 5));
1575  };
1576 
1577  auto getFragColBuffers = [&]() -> decltype(auto) {
1578  const auto storage_idx = getStorageIndex(entry_buff_idx);
1579  CHECK_LT(storage_idx.first, col_buffers_.size());
1580  auto global_idx = getCoordsDataPtr(geo_target_ptr);
1581  return getColumnFrag(storage_idx.first, target_logical_idx, global_idx);
1582  };
1583 
1584  const bool is_gpu_fetch = device_type_ == ExecutorDeviceType::GPU;
1585 
1586  auto getDataMgr = [&]() {
1587  auto executor = query_mem_desc_.getExecutor();
1588  CHECK(executor);
1589  auto& data_mgr = executor->catalog_->getDataMgr();
1590  return &data_mgr;
1591  };
1592 
1593  auto getSeparateVarlenStorage = [&]() -> decltype(auto) {
1594  const auto storage_idx = getStorageIndex(entry_buff_idx);
1595  CHECK_LT(storage_idx.first, serialized_varlen_buffer_.size());
1596  const auto& varlen_buffer = serialized_varlen_buffer_[storage_idx.first];
1597  return varlen_buffer;
1598  };
1599 
1600  if (separate_varlen_storage_valid_ && getCoordsDataPtr(geo_target_ptr) < 0) {
1601  CHECK_EQ(-1, getCoordsDataPtr(geo_target_ptr));
1602  return TargetValue(nullptr);
1603  }
1604 
1605  const ColumnLazyFetchInfo* col_lazy_fetch = nullptr;
1606  if (!lazy_fetch_info_.empty()) {
1607  CHECK_LT(target_logical_idx, lazy_fetch_info_.size());
1608  col_lazy_fetch = &lazy_fetch_info_[target_logical_idx];
1609  }
1610 
1611  switch (target_info.sql_type.get_type()) {
1612  case kPOINT: {
1613  if (separate_varlen_storage_valid_ && !target_info.is_agg) {
1614  const auto& varlen_buffer = getSeparateVarlenStorage();
1615  CHECK_LT(static_cast<size_t>(getCoordsDataPtr(geo_target_ptr)),
1616  varlen_buffer.size());
1617 
1618  return GeoTargetValueBuilder<kPOINT, GeoQueryOutputFetchHandler>::build(
1619  target_info.sql_type,
1620  geo_return_type_,
1621  nullptr,
1622  false,
1623  device_id_,
1624  reinterpret_cast<int64_t>(
1625  varlen_buffer[getCoordsDataPtr(geo_target_ptr)].data()),
1626  static_cast<int64_t>(varlen_buffer[getCoordsDataPtr(geo_target_ptr)].size()));
1627  } else if (col_lazy_fetch && col_lazy_fetch->is_lazily_fetched) {
1628  const auto& frag_col_buffers = getFragColBuffers();
1629  return GeoTargetValueBuilder<kPOINT, GeoLazyFetchHandler>::build(
1630  target_info.sql_type,
1631  geo_return_type_,
1632  frag_col_buffers[col_lazy_fetch->local_col_id],
1633  getCoordsDataPtr(geo_target_ptr));
1634  } else {
1635  return GeoTargetValueBuilder<kPOINT, GeoQueryOutputFetchHandler>::build(
1636  target_info.sql_type,
1637  geo_return_type_,
1638  is_gpu_fetch ? getDataMgr() : nullptr,
1639  is_gpu_fetch,
1640  device_id_,
1641  getCoordsDataPtr(geo_target_ptr),
1642  getCoordsLength(geo_target_ptr));
1643  }
1644  break;
1645  }
1646  case kLINESTRING: {
1647  if (separate_varlen_storage_valid_ && !target_info.is_agg) {
1648  const auto& varlen_buffer = getSeparateVarlenStorage();
1649  CHECK_LT(static_cast<size_t>(getCoordsDataPtr(geo_target_ptr)),
1650  varlen_buffer.size());
1651 
1652  return GeoTargetValueBuilder<kLINESTRING, GeoQueryOutputFetchHandler>::build(
1653  target_info.sql_type,
1654  geo_return_type_,
1655  nullptr,
1656  false,
1657  device_id_,
1658  reinterpret_cast<int64_t>(
1659  varlen_buffer[getCoordsDataPtr(geo_target_ptr)].data()),
1660  static_cast<int64_t>(varlen_buffer[getCoordsDataPtr(geo_target_ptr)].size()));
1661  } else if (col_lazy_fetch && col_lazy_fetch->is_lazily_fetched) {
1662  const auto& frag_col_buffers = getFragColBuffers();
1663  return GeoTargetValueBuilder<kLINESTRING, GeoLazyFetchHandler>::build(
1664  target_info.sql_type,
1665  geo_return_type_,
1666  frag_col_buffers[col_lazy_fetch->local_col_id],
1667  getCoordsDataPtr(geo_target_ptr));
1668  } else {
1669  return GeoTargetValueBuilder<kLINESTRING, GeoQueryOutputFetchHandler>::build(
1670  target_info.sql_type,
1671  geo_return_type_,
1672  is_gpu_fetch ? getDataMgr() : nullptr,
1673  is_gpu_fetch,
1674  device_id_,
1675  getCoordsDataPtr(geo_target_ptr),
1676  getCoordsLength(geo_target_ptr));
1677  }
1678  break;
1679  }
1680  case kPOLYGON: {
1681  if (separate_varlen_storage_valid_ && !target_info.is_agg) {
1682  const auto& varlen_buffer = getSeparateVarlenStorage();
1683  CHECK_LT(static_cast<size_t>(getCoordsDataPtr(geo_target_ptr) + 1),
1684  varlen_buffer.size());
1685 
1686  return GeoTargetValueBuilder<kPOLYGON, GeoQueryOutputFetchHandler>::build(
1687  target_info.sql_type,
1688  geo_return_type_,
1689  nullptr,
1690  false,
1691  device_id_,
1692  reinterpret_cast<int64_t>(
1693  varlen_buffer[getCoordsDataPtr(geo_target_ptr)].data()),
1694  static_cast<int64_t>(varlen_buffer[getCoordsDataPtr(geo_target_ptr)].size()),
1695  reinterpret_cast<int64_t>(
1696  varlen_buffer[getCoordsDataPtr(geo_target_ptr) + 1].data()),
1697  static_cast<int64_t>(
1698  varlen_buffer[getCoordsDataPtr(geo_target_ptr) + 1].size()));
1699  } else if (col_lazy_fetch && col_lazy_fetch->is_lazily_fetched) {
1700  const auto& frag_col_buffers = getFragColBuffers();
1701 
1702  return GeoTargetValueBuilder<kPOLYGON, GeoLazyFetchHandler>::build(
1703  target_info.sql_type,
1704  geo_return_type_,
1705  frag_col_buffers[col_lazy_fetch->local_col_id],
1706  getCoordsDataPtr(geo_target_ptr),
1707  frag_col_buffers[col_lazy_fetch->local_col_id + 1],
1708  getCoordsDataPtr(geo_target_ptr));
1709  } else {
1710  return GeoTargetValueBuilder<kPOLYGON, GeoQueryOutputFetchHandler>::build(
1711  target_info.sql_type,
1712  geo_return_type_,
1713  is_gpu_fetch ? getDataMgr() : nullptr,
1714  is_gpu_fetch,
1715  device_id_,
1716  getCoordsDataPtr(geo_target_ptr),
1717  getCoordsLength(geo_target_ptr),
1718  getRingSizesPtr(geo_target_ptr),
1719  getRingSizesLength(geo_target_ptr) * 4);
1720  }
1721  break;
1722  }
1723  case kMULTIPOLYGON: {
1724  if (separate_varlen_storage_valid_ && !target_info.is_agg) {
1725  const auto& varlen_buffer = getSeparateVarlenStorage();
1726  CHECK_LT(static_cast<size_t>(getCoordsDataPtr(geo_target_ptr) + 2),
1727  varlen_buffer.size());
1728 
1729  return GeoTargetValueBuilder<kMULTIPOLYGON, GeoQueryOutputFetchHandler>::build(
1730  target_info.sql_type,
1731  geo_return_type_,
1732  nullptr,
1733  false,
1734  device_id_,
1735  reinterpret_cast<int64_t>(
1736  varlen_buffer[getCoordsDataPtr(geo_target_ptr)].data()),
1737  static_cast<int64_t>(varlen_buffer[getCoordsDataPtr(geo_target_ptr)].size()),
1738  reinterpret_cast<int64_t>(
1739  varlen_buffer[getCoordsDataPtr(geo_target_ptr) + 1].data()),
1740  static_cast<int64_t>(
1741  varlen_buffer[getCoordsDataPtr(geo_target_ptr) + 1].size()),
1742  reinterpret_cast<int64_t>(
1743  varlen_buffer[getCoordsDataPtr(geo_target_ptr) + 2].data()),
1744  static_cast<int64_t>(
1745  varlen_buffer[getCoordsDataPtr(geo_target_ptr) + 2].size()));
1746  } else if (col_lazy_fetch && col_lazy_fetch->is_lazily_fetched) {
1747  const auto& frag_col_buffers = getFragColBuffers();
1748 
1749  return GeoTargetValueBuilder<kMULTIPOLYGON, GeoLazyFetchHandler>::build(
1750  target_info.sql_type,
1751  geo_return_type_,
1752  frag_col_buffers[col_lazy_fetch->local_col_id],
1753  getCoordsDataPtr(geo_target_ptr),
1754  frag_col_buffers[col_lazy_fetch->local_col_id + 1],
1755  getCoordsDataPtr(geo_target_ptr),
1756  frag_col_buffers[col_lazy_fetch->local_col_id + 2],
1757  getCoordsDataPtr(geo_target_ptr));
1758  } else {
1759  return GeoTargetValueBuilder<kMULTIPOLYGON, GeoQueryOutputFetchHandler>::build(
1760  target_info.sql_type,
1761  geo_return_type_,
1762  is_gpu_fetch ? getDataMgr() : nullptr,
1763  is_gpu_fetch,
1764  device_id_,
1765  getCoordsDataPtr(geo_target_ptr),
1766  getCoordsLength(geo_target_ptr),
1767  getRingSizesPtr(geo_target_ptr),
1768  getRingSizesLength(geo_target_ptr) * 4,
1769  getPolyRingsPtr(geo_target_ptr),
1770  getPolyRingsLength(geo_target_ptr) * 4);
1771  }
1772  break;
1773  }
1774  default:
1775  throw std::runtime_error("Unknown Geometry type encountered: " +
1776  target_info.sql_type.get_type_name());
1777  }
1778  UNREACHABLE();
1779  return TargetValue(nullptr);
1780 }
#define CHECK_EQ(x, y)
Definition: Logger.h:205
const std::vector< const int8_t * > & getColumnFrag(const size_t storge_idx, const size_t col_logical_idx, int64_t &global_idx) const
GeoReturnType geo_return_type_
Definition: ResultSet.h:878
T advance_to_next_columnar_target_buff(T target_ptr, const QueryMemoryDescriptor &query_mem_desc, const size_t target_slot_idx)
SQLTypeInfo sql_type
Definition: TargetInfo.h:42
QueryMemoryDescriptor query_mem_desc_
Definition: ResultSet.h:839
#define UNREACHABLE()
Definition: Logger.h:241
int64_t read_int_from_buff(const int8_t *ptr, const int8_t compact_sz)
std::vector< SerializedVarlenBufferStorage > serialized_varlen_buffer_
Definition: ResultSet.h:870
const int8_t getPaddedSlotWidthBytes(const size_t slot_idx) const
const int local_col_id
Definition: ResultSet.h:235
const Executor * getExecutor() const
bool is_agg
Definition: TargetInfo.h:40
std::pair< size_t, size_t > getStorageIndex(const size_t entry_idx) const
Definition: ResultSet.cpp:659
StorageLookupResult findStorage(const size_t entry_idx) const
Definition: ResultSet.cpp:684
const std::vector< ColumnLazyFetchInfo > lazy_fetch_info_
Definition: ResultSet.h:857
size_t getPaddedColWidthForRange(const size_t offset, const size_t range) const
#define CHECK_LT(x, y)
Definition: Logger.h:207
bool is_geometry() const
Definition: sqltypes.h:429
std::string get_type_name() const
Definition: sqltypes.h:362
std::vector< std::vector< std::vector< const int8_t * > > > col_buffers_
Definition: ResultSet.h:858
const bool is_lazily_fetched
Definition: ResultSet.h:234
const ExecutorDeviceType device_type_
Definition: ResultSet.h:837
#define CHECK(condition)
Definition: Logger.h:197
HOST DEVICE SQLTypes get_type() const
Definition: sqltypes.h:259
bool separate_varlen_storage_valid_
Definition: ResultSet.h:871
boost::variant< ScalarTargetValue, ArrayTargetValue, GeoTargetValue, GeoTargetValuePtr > TargetValue
Definition: TargetValue.h:167
const int device_id_
Definition: ResultSet.h:838
+ Here is the call graph for this function:
+ Here is the caller graph for this function:

◆ makeTargetValue()

TargetValue ResultSet::makeTargetValue ( const int8_t *  ptr,
const int8_t  compact_sz,
const TargetInfo target_info,
const size_t  target_logical_idx,
const bool  translate_strings,
const bool  decimal_to_double,
const size_t  entry_buff_idx 
) const
private

Definition at line 1783 of file ResultSetIteration.cpp.

References TargetInfo::agg_kind, CHECK, CHECK_EQ, CHECK_GE, CHECK_LT, col_buffers_, count_distinct_set_size(), decimal_to_int_type(), executor_, exp_to_scale(), QueryMemoryDescriptor::forceFourByteFloat(), get_compact_type(), SQLTypeInfo::get_elem_type(), getColumnFrag(), QueryMemoryDescriptor::getCountDistinctDescriptor(), getStorageIndex(), inline_int_null_val(), anonymous_namespace{ResultSetIteration.cpp}::int_resize_cast(), TargetInfo::is_agg, SQLTypeInfo::is_column(), SQLTypeInfo::is_date_in_days(), is_distinct_target(), QueryMemoryDescriptor::isLogicalSizedColumnsAllowed(), kAVG, kBIGINT, kENCODING_DICT, kFLOAT, kMAX, kMIN, kSINGLE_VALUE, kSUM, lazy_decode(), lazy_fetch_info_, NULL_DOUBLE, NULL_INT, query_mem_desc_, read_int_from_buff(), row_set_mem_owner_, and TargetInfo::sql_type.

Referenced by getTargetValueFromBufferColwise(), and getTargetValueFromBufferRowwise().

1789  {
1790  auto actual_compact_sz = compact_sz;
1791  const auto& type_info =
1792  (target_info.sql_type.is_column() ? target_info.sql_type.get_elem_type()
1793  : target_info.sql_type);
1794  if (type_info.get_type() == kFLOAT && !query_mem_desc_.forceFourByteFloat()) {
1795  if (query_mem_desc_.isLogicalSizedColumnsAllowed()) {
1796  actual_compact_sz = sizeof(float);
1797  } else {
1798  actual_compact_sz = sizeof(double);
1799  }
1800  if (target_info.is_agg &&
1801  (target_info.agg_kind == kAVG || target_info.agg_kind == kSUM ||
1802  target_info.agg_kind == kMIN || target_info.agg_kind == kMAX ||
1803  target_info.agg_kind == kSINGLE_VALUE)) {
1804  // The above listed aggregates use two floats in a single 8-byte slot. Set the
1805  // padded size to 4 bytes to properly read each value.
1806  actual_compact_sz = sizeof(float);
1807  }
1808  }
1809  if (get_compact_type(target_info).is_date_in_days()) {
1810  // Dates encoded in days are converted to 8 byte values on read.
1811  actual_compact_sz = sizeof(int64_t);
1812  }
1813 
1814  // String dictionary keys are read as 32-bit values regardless of encoding
1815  if (type_info.is_string() && type_info.get_compression() == kENCODING_DICT &&
1816  type_info.get_comp_param()) {
1817  actual_compact_sz = sizeof(int32_t);
1818  }
1819 
1820  auto ival = read_int_from_buff(ptr, actual_compact_sz);
1821  const auto& chosen_type = get_compact_type(target_info);
1822  if (!lazy_fetch_info_.empty()) {
1823  CHECK_LT(target_logical_idx, lazy_fetch_info_.size());
1824  const auto& col_lazy_fetch = lazy_fetch_info_[target_logical_idx];
1825  if (col_lazy_fetch.is_lazily_fetched) {
1826  CHECK_GE(ival, 0);
1827  const auto storage_idx = getStorageIndex(entry_buff_idx);
1828  CHECK_LT(storage_idx.first, col_buffers_.size());
1829  auto& frag_col_buffers = getColumnFrag(storage_idx.first, target_logical_idx, ival);
1830  CHECK_LT(size_t(col_lazy_fetch.local_col_id), frag_col_buffers.size());
1831  ival = lazy_decode(
1832  col_lazy_fetch, frag_col_buffers[col_lazy_fetch.local_col_id], ival);
1833  if (chosen_type.is_fp()) {
1834  const auto dval = *reinterpret_cast<const double*>(may_alias_ptr(&ival));
1835  if (chosen_type.get_type() == kFLOAT) {
1836  return ScalarTargetValue(static_cast<float>(dval));
1837  } else {
1838  return ScalarTargetValue(dval);
1839  }
1840  }
1841  }
1842  }
1843  if (chosen_type.is_fp()) {
1844  switch (actual_compact_sz) {
1845  case 8: {
1846  const auto dval = *reinterpret_cast<const double*>(ptr);
1847  return chosen_type.get_type() == kFLOAT
1848  ? ScalarTargetValue(static_cast<const float>(dval))
1849  : ScalarTargetValue(dval);
1850  }
1851  case 4: {
1852  CHECK_EQ(kFLOAT, chosen_type.get_type());
1853  return *reinterpret_cast<const float*>(ptr);
1854  }
1855  default:
1856  CHECK(false);
1857  }
1858  }
1859  if (chosen_type.is_integer() | chosen_type.is_boolean() || chosen_type.is_time() ||
1860  chosen_type.is_timeinterval()) {
1861  if (is_distinct_target(target_info)) {
1862  return TargetValue(count_distinct_set_size(
1863  ival, query_mem_desc_.getCountDistinctDescriptor(target_logical_idx)));
1864  }
1865  // TODO(alex): remove int_resize_cast, make read_int_from_buff return the
1866  // right type instead
1867  if (inline_int_null_val(chosen_type) ==
1868  int_resize_cast(ival, chosen_type.get_logical_size())) {
1869  return inline_int_null_val(type_info);
1870  }
1871  return ival;
1872  }
1873  if (chosen_type.is_string() && chosen_type.get_compression() == kENCODING_DICT) {
1874  if (translate_strings) {
1875  if (static_cast<int32_t>(ival) ==
1876  NULL_INT) { // TODO(alex): this isn't nice, fix it
1877  return NullableString(nullptr);
1878  }
1879  StringDictionaryProxy* sdp{nullptr};
1880  if (!chosen_type.get_comp_param()) {
1881  sdp = row_set_mem_owner_->getLiteralStringDictProxy();
1882  } else {
1883  sdp = executor_
1884  ? executor_->getStringDictionaryProxy(
1885  chosen_type.get_comp_param(), row_set_mem_owner_, false)
1886  : row_set_mem_owner_->getStringDictProxy(chosen_type.get_comp_param());
1887  }
1888  return NullableString(sdp->getString(ival));
1889  } else {
1890  return static_cast<int64_t>(static_cast<int32_t>(ival));
1891  }
1892  }
1893  if (chosen_type.is_decimal()) {
1894  if (decimal_to_double) {
1895  if (target_info.is_agg &&
1896  (target_info.agg_kind == kAVG || target_info.agg_kind == kSUM ||
1897  target_info.agg_kind == kMIN || target_info.agg_kind == kMAX) &&
1898  ival == inline_int_null_val(SQLTypeInfo(kBIGINT, false))) {
1899  return NULL_DOUBLE;
1900  }
1901  if (ival ==
1902  inline_int_null_val(SQLTypeInfo(decimal_to_int_type(chosen_type), false))) {
1903  return NULL_DOUBLE;
1904  }
1905  return static_cast<double>(ival) / exp_to_scale(chosen_type.get_scale());
1906  }
1907  return ival;
1908  }
1909  CHECK(false);
1910  return TargetValue(int64_t(0));
1911 }
#define CHECK_EQ(x, y)
Definition: Logger.h:205
#define NULL_DOUBLE
Definition: sqltypes.h:186
double decimal_to_double(const SQLTypeInfo &otype, int64_t oval)
const std::vector< const int8_t * > & getColumnFrag(const size_t storge_idx, const size_t col_logical_idx, int64_t &global_idx) const
SQLTypeInfo sql_type
Definition: TargetInfo.h:42
const Executor * executor_
Definition: ResultSet.h:850
QueryMemoryDescriptor query_mem_desc_
Definition: ResultSet.h:839
#define CHECK_GE(x, y)
Definition: Logger.h:210
int64_t read_int_from_buff(const int8_t *ptr, const int8_t compact_sz)
int64_t lazy_decode(const ColumnLazyFetchInfo &col_lazy_fetch, const int8_t *byte_stream, const int64_t pos)
Definition: sqldefs.h:73
const SQLTypeInfo get_compact_type(const TargetInfo &target)
std::shared_ptr< RowSetMemoryOwner > row_set_mem_owner_
Definition: ResultSet.h:846
bool is_agg
Definition: TargetInfo.h:40
bool is_column() const
Definition: sqltypes.h:430
std::pair< size_t, size_t > getStorageIndex(const size_t entry_idx) const
Definition: ResultSet.cpp:659
int64_t count_distinct_set_size(const int64_t set_handle, const CountDistinctDescriptor &count_distinct_desc)
Definition: CountDistinct.h:75
Definition: sqldefs.h:75
bool is_distinct_target(const TargetInfo &target_info)
Definition: TargetInfo.h:129
#define NULL_INT
Definition: sqltypes.h:183
const std::vector< ColumnLazyFetchInfo > lazy_fetch_info_
Definition: ResultSet.h:857
SQLAgg agg_kind
Definition: TargetInfo.h:41
SQLTypes decimal_to_int_type(const SQLTypeInfo &ti)
Definition: Datum.cpp:302
#define CHECK_LT(x, y)
Definition: Logger.h:207
int64_t int_resize_cast(const int64_t ival, const size_t sz)
std::vector< std::vector< std::vector< const int8_t * > > > col_buffers_
Definition: ResultSet.h:858
boost::variant< std::string, void * > NullableString
Definition: TargetValue.h:155
SQLTypeInfo get_elem_type() const
Definition: sqltypes.h:624
#define CHECK(condition)
Definition: Logger.h:197
uint64_t exp_to_scale(const unsigned exp)
int64_t inline_int_null_val(const SQL_TYPE_INFO &ti)
boost::variant< ScalarTargetValue, ArrayTargetValue, GeoTargetValue, GeoTargetValuePtr > TargetValue
Definition: TargetValue.h:167
const CountDistinctDescriptor & getCountDistinctDescriptor(const size_t idx) const
Definition: sqldefs.h:74
Definition: sqldefs.h:72
bool is_date_in_days() const
Definition: sqltypes.h:632
boost::variant< int64_t, double, float, NullableString > ScalarTargetValue
Definition: TargetValue.h:156
+ Here is the call graph for this function:
+ Here is the caller graph for this function:

◆ makeVarlenTargetValue()

TargetValue ResultSet::makeVarlenTargetValue ( const int8_t *  ptr1,
const int8_t  compact_sz1,
const int8_t *  ptr2,
const int8_t  compact_sz2,
const TargetInfo target_info,
const size_t  target_logical_idx,
const bool  translate_strings,
const size_t  entry_buff_idx 
) const
private

Definition at line 1359 of file ResultSetIteration.cpp.

References anonymous_namespace{ResultSetIteration.cpp}::build_array_target_value(), CHECK, CHECK_EQ, CHECK_GE, CHECK_GT, CHECK_LT, ChunkIter_get_nth(), col_buffers_, copy_from_gpu(), device_id_, device_type_, executor_, SQLTypeInfo::get_array_context_logical_size(), SQLTypeInfo::get_compression(), SQLTypeInfo::get_elem_type(), SQLTypeInfo::get_type(), getColumnFrag(), QueryMemoryDescriptor::getExecutor(), getStorageIndex(), GPU, TargetInfo::is_agg, SQLTypeInfo::is_array(), VarlenDatum::is_null, SQLTypeInfo::is_string(), kARRAY, kENCODING_NONE, lazy_fetch_info_, VarlenDatum::length, boost::optional, VarlenDatum::pointer, query_mem_desc_, read_int_from_buff(), row_set_mem_owner_, separate_varlen_storage_valid_, serialized_varlen_buffer_, and TargetInfo::sql_type.

Referenced by getTargetValueFromBufferColwise(), and getTargetValueFromBufferRowwise().

1366  {
1367  auto varlen_ptr = read_int_from_buff(ptr1, compact_sz1);
1368  if (separate_varlen_storage_valid_ && !target_info.is_agg) {
1369  if (varlen_ptr < 0) {
1370  CHECK_EQ(-1, varlen_ptr);
1371  if (target_info.sql_type.get_type() == kARRAY) {
1372  return ArrayTargetValue(boost::optional<std::vector<ScalarTargetValue>>{});
1373  }
1374  return TargetValue(nullptr);
1375  }
1376  const auto storage_idx = getStorageIndex(entry_buff_idx);
1377  if (target_info.sql_type.is_string()) {
1378  CHECK(target_info.sql_type.get_compression() == kENCODING_NONE);
1379  CHECK_LT(storage_idx.first, serialized_varlen_buffer_.size());
1380  const auto& varlen_buffer_for_storage =
1381  serialized_varlen_buffer_[storage_idx.first];
1382  CHECK_LT(static_cast<size_t>(varlen_ptr), varlen_buffer_for_storage.size());
1383  return varlen_buffer_for_storage[varlen_ptr];
1384  } else if (target_info.sql_type.get_type() == kARRAY) {
1385  CHECK_LT(storage_idx.first, serialized_varlen_buffer_.size());
1386  const auto& varlen_buffer = serialized_varlen_buffer_[storage_idx.first];
1387  CHECK_LT(static_cast<size_t>(varlen_ptr), varlen_buffer.size());
1388 
1389  return build_array_target_value(
1390  target_info.sql_type,
1391  reinterpret_cast<const int8_t*>(varlen_buffer[varlen_ptr].data()),
1392  varlen_buffer[varlen_ptr].size(),
1393  translate_strings,
1394  row_set_mem_owner_,
1395  executor_);
1396  } else {
1397  CHECK(false);
1398  }
1399  }
1400  if (!lazy_fetch_info_.empty()) {
1401  CHECK_LT(target_logical_idx, lazy_fetch_info_.size());
1402  const auto& col_lazy_fetch = lazy_fetch_info_[target_logical_idx];
1403  if (col_lazy_fetch.is_lazily_fetched) {
1404  const auto storage_idx = getStorageIndex(entry_buff_idx);
1405  CHECK_LT(storage_idx.first, col_buffers_.size());
1406  auto& frag_col_buffers =
1407  getColumnFrag(storage_idx.first, target_logical_idx, varlen_ptr);
1408  bool is_end{false};
1409  if (target_info.sql_type.is_string()) {
1410  VarlenDatum vd;
1411  ChunkIter_get_nth(reinterpret_cast<ChunkIter*>(const_cast<int8_t*>(
1412  frag_col_buffers[col_lazy_fetch.local_col_id])),
1413  varlen_ptr,
1414  false,
1415  &vd,
1416  &is_end);
1417  CHECK(!is_end);
1418  if (vd.is_null) {
1419  return TargetValue(nullptr);
1420  }
1421  CHECK(vd.pointer);
1422  CHECK_GT(vd.length, 0u);
1423  std::string fetched_str(reinterpret_cast<char*>(vd.pointer), vd.length);
1424  return fetched_str;
1425  } else {
1426  CHECK(target_info.sql_type.is_array());
1427  ArrayDatum ad;
1428  ChunkIter_get_nth(reinterpret_cast<ChunkIter*>(const_cast<int8_t*>(
1429  frag_col_buffers[col_lazy_fetch.local_col_id])),
1430  varlen_ptr,
1431  &ad,
1432  &is_end);
1433  CHECK(!is_end);
1434  if (ad.is_null) {
1435  return ArrayTargetValue(boost::optional<std::vector<ScalarTargetValue>>{});
1436  }
1437  CHECK_GE(ad.length, 0u);
1438  if (ad.length > 0) {
1439  CHECK(ad.pointer);
1440  }
1441  return build_array_target_value(target_info.sql_type,
1442  ad.pointer,
1443  ad.length,
1444  translate_strings,
1445  row_set_mem_owner_,
1446  executor_);
1447  }
1448  }
1449  }
1450  if (!varlen_ptr) {
1451  if (target_info.sql_type.is_array()) {
1452  return ArrayTargetValue(boost::optional<std::vector<ScalarTargetValue>>{});
1453  }
1454  return TargetValue(nullptr);
1455  }
1456  auto length = read_int_from_buff(ptr2, compact_sz2);
1457  if (target_info.sql_type.is_array()) {
1458  const auto& elem_ti = target_info.sql_type.get_elem_type();
1459  length *= elem_ti.get_array_context_logical_size();
1460  }
1461  std::vector<int8_t> cpu_buffer;
1462  if (varlen_ptr && device_type_ == ExecutorDeviceType::GPU) {
1463  cpu_buffer.resize(length);
1464  const auto executor = query_mem_desc_.getExecutor();
1465  CHECK(executor);
1466  auto& data_mgr = executor->catalog_->getDataMgr();
1467  copy_from_gpu(&data_mgr,
1468  &cpu_buffer[0],
1469  static_cast<CUdeviceptr>(varlen_ptr),
1470  length,
1471  device_id_);
1472  varlen_ptr = reinterpret_cast<int64_t>(&cpu_buffer[0]);
1473  }
1474  if (target_info.sql_type.is_array()) {
1475  return build_array_target_value(target_info.sql_type,
1476  reinterpret_cast<const int8_t*>(varlen_ptr),
1477  length,
1478  translate_strings,
1479  row_set_mem_owner_,
1480  executor_);
1481  }
1482  return std::string(reinterpret_cast<char*>(varlen_ptr), length);
1483 }
#define CHECK_EQ(x, y)
Definition: Logger.h:205
bool is_array() const
Definition: sqltypes.h:425
bool is_string() const
Definition: sqltypes.h:417
const std::vector< const int8_t * > & getColumnFrag(const size_t storge_idx, const size_t col_logical_idx, int64_t &global_idx) const
bool is_null
Definition: sqltypes.h:76
SQLTypeInfo sql_type
Definition: TargetInfo.h:42
int get_array_context_logical_size() const
Definition: sqltypes.h:465
const Executor * executor_
Definition: ResultSet.h:850
QueryMemoryDescriptor query_mem_desc_
Definition: ResultSet.h:839
#define CHECK_GE(x, y)
Definition: Logger.h:210
int64_t read_int_from_buff(const int8_t *ptr, const int8_t compact_sz)
std::vector< SerializedVarlenBufferStorage > serialized_varlen_buffer_
Definition: ResultSet.h:870
HOST DEVICE EncodingType get_compression() const
Definition: sqltypes.h:267
#define CHECK_GT(x, y)
Definition: Logger.h:209
DEVICE void ChunkIter_get_nth(ChunkIter *it, int n, bool uncompress, VarlenDatum *result, bool *is_end)
Definition: ChunkIter.cpp:181
int8_t * pointer
Definition: sqltypes.h:75
const Executor * getExecutor() const
std::conditional_t< is_cuda_compiler(), DeviceArrayDatum, HostArrayDatum > ArrayDatum
Definition: sqltypes.h:131
std::shared_ptr< RowSetMemoryOwner > row_set_mem_owner_
Definition: ResultSet.h:846
bool is_agg
Definition: TargetInfo.h:40
std::pair< size_t, size_t > getStorageIndex(const size_t entry_idx) const
Definition: ResultSet.cpp:659
void copy_from_gpu(Data_Namespace::DataMgr *data_mgr, void *dst, const CUdeviceptr src, const size_t num_bytes, const int device_id)
boost::optional< std::vector< ScalarTargetValue > > ArrayTargetValue
Definition: TargetValue.h:157
const std::vector< ColumnLazyFetchInfo > lazy_fetch_info_
Definition: ResultSet.h:857
#define CHECK_LT(x, y)
Definition: Logger.h:207
std::vector< std::vector< std::vector< const int8_t * > > > col_buffers_
Definition: ResultSet.h:858
const ExecutorDeviceType device_type_
Definition: ResultSet.h:837
SQLTypeInfo get_elem_type() const
Definition: sqltypes.h:624
#define CHECK(condition)
Definition: Logger.h:197
HOST DEVICE SQLTypes get_type() const
Definition: sqltypes.h:259
bool separate_varlen_storage_valid_
Definition: ResultSet.h:871
boost::variant< ScalarTargetValue, ArrayTargetValue, GeoTargetValue, GeoTargetValuePtr > TargetValue
Definition: TargetValue.h:167
TargetValue build_array_target_value(const SQLTypeInfo &array_ti, const int8_t *buff, const size_t buff_sz, const bool translate_strings, std::shared_ptr< RowSetMemoryOwner > row_set_mem_owner, const Executor *executor)
size_t length
Definition: sqltypes.h:74
const int device_id_
Definition: ResultSet.h:838
+ Here is the call graph for this function:
+ Here is the caller graph for this function:

◆ moveToBegin()

void ResultSet::moveToBegin ( ) const

Definition at line 492 of file ResultSet.cpp.

References crt_row_buff_idx_, and fetched_so_far_.

Referenced by rowCount().

492  {
493  crt_row_buff_idx_ = 0;
494  fetched_so_far_ = 0;
495 }
size_t fetched_so_far_
Definition: ResultSet.h:843
size_t crt_row_buff_idx_
Definition: ResultSet.h:842
+ Here is the caller graph for this function:

◆ parallelRowCount()

size_t ResultSet::parallelRowCount ( ) const
private

Definition at line 393 of file ResultSet.cpp.

References cpu_threads(), drop_first_, parse_ast::end, entryCount(), g_use_tbb_pool, anonymous_namespace{ResultSet.cpp}::get_truncated_row_count(), getLimit(), and isRowAtEmpty().

Referenced by rowCount().

393  {
394  auto execute_parallel_row_count = [this](auto counter_threads) -> size_t {
395  const size_t worker_count = cpu_threads();
396  for (size_t i = 0,
397  start_entry = 0,
398  stride = (entryCount() + worker_count - 1) / worker_count;
399  i < worker_count && start_entry < entryCount();
400  ++i, start_entry += stride) {
401  const auto end_entry = std::min(start_entry + stride, entryCount());
402  counter_threads.spawn(
403  [this](const size_t start, const size_t end) {
404  size_t row_count{0};
405  for (size_t i = start; i < end; ++i) {
406  if (!isRowAtEmpty(i)) {
407  ++row_count;
408  }
409  }
410  return row_count;
411  },
412  start_entry,
413  end_entry);
414  }
415  const auto row_counts = counter_threads.join();
416  const size_t row_count = std::accumulate(row_counts.begin(), row_counts.end(), 0);
417  return row_count;
418  };
419  // will fall back to futures threadpool if TBB is not enabled
420  const auto row_count =
421  g_use_tbb_pool
422  ? execute_parallel_row_count(threadpool::ThreadPool<size_t>())
423  : execute_parallel_row_count(threadpool::FuturesThreadPool<size_t>());
424 
425  return get_truncated_row_count(row_count, getLimit(), drop_first_);
426 }
size_t entryCount() const
size_t get_truncated_row_count(size_t total_row_count, size_t limit, size_t offset)
Definition: ResultSet.cpp:319
size_t drop_first_
Definition: ResultSet.h:844
bool g_use_tbb_pool
Definition: Execute.cpp:76
bool isRowAtEmpty(const size_t index) const
size_t getLimit() const
Definition: ResultSet.cpp:991
int cpu_threads()
Definition: thread_count.h:24
+ Here is the call graph for this function:
+ Here is the caller graph for this function:

◆ parallelTop()

void ResultSet::parallelTop ( const std::list< Analyzer::OrderEntry > &  order_entries,
const size_t  top_n 
)
private

Definition at line 619 of file ResultSet.cpp.

References cpu_threads(), createComparator(), DEBUG_TIMER, initPermutationBuffer(), permutation_, and topPermutation().

Referenced by sort().

620  {
621  auto timer = DEBUG_TIMER(__func__);
622  const size_t step = cpu_threads();
623  std::vector<std::vector<uint32_t>> strided_permutations(step);
624  std::vector<std::future<void>> init_futures;
625  for (size_t start = 0; start < step; ++start) {
626  init_futures.emplace_back(
627  std::async(std::launch::async, [this, start, step, &strided_permutations] {
628  strided_permutations[start] = initPermutationBuffer(start, step);
629  }));
630  }
631  for (auto& init_future : init_futures) {
632  init_future.wait();
633  }
634  for (auto& init_future : init_futures) {
635  init_future.get();
636  }
637  auto compare = createComparator(order_entries, true);
638  std::vector<std::future<void>> top_futures;
639  for (auto& strided_permutation : strided_permutations) {
640  top_futures.emplace_back(
641  std::async(std::launch::async, [&strided_permutation, &compare, top_n] {
642  topPermutation(strided_permutation, top_n, compare);
643  }));
644  }
645  for (auto& top_future : top_futures) {
646  top_future.wait();
647  }
648  for (auto& top_future : top_futures) {
649  top_future.get();
650  }
651  permutation_.reserve(strided_permutations.size() * top_n);
652  for (const auto& strided_permutation : strided_permutations) {
653  permutation_.insert(
654  permutation_.end(), strided_permutation.begin(), strided_permutation.end());
655  }
656  topPermutation(permutation_, top_n, compare);
657 }
std::vector< uint32_t > permutation_
Definition: ResultSet.h:847
std::vector< uint32_t > initPermutationBuffer(const size_t start, const size_t step)
Definition: ResultSet.cpp:596
static void topPermutation(std::vector< uint32_t > &to_sort, const size_t n, const std::function< bool(const uint32_t, const uint32_t)> compare)
Definition: ResultSet.cpp:878
#define DEBUG_TIMER(name)
Definition: Logger.h:313
std::function< bool(const uint32_t, const uint32_t)> createComparator(const std::list< Analyzer::OrderEntry > &order_entries, const bool use_heap)
Definition: ResultSet.h:774
int cpu_threads()
Definition: thread_count.h:24
+ Here is the call graph for this function:
+ Here is the caller graph for this function:

◆ radixSortOnCpu()

void ResultSet::radixSortOnCpu ( const std::list< Analyzer::OrderEntry > &  order_entries) const
private

Definition at line 935 of file ResultSet.cpp.

References apply_permutation_cpu(), CHECK, CHECK_EQ, DEBUG_TIMER, QueryMemoryDescriptor::getColOffInBytes(), QueryMemoryDescriptor::getEntryCount(), QueryMemoryDescriptor::getPaddedSlotWidthBytes(), QueryMemoryDescriptor::getSlotCount(), QueryMemoryDescriptor::hasKeylessHash(), query_mem_desc_, sort_groups_cpu(), and storage_.

Referenced by sort().

936  {
937  auto timer = DEBUG_TIMER(__func__);
938  CHECK(!query_mem_desc_.hasKeylessHash());
939  std::vector<int64_t> tmp_buff(query_mem_desc_.getEntryCount());
940  std::vector<int32_t> idx_buff(query_mem_desc_.getEntryCount());
941  CHECK_EQ(size_t(1), order_entries.size());
942  auto buffer_ptr = storage_->getUnderlyingBuffer();
943  for (const auto& order_entry : order_entries) {
944  const auto target_idx = order_entry.tle_no - 1;
945  const auto sortkey_val_buff = reinterpret_cast<int64_t*>(
946  buffer_ptr + query_mem_desc_.getColOffInBytes(target_idx));
947  const auto chosen_bytes = query_mem_desc_.getPaddedSlotWidthBytes(target_idx);
948  sort_groups_cpu(sortkey_val_buff,
949  &idx_buff[0],
950  query_mem_desc_.getEntryCount(),
951  order_entry.is_desc,
952  chosen_bytes);
953  apply_permutation_cpu(reinterpret_cast<int64_t*>(buffer_ptr),
954  &idx_buff[0],
955  query_mem_desc_.getEntryCount(),
956  &tmp_buff[0],
957  sizeof(int64_t));
958  for (size_t target_idx = 0; target_idx < query_mem_desc_.getSlotCount();
959  ++target_idx) {
960  if (static_cast<int>(target_idx) == order_entry.tle_no - 1) {
961  continue;
962  }
963  const auto chosen_bytes = query_mem_desc_.getPaddedSlotWidthBytes(target_idx);
964  const auto satellite_val_buff = reinterpret_cast<int64_t*>(
965  buffer_ptr + query_mem_desc_.getColOffInBytes(target_idx));
966  apply_permutation_cpu(satellite_val_buff,
967  &idx_buff[0],
968  query_mem_desc_.getEntryCount(),
969  &tmp_buff[0],
970  chosen_bytes);
971  }
972  }
973 }
#define CHECK_EQ(x, y)
Definition: Logger.h:205
void sort_groups_cpu(int64_t *val_buff, int32_t *idx_buff, const uint64_t entry_count, const bool desc, const uint32_t chosen_bytes)
Definition: InPlaceSort.cpp:27
QueryMemoryDescriptor query_mem_desc_
Definition: ResultSet.h:839
std::unique_ptr< ResultSetStorage > storage_
Definition: ResultSet.h:840
void apply_permutation_cpu(int64_t *val_buff, int32_t *idx_buff, const uint64_t entry_count, int64_t *tmp_buff, const uint32_t chosen_bytes)
Definition: InPlaceSort.cpp:46
const int8_t getPaddedSlotWidthBytes(const size_t slot_idx) const
#define CHECK(condition)
Definition: Logger.h:197
#define DEBUG_TIMER(name)
Definition: Logger.h:313
size_t getColOffInBytes(const size_t col_idx) const
+ Here is the call graph for this function:
+ Here is the caller graph for this function:

◆ radixSortOnGpu()

void ResultSet::radixSortOnGpu ( const std::list< Analyzer::OrderEntry > &  order_entries) const
private

Definition at line 900 of file ResultSet.cpp.

References copy_group_by_buffers_from_gpu(), create_dev_group_by_buffers(), DEBUG_TIMER, executor_, QueryMemoryDescriptor::getBufferSizeBytes(), GPU, inplace_sort_gpu(), KernelPerFragment, query_mem_desc_, and storage_.

Referenced by sort().

901  {
902  auto timer = DEBUG_TIMER(__func__);
903  auto data_mgr = &executor_->catalog_->getDataMgr();
904  const int device_id{0};
905  CudaAllocator cuda_allocator(data_mgr, device_id);
906  std::vector<int64_t*> group_by_buffers(executor_->blockSize());
907  group_by_buffers[0] = reinterpret_cast<int64_t*>(storage_->getUnderlyingBuffer());
908  auto dev_group_by_buffers =
909  create_dev_group_by_buffers(&cuda_allocator,
910  group_by_buffers,
911  query_mem_desc_,
912  executor_->blockSize(),
913  executor_->gridSize(),
914  device_id,
915  ExecutorDispatchMode::KernelPerFragment,
916  -1,
917  true,
918  true,
919  false,
920  nullptr);
921  inplace_sort_gpu(
922  order_entries, query_mem_desc_, dev_group_by_buffers, data_mgr, device_id);
923  copy_group_by_buffers_from_gpu(
924  data_mgr,
925  group_by_buffers,
926  query_mem_desc_.getBufferSizeBytes(ExecutorDeviceType::GPU),
927  dev_group_by_buffers.second,
928  query_mem_desc_,
929  executor_->blockSize(),
930  executor_->gridSize(),
931  device_id,
932  false);
933 }
GpuGroupByBuffers create_dev_group_by_buffers(DeviceAllocator *cuda_allocator, const std::vector< int64_t *> &group_by_buffers, const QueryMemoryDescriptor &query_mem_desc, const unsigned block_size_x, const unsigned grid_size_x, const int device_id, const ExecutorDispatchMode dispatch_mode, const int64_t num_input_rows, const bool prepend_index_buffer, const bool always_init_group_by_on_host, const bool use_bump_allocator, Allocator *insitu_allocator)
Definition: GpuMemUtils.cpp:60
const Executor * executor_
Definition: ResultSet.h:850
QueryMemoryDescriptor query_mem_desc_
Definition: ResultSet.h:839
std::unique_ptr< ResultSetStorage > storage_
Definition: ResultSet.h:840
void inplace_sort_gpu(const std::list< Analyzer::OrderEntry > &order_entries, const QueryMemoryDescriptor &query_mem_desc, const GpuGroupByBuffers &group_by_buffers, Data_Namespace::DataMgr *data_mgr, const int device_id)
void copy_group_by_buffers_from_gpu(Data_Namespace::DataMgr *data_mgr, const std::vector< int64_t *> &group_by_buffers, const size_t groups_buffer_size, const CUdeviceptr group_by_dev_buffers_mem, const QueryMemoryDescriptor &query_mem_desc, const unsigned block_size_x, const unsigned grid_size_x, const int device_id, const bool prepend_index_buffer)
size_t getBufferSizeBytes(const RelAlgExecutionUnit &ra_exe_unit, const unsigned thread_count, const ExecutorDeviceType device_type) const
#define DEBUG_TIMER(name)
Definition: Logger.h:313
+ Here is the call graph for this function:
+ Here is the caller graph for this function:

◆ rowCount()

size_t ResultSet::rowCount ( const bool  force_parallel = false) const

Definition at line 335 of file ResultSet.cpp.

References binSearchRowCount(), cached_row_count_, CHECK_GE, drop_first_, entryCount(), getNextRowUnlocked(), QueryMemoryDescriptor::getQueryDescriptionType(), just_explain_, keep_first_, moveToBegin(), parallelRowCount(), permutation_, Projection, query_mem_desc_, row_iteration_mutex_, and storage_.

335  {
336  if (just_explain_) {
337  return 1;
338  }
339  if (!permutation_.empty()) {
340  if (drop_first_ > permutation_.size()) {
341  return 0;
342  }
343  const auto limited_row_count = keep_first_ + drop_first_;
344  return limited_row_count ? std::min(limited_row_count, permutation_.size())
345  : permutation_.size();
346  }
347  if (cached_row_count_ != -1) {
348  CHECK_GE(cached_row_count_, 0);
349  return cached_row_count_;
350  }
351  if (!storage_) {
352  return 0;
353  }
354  if (permutation_.empty() && query_mem_desc_.getQueryDescriptionType() ==
355          QueryDescriptionType::Projection) {
356  return binSearchRowCount();
357  }
358  if (force_parallel || entryCount() > 20000) {
359  return parallelRowCount();
360  }
361  std::lock_guard<std::mutex> lock(row_iteration_mutex_);
362  moveToBegin();
363  size_t row_count{0};
364  while (true) {
365  auto crt_row = getNextRowUnlocked(false, false);
366  if (crt_row.empty()) {
367  break;
368  }
369  ++row_count;
370  }
371  moveToBegin();
372  return row_count;
373 }
std::mutex row_iteration_mutex_
Definition: ResultSet.h:875
size_t entryCount() const
QueryMemoryDescriptor query_mem_desc_
Definition: ResultSet.h:839
#define CHECK_GE(x, y)
Definition: Logger.h:210
std::unique_ptr< ResultSetStorage > storage_
Definition: ResultSet.h:840
size_t keep_first_
Definition: ResultSet.h:845
std::vector< TargetValue > getNextRowUnlocked(const bool translate_strings, const bool decimal_to_double) const
const bool just_explain_
Definition: ResultSet.h:873
std::vector< uint32_t > permutation_
Definition: ResultSet.h:847
std::atomic< int64_t > cached_row_count_
Definition: ResultSet.h:874
size_t drop_first_
Definition: ResultSet.h:844
size_t binSearchRowCount() const
Definition: ResultSet.cpp:380
size_t parallelRowCount() const
Definition: ResultSet.cpp:393
QueryDescriptionType getQueryDescriptionType() const
void moveToBegin() const
Definition: ResultSet.cpp:492
+ Here is the call graph for this function:

◆ rowIterator() [1/2]

ResultSetRowIterator ResultSet::rowIterator ( size_t  from_logical_index,
bool  translate_strings,
bool  decimal_to_double 
) const
inline

Definition at line 336 of file ResultSet.h.

338  {
339  ResultSetRowIterator rowIterator(this, translate_strings, decimal_to_double);
340 
341  // move to first logical position
342  ++rowIterator;
343 
344  for (size_t index = 0; index < from_logical_index; index++) {
345  ++rowIterator;
346  }
347 
348  return rowIterator;
349  }
double decimal_to_double(const SQLTypeInfo &otype, int64_t oval)
ResultSetRowIterator rowIterator(size_t from_logical_index, bool translate_strings, bool decimal_to_double) const
Definition: ResultSet.h:336

◆ rowIterator() [2/2]

ResultSetRowIterator ResultSet::rowIterator ( bool  translate_strings,
bool  decimal_to_double 
) const
inline

Definition at line 351 of file ResultSet.h.

352  {
353  return rowIterator(0, translate_strings, decimal_to_double);
354  }
double decimal_to_double(const SQLTypeInfo &otype, int64_t oval)
ResultSetRowIterator rowIterator(size_t from_logical_index, bool translate_strings, bool decimal_to_double) const
Definition: ResultSet.h:336

◆ serialize()

void ResultSet::serialize ( TSerializedRows &  serialized_rows) const

◆ serializeCountDistinctColumns()

void ResultSet::serializeCountDistinctColumns ( TSerializedRows &  ) const
private

◆ serializeProjection()

void ResultSet::serializeProjection ( TSerializedRows &  serialized_rows) const
private

◆ serializeVarlenAggColumn()

void ResultSet::serializeVarlenAggColumn ( int8_t *  buf,
std::vector< std::string > &  varlen_bufer 
) const
private

◆ setCachedRowCount()

void ResultSet::setCachedRowCount ( const size_t  row_count) const

Definition at line 375 of file ResultSet.cpp.

References cached_row_count_, and CHECK.

375  {
376  CHECK(cached_row_count_ == -1 || cached_row_count_ == static_cast<int64_t>(row_count));
377  cached_row_count_ = row_count;
378 }
std::atomic< int64_t > cached_row_count_
Definition: ResultSet.h:874
#define CHECK(condition)
Definition: Logger.h:197

◆ setGeoReturnType()

void ResultSet::setGeoReturnType ( const GeoReturnType  val)
inline

Definition at line 509 of file ResultSet.h.

509 { geo_return_type_ = val; }
GeoReturnType geo_return_type_
Definition: ResultSet.h:878

◆ setKernelQueueTime()

void ResultSet::setKernelQueueTime ( const int64_t  kernel_queue_time)

Definition at line 475 of file ResultSet.cpp.

References ResultSet::QueryExecutionTimings::kernel_queue_time, and timings_.

475  {
476  timings_.kernel_queue_time = kernel_queue_time;
477 }
QueryExecutionTimings timings_
Definition: ResultSet.h:849

◆ setQueueTime()

void ResultSet::setQueueTime ( const int64_t  queue_time)

Definition at line 471 of file ResultSet.cpp.

References ResultSet::QueryExecutionTimings::executor_queue_time, and timings_.

471  {
472  timings_.executor_queue_time = queue_time;
473 }
QueryExecutionTimings timings_
Definition: ResultSet.h:849

◆ setSeparateVarlenStorageValid()

void ResultSet::setSeparateVarlenStorageValid ( const bool  val)
inline

Definition at line 541 of file ResultSet.h.

References ResultSetStorage::binSearchRowCount().

541  {
542  separate_varlen_storage_valid_ = val;
543  }
bool separate_varlen_storage_valid_
Definition: ResultSet.h:871
+ Here is the call graph for this function:

◆ sort()

void ResultSet::sort ( const std::list< Analyzer::OrderEntry > &  order_entries,
const size_t  top_n 
)

Definition at line 521 of file ResultSet.cpp.

References Executor::baseline_threshold, baselineSort(), cached_row_count_, canUseFastBaselineSort(), CHECK, CHECK_EQ, CPU, createComparator(), DEBUG_TIMER, doBaselineSort(), entryCount(), g_enable_watchdog, QueryMemoryDescriptor::getEntryCount(), getGpuCount(), GPU, initPermutationBuffer(), LOG, parallelTop(), permutation_, query_mem_desc_, radixSortOnCpu(), radixSortOnGpu(), QueryMemoryDescriptor::sortOnGpu(), sortPermutation(), storage_, targets_, topPermutation(), and logger::WARNING.

Referenced by RelAlgExecutor::executeRelAlgQueryNoRetry(), RelAlgExecutor::executeRelAlgQuerySingleStep(), RelAlgExecutor::executeRelAlgStep(), and RelAlgExecutor::executeSort().

522  {
523  auto timer = DEBUG_TIMER(__func__);
524 
525  if (!storage_) {
526  return;
527  }
528  CHECK_EQ(-1, cached_row_count_);
529  CHECK(!targets_.empty());
530 #ifdef HAVE_CUDA
531  if (canUseFastBaselineSort(order_entries, top_n)) {
532  baselineSort(order_entries, top_n);
533  return;
534  }
535 #endif // HAVE_CUDA
536  if (query_mem_desc_.sortOnGpu()) {
537  try {
538  radixSortOnGpu(order_entries);
539  } catch (const OutOfMemory&) {
540  LOG(WARNING) << "Out of GPU memory during sort, finish on CPU";
541  radixSortOnCpu(order_entries);
542  } catch (const std::bad_alloc&) {
543  LOG(WARNING) << "Out of GPU memory during sort, finish on CPU";
544  radixSortOnCpu(order_entries);
545  }
546  return;
547  }
548  // This check isn't strictly required, but allows the index buffer to be 32-bit.
549  if (query_mem_desc_.getEntryCount() > std::numeric_limits<uint32_t>::max()) {
550  throw RowSortException("Sorting more than 4B elements not supported");
551  }
552 
553  CHECK(permutation_.empty());
554 
555  const bool use_heap{order_entries.size() == 1 && top_n};
556  if (use_heap && entryCount() > 100000) {
557  if (g_enable_watchdog && (entryCount() > 20000000)) {
558  throw WatchdogException("Sorting the result would be too slow");
559  }
560  parallelTop(order_entries, top_n);
561  return;
562  }
563 
564  if (g_enable_watchdog && (entryCount() > Executor::baseline_threshold)) {
565  throw WatchdogException("Sorting the result would be too slow");
566  }
567 
568  permutation_ = initPermutationBuffer(0, 1);
569 
570  auto compare = createComparator(order_entries, use_heap);
571 
572  if (use_heap) {
573  topPermutation(permutation_, top_n, compare);
574  } else {
575  sortPermutation(compare);
576  }
577 }
void baselineSort(const std::list< Analyzer::OrderEntry > &order_entries, const size_t top_n)
#define CHECK_EQ(x, y)
Definition: Logger.h:205
size_t entryCount() const
void radixSortOnCpu(const std::list< Analyzer::OrderEntry > &order_entries) const
Definition: ResultSet.cpp:935
#define LOG(tag)
Definition: Logger.h:188
static const size_t baseline_threshold
Definition: Execute.h:933
QueryMemoryDescriptor query_mem_desc_
Definition: ResultSet.h:839
std::unique_ptr< ResultSetStorage > storage_
Definition: ResultSet.h:840
void radixSortOnGpu(const std::list< Analyzer::OrderEntry > &order_entries) const
Definition: ResultSet.cpp:900
std::vector< uint32_t > permutation_
Definition: ResultSet.h:847
std::atomic< int64_t > cached_row_count_
Definition: ResultSet.h:874
std::vector< uint32_t > initPermutationBuffer(const size_t start, const size_t step)
Definition: ResultSet.cpp:596
const std::vector< TargetInfo > targets_
Definition: ResultSet.h:836
bool canUseFastBaselineSort(const std::list< Analyzer::OrderEntry > &order_entries, const size_t top_n)
static void topPermutation(std::vector< uint32_t > &to_sort, const size_t n, const std::function< bool(const uint32_t, const uint32_t)> compare)
Definition: ResultSet.cpp:878
void sortPermutation(const std::function< bool(const uint32_t, const uint32_t)> compare)
Definition: ResultSet.cpp:894
void parallelTop(const std::list< Analyzer::OrderEntry > &order_entries, const size_t top_n)
Definition: ResultSet.cpp:619
#define CHECK(condition)
Definition: Logger.h:197
#define DEBUG_TIMER(name)
Definition: Logger.h:313
bool g_enable_watchdog
Definition: Execute.cpp:74
std::function< bool(const uint32_t, const uint32_t)> createComparator(const std::list< Analyzer::OrderEntry > &order_entries, const bool use_heap)
Definition: ResultSet.h:774
+ Here is the call graph for this function:
+ Here is the caller graph for this function:

◆ sortPermutation()

void ResultSet::sortPermutation ( const std::function< bool(const uint32_t, const uint32_t)>  compare)
private

Definition at line 894 of file ResultSet.cpp.

References DEBUG_TIMER, and permutation_.

Referenced by sort().

895  {
896  auto timer = DEBUG_TIMER(__func__);
897  std::sort(permutation_.begin(), permutation_.end(), compare);
898 }
std::vector< uint32_t > permutation_
Definition: ResultSet.h:847
#define DEBUG_TIMER(name)
Definition: Logger.h:313
+ Here is the caller graph for this function:

◆ syncEstimatorBuffer()

void ResultSet::syncEstimatorBuffer ( ) const

Definition at line 456 of file ResultSet.cpp.

References CHECK, CHECK_EQ, checked_calloc(), copy_from_gpu(), data_mgr_, device_estimator_buffer_, device_id_, device_type_, estimator_, Data_Namespace::AbstractBuffer::getMemoryPtr(), GPU, and host_estimator_buffer_.

456  {
457  CHECK(device_type_ == ExecutorDeviceType::GPU);
458  CHECK(!host_estimator_buffer_);
459  CHECK_EQ(size_t(0), estimator_->getBufferSize() % sizeof(int64_t));
460  host_estimator_buffer_ =
461  static_cast<int8_t*>(checked_calloc(estimator_->getBufferSize(), 1));
462  CHECK(device_estimator_buffer_);
463  auto device_buffer_ptr = device_estimator_buffer_->getMemoryPtr();
464  copy_from_gpu(data_mgr_,
465  host_estimator_buffer_,
466  reinterpret_cast<CUdeviceptr>(device_buffer_ptr),
467  estimator_->getBufferSize(),
468  device_id_);
469 }
#define CHECK_EQ(x, y)
Definition: Logger.h:205
virtual int8_t * getMemoryPtr()=0
void copy_from_gpu(Data_Namespace::DataMgr *data_mgr, void *dst, const CUdeviceptr src, const size_t num_bytes, const int device_id)
void * checked_calloc(const size_t nmemb, const size_t size)
Definition: checked_alloc.h:52
Data_Namespace::DataMgr * data_mgr_
Definition: ResultSet.h:865
const std::shared_ptr< const Analyzer::Estimator > estimator_
Definition: ResultSet.h:862
int8_t * host_estimator_buffer_
Definition: ResultSet.h:864
const ExecutorDeviceType device_type_
Definition: ResultSet.h:837
#define CHECK(condition)
Definition: Logger.h:197
Data_Namespace::AbstractBuffer * device_estimator_buffer_
Definition: ResultSet.h:863
const int device_id_
Definition: ResultSet.h:838
+ Here is the call graph for this function:

◆ topPermutation()

void ResultSet::topPermutation ( std::vector< uint32_t > &  to_sort,
const size_t  n,
const std::function< bool(const uint32_t, const uint32_t)>  compare 
)
staticprivate

Definition at line 878 of file ResultSet.cpp.

References DEBUG_TIMER.

Referenced by parallelTop(), and sort().

881  {
882  auto timer = DEBUG_TIMER(__func__);
883  std::make_heap(to_sort.begin(), to_sort.end(), compare);
884  std::vector<uint32_t> permutation_top;
885  permutation_top.reserve(n);
886  for (size_t i = 0; i < n && !to_sort.empty(); ++i) {
887  permutation_top.push_back(to_sort.front());
888  std::pop_heap(to_sort.begin(), to_sort.end(), compare);
889  to_sort.pop_back();
890  }
891  to_sort.swap(permutation_top);
892 }
#define DEBUG_TIMER(name)
Definition: Logger.h:313
+ Here is the caller graph for this function:

◆ unserialize()

static std::unique_ptr<ResultSet> ResultSet::unserialize ( const TSerializedRows &  serialized_rows,
const Executor  
)
static

◆ unserializeCountDistinctColumns()

void ResultSet::unserializeCountDistinctColumns ( const TSerializedRows &  )
private

◆ updateStorageEntryCount()

void ResultSet::updateStorageEntryCount ( const size_t  new_entry_count)
inline

Definition at line 364 of file ResultSet.h.

References File_Namespace::append(), CHECK, anonymous_namespace{TypedDataAccessors.h}::decimal_to_double(), QueryMemoryDescriptor::getQueryDescriptionType(), Projection, ResultSetStorage::query_mem_desc_, ResultSetStorage::ResultSet, and QueryMemoryDescriptor::setEntryCount().

364  {
365  CHECK(query_mem_desc_.getQueryDescriptionType() == QueryDescriptionType::Projection);
366  query_mem_desc_.setEntryCount(new_entry_count);
367  CHECK(storage_);
368  storage_->updateEntryCount(new_entry_count);
369  }
void setEntryCount(const size_t val)
QueryMemoryDescriptor query_mem_desc_
Definition: ResultSet.h:839
std::unique_ptr< ResultSetStorage > storage_
Definition: ResultSet.h:840
#define CHECK(condition)
Definition: Logger.h:197
QueryDescriptionType getQueryDescriptionType() const
+ Here is the call graph for this function:

Friends And Related Function Documentation

◆ ColumnarResults

friend class ColumnarResults
friend

Definition at line 887 of file ResultSet.h.

◆ ResultSetManager

friend class ResultSetManager
friend

Definition at line 885 of file ResultSet.h.

◆ ResultSetRowIterator

friend class ResultSetRowIterator
friend

Definition at line 886 of file ResultSet.h.

Member Data Documentation

◆ appended_storage_

◆ cached_row_count_

std::atomic<int64_t> ResultSet::cached_row_count_
mutableprivate

Definition at line 874 of file ResultSet.h.

Referenced by append(), ResultSet(), rowCount(), setCachedRowCount(), and sort().

◆ chunk_iters_

std::vector<std::shared_ptr<std::list<ChunkIter> > > ResultSet::chunk_iters_
private

Definition at line 853 of file ResultSet.h.

Referenced by append().

◆ chunks_

std::list<std::shared_ptr<Chunk_NS::Chunk> > ResultSet::chunks_
private

Definition at line 852 of file ResultSet.h.

Referenced by append().

◆ col_buffers_

std::vector<std::vector<std::vector<const int8_t*> > > ResultSet::col_buffers_
private

◆ column_wise_comparator_

std::unique_ptr<ResultSetComparator<ColumnWiseTargetAccessor> > ResultSet::column_wise_comparator_
private

Definition at line 883 of file ResultSet.h.

◆ consistent_frag_sizes_

std::vector<std::vector<int64_t> > ResultSet::consistent_frag_sizes_
private

Definition at line 860 of file ResultSet.h.

Referenced by append(), getColumnFrag(), and ResultSet().

◆ crt_row_buff_idx_

size_t ResultSet::crt_row_buff_idx_
mutableprivate

◆ data_mgr_

Data_Namespace::DataMgr* ResultSet::data_mgr_
private

Definition at line 865 of file ResultSet.h.

Referenced by ResultSet(), syncEstimatorBuffer(), and ~ResultSet().

◆ device_estimator_buffer_

Data_Namespace::AbstractBuffer* ResultSet::device_estimator_buffer_ {nullptr}
private

Definition at line 863 of file ResultSet.h.

Referenced by getDeviceEstimatorBuffer(), ResultSet(), syncEstimatorBuffer(), and ~ResultSet().

◆ device_id_

const int ResultSet::device_id_
private

◆ device_type_

◆ drop_first_

size_t ResultSet::drop_first_
private

◆ estimator_

const std::shared_ptr<const Analyzer::Estimator> ResultSet::estimator_
private

Definition at line 862 of file ResultSet.h.

Referenced by definitelyHasNoRows(), ResultSet(), and syncEstimatorBuffer().

◆ executor_

◆ explanation_

std::string ResultSet::explanation_
private

Definition at line 872 of file ResultSet.h.

◆ fetched_so_far_

size_t ResultSet::fetched_so_far_
mutableprivate

Definition at line 843 of file ResultSet.h.

Referenced by moveToBegin(), and ResultSet().

◆ frag_offsets_

std::vector<std::vector<std::vector<int64_t> > > ResultSet::frag_offsets_
private

Definition at line 859 of file ResultSet.h.

Referenced by append(), getColumnFrag(), and ResultSet().

◆ geo_return_type_

GeoReturnType ResultSet::geo_return_type_
mutableprivate

Definition at line 878 of file ResultSet.h.

Referenced by makeGeoTargetValue(), and ResultSet().

◆ host_estimator_buffer_

int8_t* ResultSet::host_estimator_buffer_ {nullptr}
mutableprivate

Definition at line 864 of file ResultSet.h.

Referenced by getHostEstimatorBuffer(), ResultSet(), syncEstimatorBuffer(), and ~ResultSet().

◆ just_explain_

const bool ResultSet::just_explain_
private

Definition at line 873 of file ResultSet.h.

Referenced by colCount(), definitelyHasNoRows(), getColType(), isExplain(), ResultSet(), and rowCount().

◆ keep_first_

size_t ResultSet::keep_first_
private

Definition at line 845 of file ResultSet.h.

Referenced by advanceCursorToNextEntry(), getLimit(), isTruncated(), ResultSet(), and rowCount().

◆ lazy_fetch_info_

const std::vector<ColumnLazyFetchInfo> ResultSet::lazy_fetch_info_
private

◆ literal_buffers_

std::vector<std::vector<int8_t> > ResultSet::literal_buffers_
private

Definition at line 856 of file ResultSet.h.

Referenced by append().

◆ permutation_

std::vector<uint32_t> ResultSet::permutation_
private

◆ query_mem_desc_

◆ row_iteration_mutex_

std::mutex ResultSet::row_iteration_mutex_
mutableprivate

Definition at line 875 of file ResultSet.h.

Referenced by rowCount().

◆ row_set_mem_owner_

◆ row_wise_comparator_

std::unique_ptr<ResultSetComparator<RowWiseTargetAccessor> > ResultSet::row_wise_comparator_
private

Definition at line 882 of file ResultSet.h.

◆ separate_varlen_storage_valid_

bool ResultSet::separate_varlen_storage_valid_
private

◆ serialized_varlen_buffer_

std::vector<SerializedVarlenBufferStorage> ResultSet::serialized_varlen_buffer_
private

Definition at line 870 of file ResultSet.h.

Referenced by append(), makeGeoTargetValue(), and makeVarlenTargetValue().

◆ storage_

◆ targets_

◆ timings_

QueryExecutionTimings ResultSet::timings_
private

The documentation for this class was generated from the following files: