OmniSciDB  ab4938a6a3
ResultSet Class Reference

#include <ResultSet.h>

+ Collaboration diagram for ResultSet:

Classes

struct  ColumnWiseTargetAccessor
 
struct  ResultSetComparator
 
struct  RowWiseTargetAccessor
 
struct  StorageLookupResult
 
struct  TargetOffsets
 
struct  VarlenTargetPtrPair
 

Public Types

enum  GeoReturnType { GeoReturnType::GeoTargetValue, GeoReturnType::WktString, GeoReturnType::GeoTargetValuePtr, GeoReturnType::GeoTargetValueGpuPtr }
 

Public Member Functions

 ResultSet (const std::vector< TargetInfo > &targets, const ExecutorDeviceType device_type, const QueryMemoryDescriptor &query_mem_desc, const std::shared_ptr< RowSetMemoryOwner > row_set_mem_owner, const Executor *executor)
 
 ResultSet (const std::vector< TargetInfo > &targets, const std::vector< ColumnLazyFetchInfo > &lazy_fetch_info, const std::vector< std::vector< const int8_t *>> &col_buffers, const std::vector< std::vector< int64_t >> &frag_offsets, const std::vector< int64_t > &consistent_frag_sizes, const ExecutorDeviceType device_type, const int device_id, const QueryMemoryDescriptor &query_mem_desc, const std::shared_ptr< RowSetMemoryOwner > row_set_mem_owner, const Executor *executor)
 
 ResultSet (const std::shared_ptr< const Analyzer::Estimator >, const ExecutorDeviceType device_type, const int device_id, Data_Namespace::DataMgr *data_mgr)
 
 ResultSet (const std::string &explanation)
 
 ResultSet (int64_t queue_time_ms, int64_t render_time_ms, const std::shared_ptr< RowSetMemoryOwner > row_set_mem_owner)
 
 ~ResultSet ()
 
ResultSetRowIterator rowIterator (size_t from_logical_index, bool translate_strings, bool decimal_to_double) const
 
ResultSetRowIterator rowIterator (bool translate_strings, bool decimal_to_double) const
 
ExecutorDeviceType getDeviceType () const
 
const ResultSetStorage * allocateStorage () const
 
const ResultSetStorage * allocateStorage (int8_t *, const std::vector< int64_t > &) const
 
const ResultSetStorage * allocateStorage (const std::vector< int64_t > &) const
 
void updateStorageEntryCount (const size_t new_entry_count)
 
std::vector< TargetValue > getNextRow (const bool translate_strings, const bool decimal_to_double) const
 
size_t getCurrentRowBufferIndex () const
 
std::vector< TargetValue > getRowAt (const size_t index) const
 
TargetValue getRowAt (const size_t row_idx, const size_t col_idx, const bool translate_strings, const bool decimal_to_double=true) const
 
OneIntegerColumnRow getOneColRow (const size_t index) const
 
std::vector< TargetValue > getRowAtNoTranslations (const size_t index, const std::vector< bool > &targets_to_skip={}) const
 
bool isRowAtEmpty (const size_t index) const
 
void sort (const std::list< Analyzer::OrderEntry > &order_entries, const size_t top_n)
 
void keepFirstN (const size_t n)
 
void dropFirstN (const size_t n)
 
void append (ResultSet &that)
 
const ResultSetStorage * getStorage () const
 
size_t colCount () const
 
SQLTypeInfo getColType (const size_t col_idx) const
 
size_t rowCount (const bool force_parallel=false) const
 
void setCachedRowCount (const size_t row_count) const
 
size_t entryCount () const
 
size_t getBufferSizeBytes (const ExecutorDeviceType device_type) const
 
bool definitelyHasNoRows () const
 
const QueryMemoryDescriptor & getQueryMemDesc () const
 
const std::vector< TargetInfo > & getTargetInfos () const
 
const std::vector< int64_t > & getTargetInitVals () const
 
int8_t * getDeviceEstimatorBuffer () const
 
int8_t * getHostEstimatorBuffer () const
 
void syncEstimatorBuffer () const
 
size_t getNDVEstimator () const
 
void setQueueTime (const int64_t queue_time)
 
int64_t getQueueTime () const
 
int64_t getRenderTime () const
 
void moveToBegin () const
 
bool isTruncated () const
 
bool isExplain () const
 
bool isGeoColOnGpu (const size_t col_idx) const
 
int getDeviceId () const
 
void fillOneEntry (const std::vector< int64_t > &entry)
 
void initializeStorage () const
 
void holdChunks (const std::list< std::shared_ptr< Chunk_NS::Chunk >> &chunks)
 
void holdChunkIterators (const std::shared_ptr< std::list< ChunkIter >> chunk_iters)
 
void holdLiterals (std::vector< int8_t > &literal_buff)
 
std::shared_ptr< RowSetMemoryOwner > getRowSetMemOwner () const
 
const std::vector< uint32_t > & getPermutationBuffer () const
 
const bool isPermutationBufferEmpty () const
 
void serialize (TSerializedRows &serialized_rows) const
 
size_t getLimit () const
 
GeoReturnType getGeoReturnType () const
 
void setGeoReturnType (const GeoReturnType val)
 
void copyColumnIntoBuffer (const size_t column_idx, int8_t *output_buffer, const size_t output_buffer_size) const
 
bool isDirectColumnarConversionPossible () const
 
bool didOutputColumnar () const
 
bool isZeroCopyColumnarConversionPossible (size_t column_idx) const
 
const int8_t * getColumnarBuffer (size_t column_idx) const
 
QueryDescriptionType getQueryDescriptionType () const
 
const int8_t getPaddedSlotWidthBytes (const size_t slot_idx) const
 
std::tuple< std::vector< bool >, size_t > getSingleSlotTargetBitmap () const
 
std::tuple< std::vector< bool >, size_t > getSupportedSingleSlotTargetBitmap () const
 
std::vector< size_t > getSlotIndicesForTargetIndices () const
 
const std::vector< ColumnLazyFetchInfo > & getLazyFetchInfo () const
 
void setSeparateVarlenStorageValid (const bool val)
 
std::shared_ptr< const std::vector< std::string > > getStringDictionaryPayloadCopy (const int dict_id) const
 
template<typename ENTRY_TYPE , QueryDescriptionType QUERY_TYPE, bool COLUMNAR_FORMAT>
ENTRY_TYPE getEntryAt (const size_t row_idx, const size_t target_idx, const size_t slot_idx) const
 
template<typename ENTRY_TYPE , QueryDescriptionType QUERY_TYPE, bool COLUMNAR_FORMAT>
ENTRY_TYPE getEntryAt (const size_t row_idx, const size_t target_idx, const size_t slot_idx) const
 
template<typename ENTRY_TYPE >
ENTRY_TYPE getColumnarPerfectHashEntryAt (const size_t row_idx, const size_t target_idx, const size_t slot_idx) const
 
template<typename ENTRY_TYPE >
ENTRY_TYPE getRowWisePerfectHashEntryAt (const size_t row_idx, const size_t target_idx, const size_t slot_idx) const
 
template<typename ENTRY_TYPE >
ENTRY_TYPE getRowWiseBaselineEntryAt (const size_t row_idx, const size_t target_idx, const size_t slot_idx) const
 
template<typename ENTRY_TYPE >
ENTRY_TYPE getColumnarBaselineEntryAt (const size_t row_idx, const size_t target_idx, const size_t slot_idx) const
 

Static Public Member Functions

static QueryMemoryDescriptor fixupQueryMemoryDescriptor (const QueryMemoryDescriptor &)
 
static std::unique_ptr< ResultSet > unserialize (const TSerializedRows &serialized_rows, const Executor *)
 

Private Types

using BufferSet = std::set< int64_t >
 
using SerializedVarlenBufferStorage = std::vector< std::string >
 

Private Member Functions

void advanceCursorToNextEntry (ResultSetRowIterator &iter) const
 
std::vector< TargetValue > getNextRowImpl (const bool translate_strings, const bool decimal_to_double) const
 
std::vector< TargetValue > getNextRowUnlocked (const bool translate_strings, const bool decimal_to_double) const
 
std::vector< TargetValue > getRowAt (const size_t index, const bool translate_strings, const bool decimal_to_double, const bool fixup_count_distinct_pointers, const std::vector< bool > &targets_to_skip={}) const
 
template<typename ENTRY_TYPE >
ENTRY_TYPE getColumnarPerfectHashEntryAt (const size_t row_idx, const size_t target_idx, const size_t slot_idx) const
 
template<typename ENTRY_TYPE >
ENTRY_TYPE getRowWisePerfectHashEntryAt (const size_t row_idx, const size_t target_idx, const size_t slot_idx) const
 
template<typename ENTRY_TYPE >
ENTRY_TYPE getRowWiseBaselineEntryAt (const size_t row_idx, const size_t target_idx, const size_t slot_idx) const
 
template<typename ENTRY_TYPE >
ENTRY_TYPE getColumnarBaselineEntryAt (const size_t row_idx, const size_t target_idx, const size_t slot_idx) const
 
size_t binSearchRowCount () const
 
size_t parallelRowCount () const
 
size_t advanceCursorToNextEntry () const
 
void radixSortOnGpu (const std::list< Analyzer::OrderEntry > &order_entries) const
 
void radixSortOnCpu (const std::list< Analyzer::OrderEntry > &order_entries) const
 
TargetValue getTargetValueFromBufferRowwise (int8_t *rowwise_target_ptr, int8_t *keys_ptr, const size_t entry_buff_idx, const TargetInfo &target_info, const size_t target_logical_idx, const size_t slot_idx, const bool translate_strings, const bool decimal_to_double, const bool fixup_count_distinct_pointers) const
 
TargetValue getTargetValueFromBufferColwise (const int8_t *col_ptr, const int8_t *keys_ptr, const QueryMemoryDescriptor &query_mem_desc, const size_t local_entry_idx, const size_t global_entry_idx, const TargetInfo &target_info, const size_t target_logical_idx, const size_t slot_idx, const bool translate_strings, const bool decimal_to_double) const
 
TargetValue makeTargetValue (const int8_t *ptr, const int8_t compact_sz, const TargetInfo &target_info, const size_t target_logical_idx, const bool translate_strings, const bool decimal_to_double, const size_t entry_buff_idx) const
 
TargetValue makeVarlenTargetValue (const int8_t *ptr1, const int8_t compact_sz1, const int8_t *ptr2, const int8_t compact_sz2, const TargetInfo &target_info, const size_t target_logical_idx, const bool translate_strings, const size_t entry_buff_idx) const
 
TargetValue makeGeoTargetValue (const int8_t *geo_target_ptr, const size_t slot_idx, const TargetInfo &target_info, const size_t target_logical_idx, const size_t entry_buff_idx) const
 
InternalTargetValue getColumnInternal (const int8_t *buff, const size_t entry_idx, const size_t target_logical_idx, const StorageLookupResult &storage_lookup_result) const
 
InternalTargetValue getVarlenOrderEntry (const int64_t str_ptr, const size_t str_len) const
 
int64_t lazyReadInt (const int64_t ival, const size_t target_logical_idx, const StorageLookupResult &storage_lookup_result) const
 
std::pair< size_t, size_t > getStorageIndex (const size_t entry_idx) const
 
const std::vector< const int8_t * > & getColumnFrag (const size_t storge_idx, const size_t col_logical_idx, int64_t &global_idx) const
 
StorageLookupResult findStorage (const size_t entry_idx) const
 
std::function< bool(const uint32_t, const uint32_t)> createComparator (const std::list< Analyzer::OrderEntry > &order_entries, const bool use_heap)
 
void sortPermutation (const std::function< bool(const uint32_t, const uint32_t)> compare)
 
std::vector< uint32_t > initPermutationBuffer (const size_t start, const size_t step)
 
void parallelTop (const std::list< Analyzer::OrderEntry > &order_entries, const size_t top_n)
 
void baselineSort (const std::list< Analyzer::OrderEntry > &order_entries, const size_t top_n)
 
void doBaselineSort (const ExecutorDeviceType device_type, const std::list< Analyzer::OrderEntry > &order_entries, const size_t top_n)
 
bool canUseFastBaselineSort (const std::list< Analyzer::OrderEntry > &order_entries, const size_t top_n)
 
Data_Namespace::DataMgr * getDataManager () const
 
int getGpuCount () const
 
void serializeProjection (TSerializedRows &serialized_rows) const
 
void serializeVarlenAggColumn (int8_t *buf, std::vector< std::string > &varlen_bufer) const
 
void serializeCountDistinctColumns (TSerializedRows &) const
 
void unserializeCountDistinctColumns (const TSerializedRows &)
 
void fixupCountDistinctPointers ()
 
void create_active_buffer_set (BufferSet &count_distinct_active_buffer_set) const
 
int64_t getDistinctBufferRefFromBufferRowwise (int8_t *rowwise_target_ptr, const TargetInfo &target_info) const
 

Static Private Member Functions

static bool isNull (const SQLTypeInfo &ti, const InternalTargetValue &val, const bool float_argument_input)
 
static void topPermutation (std::vector< uint32_t > &to_sort, const size_t n, const std::function< bool(const uint32_t, const uint32_t)> compare)
 

Private Attributes

const std::vector< TargetInfo > targets_
 
const ExecutorDeviceType device_type_
 
const int device_id_
 
QueryMemoryDescriptor query_mem_desc_
 
std::unique_ptr< ResultSetStorage > storage_
 
AppendedStorage appended_storage_
 
size_t crt_row_buff_idx_
 
size_t fetched_so_far_
 
size_t drop_first_
 
size_t keep_first_
 
std::shared_ptr< RowSetMemoryOwner > row_set_mem_owner_
 
std::vector< uint32_t > permutation_
 
int64_t queue_time_ms_
 
int64_t render_time_ms_
 
const Executor * executor_
 
std::list< std::shared_ptr< Chunk_NS::Chunk > > chunks_
 
std::vector< std::shared_ptr< std::list< ChunkIter > > > chunk_iters_
 
std::vector< std::vector< int8_t > > literal_buffers_
 
const std::vector< ColumnLazyFetchInfo > lazy_fetch_info_
 
std::vector< std::vector< std::vector< const int8_t * > > > col_buffers_
 
std::vector< std::vector< std::vector< int64_t > > > frag_offsets_
 
std::vector< std::vector< int64_t > > consistent_frag_sizes_
 
const std::shared_ptr< const Analyzer::Estimator > estimator_
 
int8_t * estimator_buffer_
 
int8_t * host_estimator_buffer_
 
Data_Namespace::DataMgr * data_mgr_
 
std::vector< SerializedVarlenBufferStorage > serialized_varlen_buffer_
 
bool separate_varlen_storage_valid_
 
std::string explanation_
 
const bool just_explain_
 
std::atomic< ssize_t > cached_row_count_
 
std::mutex row_iteration_mutex_
 
GeoReturnType geo_return_type_
 
std::unique_ptr< ResultSetComparator< RowWiseTargetAccessor > > row_wise_comparator_
 
std::unique_ptr< ResultSetComparator< ColumnWiseTargetAccessor > > column_wise_comparator_
 

Friends

class ResultSetManager
 
class ResultSetRowIterator
 
class ColumnarResults
 

Detailed Description

Definition at line 304 of file ResultSet.h.

Member Typedef Documentation

◆ BufferSet

using ResultSet::BufferSet = std::set<int64_t>
private

Definition at line 812 of file ResultSet.h.

◆ SerializedVarlenBufferStorage

using ResultSet::SerializedVarlenBufferStorage = std::vector<std::string>
private

Definition at line 850 of file ResultSet.h.

Member Enumeration Documentation

◆ GeoReturnType

Geo return type options when accessing geo columns from a result set.

Enumerator
GeoTargetValue 

Copies the geo data into a struct of vectors - coords are uncompressed

WktString 

Returns the geo data as a WKT string

GeoTargetValuePtr 

Returns only the pointers of the underlying buffers for the geo data.

GeoTargetValueGpuPtr 

If geo data is currently on a device, keep the data on the device and return the device ptrs

Definition at line 490 of file ResultSet.h.

490  {
493  WktString,
496  GeoTargetValueGpuPtr
498  };
boost::optional< boost::variant< GeoPointTargetValue, GeoLineStringTargetValue, GeoPolyTargetValue, GeoMultiPolyTargetValue > > GeoTargetValue
Definition: TargetValue.h:161
boost::variant< GeoPointTargetValuePtr, GeoLineStringTargetValuePtr, GeoPolyTargetValuePtr, GeoMultiPolyTargetValuePtr > GeoTargetValuePtr
Definition: TargetValue.h:165

Constructor & Destructor Documentation

◆ ResultSet() [1/5]

ResultSet::ResultSet ( const std::vector< TargetInfo > &  targets,
const ExecutorDeviceType  device_type,
const QueryMemoryDescriptor &  query_mem_desc,
const std::shared_ptr< RowSetMemoryOwner >  row_set_mem_owner,
const Executor *  executor 
)

Definition at line 100 of file ResultSet.cpp.

References CudaAllocator::alloc(), cached_row_count_, checked_calloc(), col_buffers_, consistent_frag_sizes_, crt_row_buff_idx_, data_mgr_, device_id_, device_type_, drop_first_, estimator_, estimator_buffer_, executor_, fetched_so_far_, frag_offsets_, geo_return_type_, Data_Namespace::DataMgr::getCudaMgr(), GPU, host_estimator_buffer_, just_explain_, keep_first_, lazy_fetch_info_, query_mem_desc_, queue_time_ms_, render_time_ms_, row_set_mem_owner_, separate_varlen_storage_valid_, targets_, WktString, and CudaMgr_Namespace::CudaMgr::zeroDeviceMem().

105  : targets_(targets)
106  , device_type_(device_type)
107  , device_id_(-1)
108  , query_mem_desc_(query_mem_desc)
109  , crt_row_buff_idx_(0)
110  , fetched_so_far_(0)
111  , drop_first_(0)
112  , keep_first_(0)
113  , row_set_mem_owner_(row_set_mem_owner)
114  , queue_time_ms_(0)
115  , render_time_ms_(0)
116  , executor_(executor)
117  , estimator_buffer_(nullptr)
118  , host_estimator_buffer_(nullptr)
119  , data_mgr_(nullptr)
121  , just_explain_(false)
122  , cached_row_count_(-1)
int8_t * estimator_buffer_
Definition: ResultSet.h:845
GeoReturnType geo_return_type_
Definition: ResultSet.h:860
const Executor * executor_
Definition: ResultSet.h:832
QueryMemoryDescriptor query_mem_desc_
Definition: ResultSet.h:821
size_t keep_first_
Definition: ResultSet.h:827
const bool just_explain_
Definition: ResultSet.h:855
const std::vector< TargetInfo > targets_
Definition: ResultSet.h:818
std::shared_ptr< RowSetMemoryOwner > row_set_mem_owner_
Definition: ResultSet.h:828
size_t drop_first_
Definition: ResultSet.h:826
std::atomic< ssize_t > cached_row_count_
Definition: ResultSet.h:856
Data_Namespace::DataMgr * data_mgr_
Definition: ResultSet.h:847
int64_t queue_time_ms_
Definition: ResultSet.h:830
int8_t * host_estimator_buffer_
Definition: ResultSet.h:846
const ExecutorDeviceType device_type_
Definition: ResultSet.h:819
size_t fetched_so_far_
Definition: ResultSet.h:825
size_t crt_row_buff_idx_
Definition: ResultSet.h:824
bool separate_varlen_storage_valid_
Definition: ResultSet.h:853
int64_t render_time_ms_
Definition: ResultSet.h:831
const int device_id_
Definition: ResultSet.h:820
+ Here is the call graph for this function:

◆ ResultSet() [2/5]

ResultSet::ResultSet ( const std::vector< TargetInfo > &  targets,
const std::vector< ColumnLazyFetchInfo > &  lazy_fetch_info,
const std::vector< std::vector< const int8_t *>> &  col_buffers,
const std::vector< std::vector< int64_t >> &  frag_offsets,
const std::vector< int64_t > &  consistent_frag_sizes,
const ExecutorDeviceType  device_type,
const int  device_id,
const QueryMemoryDescriptor &  query_mem_desc,
const std::shared_ptr< RowSetMemoryOwner >  row_set_mem_owner,
const Executor *  executor 
)

◆ ResultSet() [3/5]

ResultSet::ResultSet ( const std::shared_ptr< const Analyzer::Estimator > ,
const ExecutorDeviceType  device_type,
const int  device_id,
Data_Namespace::DataMgr *  data_mgr 
)

◆ ResultSet() [4/5]

ResultSet::ResultSet ( const std::string &  explanation)

Definition at line 186 of file ResultSet.cpp.

188  , device_id_(-1)
189  , fetched_so_far_(0)
190  , queue_time_ms_(0)
191  , render_time_ms_(0)
192  , estimator_buffer_(nullptr)
193  , host_estimator_buffer_(nullptr)
195  , explanation_(explanation)
196  , just_explain_(true)
197  , cached_row_count_(-1)
int8_t * estimator_buffer_
Definition: ResultSet.h:845
GeoReturnType geo_return_type_
Definition: ResultSet.h:860
const bool just_explain_
Definition: ResultSet.h:855
std::atomic< ssize_t > cached_row_count_
Definition: ResultSet.h:856
int64_t queue_time_ms_
Definition: ResultSet.h:830
std::string explanation_
Definition: ResultSet.h:854
int8_t * host_estimator_buffer_
Definition: ResultSet.h:846
const ExecutorDeviceType device_type_
Definition: ResultSet.h:819
size_t fetched_so_far_
Definition: ResultSet.h:825
bool separate_varlen_storage_valid_
Definition: ResultSet.h:853
int64_t render_time_ms_
Definition: ResultSet.h:831
const int device_id_
Definition: ResultSet.h:820

◆ ResultSet() [5/5]

ResultSet::ResultSet ( int64_t  queue_time_ms,
int64_t  render_time_ms,
const std::shared_ptr< RowSetMemoryOwner >  row_set_mem_owner 
)

Definition at line 200 of file ResultSet.cpp.

204  , device_id_(-1)
205  , fetched_so_far_(0)
206  , row_set_mem_owner_(row_set_mem_owner)
207  , queue_time_ms_(queue_time_ms)
208  , render_time_ms_(render_time_ms)
209  , estimator_buffer_(nullptr)
210  , host_estimator_buffer_(nullptr)
212  , just_explain_(true)
213  , cached_row_count_(-1)
int8_t * estimator_buffer_
Definition: ResultSet.h:845
GeoReturnType geo_return_type_
Definition: ResultSet.h:860
const bool just_explain_
Definition: ResultSet.h:855
std::shared_ptr< RowSetMemoryOwner > row_set_mem_owner_
Definition: ResultSet.h:828
std::atomic< ssize_t > cached_row_count_
Definition: ResultSet.h:856
int64_t queue_time_ms_
Definition: ResultSet.h:830
int8_t * host_estimator_buffer_
Definition: ResultSet.h:846
const ExecutorDeviceType device_type_
Definition: ResultSet.h:819
size_t fetched_so_far_
Definition: ResultSet.h:825
bool separate_varlen_storage_valid_
Definition: ResultSet.h:853
int64_t render_time_ms_
Definition: ResultSet.h:831
const int device_id_
Definition: ResultSet.h:820

◆ ~ResultSet()

ResultSet::~ResultSet ( )

Definition at line 216 of file ResultSet.cpp.

References appended_storage_, CHECK, CPU, device_type_, estimator_buffer_, host_estimator_buffer_, and storage_.

216  {
217  if (storage_) {
218  if (!storage_->buff_is_provided_) {
219  CHECK(storage_->getUnderlyingBuffer());
220  free(storage_->getUnderlyingBuffer());
221  }
222  }
223  for (auto& storage : appended_storage_) {
224  if (storage && !storage->buff_is_provided_) {
225  free(storage->getUnderlyingBuffer());
226  }
227  }
231  }
232 }
int8_t * estimator_buffer_
Definition: ResultSet.h:845
AppendedStorage appended_storage_
Definition: ResultSet.h:823
std::unique_ptr< ResultSetStorage > storage_
Definition: ResultSet.h:822
int8_t * host_estimator_buffer_
Definition: ResultSet.h:846
const ExecutorDeviceType device_type_
Definition: ResultSet.h:819
#define CHECK(condition)
Definition: Logger.h:197

Member Function Documentation

◆ advanceCursorToNextEntry() [1/2]

void ResultSet::advanceCursorToNextEntry ( ResultSetRowIterator &  iter) const
private

Definition at line 693 of file ResultSetIteration.cpp.

References CHECK_LE, ResultSetRowIterator::crt_row_buff_idx_, drop_first_, entryCount(), ResultSetRowIterator::fetched_so_far_, findStorage(), ResultSetRowIterator::global_entry_idx_, ResultSetRowIterator::global_entry_idx_valid_, keep_first_, and permutation_.

693  {
695  iter.global_entry_idx_valid_ = false;
696  return;
697  }
698 
699  while (iter.crt_row_buff_idx_ < entryCount()) {
700  const auto entry_idx = permutation_.empty() ? iter.crt_row_buff_idx_
702  const auto storage_lookup_result = findStorage(entry_idx);
703  const auto storage = storage_lookup_result.storage_ptr;
704  const auto fixedup_entry_idx = storage_lookup_result.fixedup_entry_idx;
705  if (!storage->isEmptyEntry(fixedup_entry_idx)) {
706  if (iter.fetched_so_far_ < drop_first_) {
707  ++iter.fetched_so_far_;
708  } else {
709  break;
710  }
711  }
712  ++iter.crt_row_buff_idx_;
713  }
714  if (permutation_.empty()) {
716  } else {
718  iter.global_entry_idx_ = iter.crt_row_buff_idx_ == permutation_.size()
719  ? iter.crt_row_buff_idx_
721  }
722 
724 
725  if (iter.global_entry_idx_valid_) {
726  ++iter.crt_row_buff_idx_;
727  ++iter.fetched_so_far_;
728  }
729 }
size_t entryCount() const
size_t keep_first_
Definition: ResultSet.h:827
std::vector< uint32_t > permutation_
Definition: ResultSet.h:829
size_t global_entry_idx_
Definition: ResultSet.h:278
size_t drop_first_
Definition: ResultSet.h:826
StorageLookupResult findStorage(const size_t entry_idx) const
Definition: ResultSet.cpp:657
#define CHECK_LE(x, y)
Definition: Logger.h:208
size_t crt_row_buff_idx_
Definition: ResultSet.h:277
bool global_entry_idx_valid_
Definition: ResultSet.h:279
+ Here is the call graph for this function:

◆ advanceCursorToNextEntry() [2/2]

size_t ResultSet::advanceCursorToNextEntry ( ) const
private

Definition at line 733 of file ResultSetIteration.cpp.

References CHECK_LE, crt_row_buff_idx_, entryCount(), findStorage(), and permutation_.

733  {
734  while (crt_row_buff_idx_ < entryCount()) {
735  const auto entry_idx =
737  const auto storage_lookup_result = findStorage(entry_idx);
738  const auto storage = storage_lookup_result.storage_ptr;
739  const auto fixedup_entry_idx = storage_lookup_result.fixedup_entry_idx;
740  if (!storage->isEmptyEntry(fixedup_entry_idx)) {
741  break;
742  }
744  }
745  if (permutation_.empty()) {
746  return crt_row_buff_idx_;
747  }
751 }
size_t entryCount() const
std::vector< uint32_t > permutation_
Definition: ResultSet.h:829
StorageLookupResult findStorage(const size_t entry_idx) const
Definition: ResultSet.cpp:657
#define CHECK_LE(x, y)
Definition: Logger.h:208
size_t crt_row_buff_idx_
Definition: ResultSet.h:824
+ Here is the call graph for this function:

◆ allocateStorage() [1/3]

const ResultSetStorage * ResultSet::allocateStorage ( ) const

Definition at line 238 of file ResultSet.cpp.

References CHECK, device_type_, QueryMemoryDescriptor::getBufferSizeBytes(), query_mem_desc_, row_set_mem_owner_, storage_, and targets_.

238  {
239  CHECK(!storage_);
241  auto buff =
243  storage_.reset(
244  new ResultSetStorage(targets_, query_mem_desc_, buff, /*buff_is_provided=*/true));
245  return storage_.get();
246 }
QueryMemoryDescriptor query_mem_desc_
Definition: ResultSet.h:821
std::unique_ptr< ResultSetStorage > storage_
Definition: ResultSet.h:822
const std::vector< TargetInfo > targets_
Definition: ResultSet.h:818
std::shared_ptr< RowSetMemoryOwner > row_set_mem_owner_
Definition: ResultSet.h:828
size_t getBufferSizeBytes(const RelAlgExecutionUnit &ra_exe_unit, const unsigned thread_count, const ExecutorDeviceType device_type) const
const ExecutorDeviceType device_type_
Definition: ResultSet.h:819
#define CHECK(condition)
Definition: Logger.h:197
+ Here is the call graph for this function:

◆ allocateStorage() [2/3]

const ResultSetStorage* ResultSet::allocateStorage ( int8_t *  ,
const std::vector< int64_t > &   
) const

◆ allocateStorage() [3/3]

const ResultSetStorage* ResultSet::allocateStorage ( const std::vector< int64_t > &  ) const

◆ append()

void ResultSet::append ( ResultSet &  that)

Definition at line 278 of file ResultSet.cpp.

References appended_storage_, cached_row_count_, CHECK, CHECK_EQ, chunk_iters_, chunks_, col_buffers_, consistent_frag_sizes_, frag_offsets_, QueryMemoryDescriptor::getEntryCount(), literal_buffers_, query_mem_desc_, separate_varlen_storage_valid_, serialized_varlen_buffer_, and QueryMemoryDescriptor::setEntryCount().

278  {
280  if (!that.storage_) {
281  return;
282  }
283  appended_storage_.push_back(std::move(that.storage_));
286  appended_storage_.back()->query_mem_desc_.getEntryCount());
287  chunks_.insert(chunks_.end(), that.chunks_.begin(), that.chunks_.end());
288  col_buffers_.insert(
289  col_buffers_.end(), that.col_buffers_.begin(), that.col_buffers_.end());
290  frag_offsets_.insert(
291  frag_offsets_.end(), that.frag_offsets_.begin(), that.frag_offsets_.end());
293  that.consistent_frag_sizes_.begin(),
294  that.consistent_frag_sizes_.end());
295  chunk_iters_.insert(
296  chunk_iters_.end(), that.chunk_iters_.begin(), that.chunk_iters_.end());
298  CHECK(that.separate_varlen_storage_valid_);
300  that.serialized_varlen_buffer_.begin(),
301  that.serialized_varlen_buffer_.end());
302  }
303  for (auto& buff : that.literal_buffers_) {
304  literal_buffers_.push_back(std::move(buff));
305  }
306 }
#define CHECK_EQ(x, y)
Definition: Logger.h:205
void setEntryCount(const size_t val)
AppendedStorage appended_storage_
Definition: ResultSet.h:823
QueryMemoryDescriptor query_mem_desc_
Definition: ResultSet.h:821
std::vector< std::shared_ptr< std::list< ChunkIter > > > chunk_iters_
Definition: ResultSet.h:835
std::vector< SerializedVarlenBufferStorage > serialized_varlen_buffer_
Definition: ResultSet.h:852
std::list< std::shared_ptr< Chunk_NS::Chunk > > chunks_
Definition: ResultSet.h:834
std::vector< std::vector< int8_t > > literal_buffers_
Definition: ResultSet.h:838
std::atomic< ssize_t > cached_row_count_
Definition: ResultSet.h:856
std::vector< std::vector< std::vector< const int8_t * > > > col_buffers_
Definition: ResultSet.h:840
std::vector< std::vector< int64_t > > consistent_frag_sizes_
Definition: ResultSet.h:842
#define CHECK(condition)
Definition: Logger.h:197
std::vector< std::vector< std::vector< int64_t > > > frag_offsets_
Definition: ResultSet.h:841
bool separate_varlen_storage_valid_
Definition: ResultSet.h:853
+ Here is the call graph for this function:

◆ baselineSort()

void ResultSet::baselineSort ( const std::list< Analyzer::OrderEntry > &  order_entries,
const size_t  top_n 
)
private

Referenced by sort().

+ Here is the caller graph for this function:

◆ binSearchRowCount()

size_t ResultSet::binSearchRowCount ( ) const
private

Definition at line 367 of file ResultSet.cpp.

References appended_storage_, drop_first_, keep_first_, and storage_.

Referenced by rowCount().

367  {
368  if (!storage_) {
369  return 0;
370  }
371 
372  size_t row_count = storage_->binSearchRowCount();
373  for (auto& s : appended_storage_) {
374  row_count += s->binSearchRowCount();
375  }
376 
377  if (keep_first_ + drop_first_) {
378  const auto limited_row_count = std::min(keep_first_ + drop_first_, row_count);
379  return limited_row_count < drop_first_ ? 0 : limited_row_count - drop_first_;
380  }
381 
382  return row_count;
383 }
AppendedStorage appended_storage_
Definition: ResultSet.h:823
std::unique_ptr< ResultSetStorage > storage_
Definition: ResultSet.h:822
size_t keep_first_
Definition: ResultSet.h:827
size_t drop_first_
Definition: ResultSet.h:826
+ Here is the caller graph for this function:

◆ canUseFastBaselineSort()

bool ResultSet::canUseFastBaselineSort ( const std::list< Analyzer::OrderEntry > &  order_entries,
const size_t  top_n 
)
private

Referenced by sort().

+ Here is the caller graph for this function:

◆ colCount()

size_t ResultSet::colCount ( ) const

Definition at line 312 of file ResultSet.cpp.

References just_explain_, and targets_.

312  {
313  return just_explain_ ? 1 : targets_.size();
314 }
const bool just_explain_
Definition: ResultSet.h:855
const std::vector< TargetInfo > targets_
Definition: ResultSet.h:818

◆ copyColumnIntoBuffer()

void ResultSet::copyColumnIntoBuffer ( const size_t  column_idx,
int8_t *  output_buffer,
const size_t  output_buffer_size 
) const

For each specified column, this function goes through all available storages and copy its content into a contiguous output_buffer

Definition at line 1170 of file ResultSetIteration.cpp.

References appended_storage_, CHECK, CHECK_LT, QueryMemoryDescriptor::getPaddedSlotWidthBytes(), QueryMemoryDescriptor::getSlotCount(), isDirectColumnarConversionPossible(), query_mem_desc_, and storage_.

1172  {
1174  CHECK_LT(column_idx, query_mem_desc_.getSlotCount());
1175  CHECK(output_buffer_size > 0);
1176  CHECK(output_buffer);
1177  const auto column_width_size = query_mem_desc_.getPaddedSlotWidthBytes(column_idx);
1178  size_t out_buff_offset = 0;
1179 
1180  // the main storage:
1181  const size_t crt_storage_row_count = storage_->query_mem_desc_.getEntryCount();
1182  const size_t crt_buffer_size = crt_storage_row_count * column_width_size;
1183  const size_t column_offset = storage_->query_mem_desc_.getColOffInBytes(column_idx);
1184  const int8_t* storage_buffer = storage_->getUnderlyingBuffer() + column_offset;
1185  CHECK(crt_buffer_size <= output_buffer_size);
1186  std::memcpy(output_buffer, storage_buffer, crt_buffer_size);
1187 
1188  out_buff_offset += crt_buffer_size;
1189 
1190  // the appended storages:
1191  for (size_t i = 0; i < appended_storage_.size(); i++) {
1192  const size_t crt_storage_row_count =
1193  appended_storage_[i]->query_mem_desc_.getEntryCount();
1194  if (crt_storage_row_count == 0) {
1195  // skip an empty appended storage
1196  continue;
1197  }
1198  CHECK_LT(out_buff_offset, output_buffer_size);
1199  const size_t crt_buffer_size = crt_storage_row_count * column_width_size;
1200  const size_t column_offset =
1201  appended_storage_[i]->query_mem_desc_.getColOffInBytes(column_idx);
1202  const int8_t* storage_buffer =
1203  appended_storage_[i]->getUnderlyingBuffer() + column_offset;
1204  CHECK(out_buff_offset + crt_buffer_size <= output_buffer_size);
1205  std::memcpy(output_buffer + out_buff_offset, storage_buffer, crt_buffer_size);
1206 
1207  out_buff_offset += crt_buffer_size;
1208  }
1209 }
AppendedStorage appended_storage_
Definition: ResultSet.h:823
QueryMemoryDescriptor query_mem_desc_
Definition: ResultSet.h:821
std::unique_ptr< ResultSetStorage > storage_
Definition: ResultSet.h:822
bool isDirectColumnarConversionPossible() const
Definition: ResultSet.cpp:923
const int8_t getPaddedSlotWidthBytes(const size_t slot_idx) const
#define CHECK_LT(x, y)
Definition: Logger.h:207
#define CHECK(condition)
Definition: Logger.h:197
+ Here is the call graph for this function:

◆ create_active_buffer_set()

void ResultSet::create_active_buffer_set ( BufferSet count_distinct_active_buffer_set) const
private

◆ createComparator()

std::function<bool(const uint32_t, const uint32_t)> ResultSet::createComparator ( const std::list< Analyzer::OrderEntry > &  order_entries,
const bool  use_heap 
)
inlineprivate

Definition at line 757 of file ResultSet.h.

References QueryMemoryDescriptor::didOutputColumnar(), and ResultSetStorage::query_mem_desc_.

Referenced by parallelTop(), and sort().

759  {
762  std::make_unique<ResultSetComparator<ColumnWiseTargetAccessor>>(
763  order_entries, use_heap, this);
764  return [this](const uint32_t lhs, const uint32_t rhs) -> bool {
765  return (*this->column_wise_comparator_)(lhs, rhs);
766  };
767  } else {
768  row_wise_comparator_ = std::make_unique<ResultSetComparator<RowWiseTargetAccessor>>(
769  order_entries, use_heap, this);
770  return [this](const uint32_t lhs, const uint32_t rhs) -> bool {
771  return (*this->row_wise_comparator_)(lhs, rhs);
772  };
773  }
774  }
std::unique_ptr< ResultSetComparator< ColumnWiseTargetAccessor > > column_wise_comparator_
Definition: ResultSet.h:865
QueryMemoryDescriptor query_mem_desc_
Definition: ResultSet.h:821
std::unique_ptr< ResultSetComparator< RowWiseTargetAccessor > > row_wise_comparator_
Definition: ResultSet.h:864
+ Here is the call graph for this function:
+ Here is the caller graph for this function:

◆ definitelyHasNoRows()

bool ResultSet::definitelyHasNoRows ( ) const

Definition at line 423 of file ResultSet.cpp.

References estimator_, just_explain_, and storage_.

423  {
424  return !storage_ && !estimator_ && !just_explain_;
425 }
std::unique_ptr< ResultSetStorage > storage_
Definition: ResultSet.h:822
const bool just_explain_
Definition: ResultSet.h:855
const std::shared_ptr< const Analyzer::Estimator > estimator_
Definition: ResultSet.h:844

◆ didOutputColumnar()

bool ResultSet::didOutputColumnar ( ) const
inline

Definition at line 508 of file ResultSet.h.

References QueryMemoryDescriptor::didOutputColumnar(), and ResultSetStorage::query_mem_desc_.

508 { return this->query_mem_desc_.didOutputColumnar(); }
QueryMemoryDescriptor query_mem_desc_
Definition: ResultSet.h:821
+ Here is the call graph for this function:

◆ doBaselineSort()

void ResultSet::doBaselineSort ( const ExecutorDeviceType  device_type,
const std::list< Analyzer::OrderEntry > &  order_entries,
const size_t  top_n 
)
private

Referenced by sort().

+ Here is the caller graph for this function:

◆ dropFirstN()

void ResultSet::dropFirstN ( const size_t  n)

Definition at line 95 of file ResultSet.cpp.

References CHECK_EQ.

95  {
97  drop_first_ = n;
98 }
#define CHECK_EQ(x, y)
Definition: Logger.h:205
size_t drop_first_
Definition: ResultSet.h:826
std::atomic< ssize_t > cached_row_count_
Definition: ResultSet.h:856

◆ entryCount()

size_t ResultSet::entryCount ( ) const

Definition at line 753 of file ResultSetIteration.cpp.

References QueryMemoryDescriptor::getEntryCount(), permutation_, and query_mem_desc_.

Referenced by advanceCursorToNextEntry(), parallelRowCount(), rowCount(), and sort().

753  {
754  return permutation_.empty() ? query_mem_desc_.getEntryCount() : permutation_.size();
755 }
QueryMemoryDescriptor query_mem_desc_
Definition: ResultSet.h:821
std::vector< uint32_t > permutation_
Definition: ResultSet.h:829
+ Here is the call graph for this function:
+ Here is the caller graph for this function:

◆ fillOneEntry()

void ResultSet::fillOneEntry ( const std::vector< int64_t > &  entry)
inline

Definition at line 452 of file ResultSet.h.

References CHECK.

452  {
453  CHECK(storage_);
454  if (storage_->query_mem_desc_.didOutputColumnar()) {
455  storage_->fillOneEntryColWise(entry);
456  } else {
457  storage_->fillOneEntryRowWise(entry);
458  }
459  }
std::unique_ptr< ResultSetStorage > storage_
Definition: ResultSet.h:822
#define CHECK(condition)
Definition: Logger.h:197

◆ findStorage()

ResultSet::StorageLookupResult ResultSet::findStorage ( const size_t  entry_idx) const
private

Definition at line 657 of file ResultSet.cpp.

References appended_storage_, getStorageIndex(), and storage_.

Referenced by advanceCursorToNextEntry(), initPermutationBuffer(), and makeGeoTargetValue().

657  {
658  auto [stg_idx, fixedup_entry_idx] = getStorageIndex(entry_idx);
659  return {stg_idx ? appended_storage_[stg_idx - 1].get() : storage_.get(),
660  fixedup_entry_idx,
661  stg_idx};
662 }
AppendedStorage appended_storage_
Definition: ResultSet.h:823
std::unique_ptr< ResultSetStorage > storage_
Definition: ResultSet.h:822
std::pair< size_t, size_t > getStorageIndex(const size_t entry_idx) const
Definition: ResultSet.cpp:635
+ Here is the call graph for this function:
+ Here is the caller graph for this function:

◆ fixupCountDistinctPointers()

void ResultSet::fixupCountDistinctPointers ( )
private

◆ fixupQueryMemoryDescriptor()

QueryMemoryDescriptor ResultSet::fixupQueryMemoryDescriptor ( const QueryMemoryDescriptor query_mem_desc)
static

Definition at line 492 of file ResultSet.cpp.

References QueryMemoryDescriptor::didOutputColumnar(), and QueryMemoryDescriptor::resetGroupColWidths().

Referenced by GpuSharedMemCodeBuilder::codegenInitialization(), GpuSharedMemCodeBuilder::codegenReduction(), QueryExecutionContext::groupBufferToDeinterleavedResults(), QueryMemoryInitializer::initGroups(), QueryMemoryInitializer::QueryMemoryInitializer(), and Executor::reduceMultiDeviceResults().

493  {
494  auto query_mem_desc_copy = query_mem_desc;
495  query_mem_desc_copy.resetGroupColWidths(
496  std::vector<int8_t>(query_mem_desc_copy.getGroupbyColCount(), 8));
497  if (query_mem_desc.didOutputColumnar()) {
498  return query_mem_desc_copy;
499  }
500  query_mem_desc_copy.alignPaddedSlots();
501  return query_mem_desc_copy;
502 }
void resetGroupColWidths(const std::vector< int8_t > &new_group_col_widths)
+ Here is the call graph for this function:
+ Here is the caller graph for this function:

◆ getBufferSizeBytes()

size_t ResultSet::getBufferSizeBytes ( const ExecutorDeviceType  device_type) const

Definition at line 757 of file ResultSetIteration.cpp.

References CHECK, and storage_.

757  {
758  CHECK(storage_);
759  return storage_->query_mem_desc_.getBufferSizeBytes(device_type);
760 }
std::unique_ptr< ResultSetStorage > storage_
Definition: ResultSet.h:822
#define CHECK(condition)
Definition: Logger.h:197

◆ getColType()

SQLTypeInfo ResultSet::getColType ( const size_t  col_idx) const

Definition at line 316 of file ResultSet.cpp.

References CHECK_LT, just_explain_, kAVG, kDOUBLE, kTEXT, and targets_.

316  {
317  if (just_explain_) {
318  return SQLTypeInfo(kTEXT, false);
319  }
320  CHECK_LT(col_idx, targets_.size());
321  return targets_[col_idx].agg_kind == kAVG ? SQLTypeInfo(kDOUBLE, false)
322  : targets_[col_idx].sql_type;
323 }
const bool just_explain_
Definition: ResultSet.h:855
const std::vector< TargetInfo > targets_
Definition: ResultSet.h:818
#define CHECK_LT(x, y)
Definition: Logger.h:207
Definition: sqltypes.h:53
Definition: sqldefs.h:72

◆ getColumnarBaselineEntryAt() [1/2]

template<typename ENTRY_TYPE >
ENTRY_TYPE ResultSet::getColumnarBaselineEntryAt ( const size_t  row_idx,
const size_t  target_idx,
const size_t  slot_idx 
) const
private

◆ getColumnarBaselineEntryAt() [2/2]

template<typename ENTRY_TYPE >
ENTRY_TYPE ResultSet::getColumnarBaselineEntryAt ( const size_t  row_idx,
const size_t  target_idx,
const size_t  slot_idx 
) const

Directly accesses the result set's storage buffer for a particular data type (columnar output, baseline hash group by)

NOTE: Currently, only used in direct columnarization

Definition at line 1342 of file ResultSetIteration.cpp.

References CHECK_NE, and storage_.

1344  {
1345  CHECK_NE(storage_->query_mem_desc_.targetGroupbyIndicesSize(), size_t(0));
1346  const auto key_width = storage_->query_mem_desc_.getEffectiveKeyWidth();
1347  const auto column_offset =
1348  (storage_->query_mem_desc_.getTargetGroupbyIndex(target_idx) < 0)
1349  ? storage_->query_mem_desc_.getColOffInBytes(slot_idx)
1350  : storage_->query_mem_desc_.getTargetGroupbyIndex(target_idx) * key_width *
1351  storage_->query_mem_desc_.getEntryCount();
1352  const auto column_buffer = storage_->getUnderlyingBuffer() + column_offset;
1353  return reinterpret_cast<const ENTRY_TYPE*>(column_buffer)[row_idx];
1354 }
std::unique_ptr< ResultSetStorage > storage_
Definition: ResultSet.h:822
#define CHECK_NE(x, y)
Definition: Logger.h:206

◆ getColumnarBuffer()

const int8_t * ResultSet::getColumnarBuffer ( size_t  column_idx) const

Definition at line 948 of file ResultSet.cpp.

References CHECK, QueryMemoryDescriptor::getColOffInBytes(), isZeroCopyColumnarConversionPossible(), query_mem_desc_, and storage_.

948  {
950  return storage_->getUnderlyingBuffer() + query_mem_desc_.getColOffInBytes(column_idx);
951 }
QueryMemoryDescriptor query_mem_desc_
Definition: ResultSet.h:821
std::unique_ptr< ResultSetStorage > storage_
Definition: ResultSet.h:822
bool isZeroCopyColumnarConversionPossible(size_t column_idx) const
Definition: ResultSet.cpp:941
#define CHECK(condition)
Definition: Logger.h:197
size_t getColOffInBytes(const size_t col_idx) const
+ Here is the call graph for this function:

◆ getColumnarPerfectHashEntryAt() [1/2]

template<typename ENTRY_TYPE >
ENTRY_TYPE ResultSet::getColumnarPerfectHashEntryAt ( const size_t  row_idx,
const size_t  target_idx,
const size_t  slot_idx 
) const
private

◆ getColumnarPerfectHashEntryAt() [2/2]

template<typename ENTRY_TYPE >
ENTRY_TYPE ResultSet::getColumnarPerfectHashEntryAt ( const size_t  row_idx,
const size_t  target_idx,
const size_t  slot_idx 
) const

Directly accesses the result set's storage buffer for a particular data type (columnar output, perfect hash group by)

NOTE: Currently, only used in direct columnarization

Definition at line 1288 of file ResultSetIteration.cpp.

References storage_.

1290  {
1291  const size_t column_offset = storage_->query_mem_desc_.getColOffInBytes(slot_idx);
1292  const int8_t* storage_buffer = storage_->getUnderlyingBuffer() + column_offset;
1293  return reinterpret_cast<const ENTRY_TYPE*>(storage_buffer)[row_idx];
1294 }
std::unique_ptr< ResultSetStorage > storage_
Definition: ResultSet.h:822

◆ getColumnFrag()

const std::vector< const int8_t * > & ResultSet::getColumnFrag ( const size_t  storage_idx,
const size_t  col_logical_idx,
int64_t &  global_idx 
) const
private

Definition at line 1141 of file ResultSetIteration.cpp.

References CHECK_EQ, CHECK_GE, CHECK_LE, CHECK_LT, col_buffers_, consistent_frag_sizes_, frag_offsets_, and anonymous_namespace{ResultSetIteration.cpp}::get_frag_id_and_local_idx().

Referenced by lazyReadInt(), makeGeoTargetValue(), makeTargetValue(), and makeVarlenTargetValue().

1143  {
1144  CHECK_LT(static_cast<size_t>(storage_idx), col_buffers_.size());
1145  if (col_buffers_[storage_idx].size() > 1) {
1146  int64_t frag_id = 0;
1147  int64_t local_idx = global_idx;
1148  if (consistent_frag_sizes_[storage_idx][col_logical_idx] != -1) {
1149  frag_id = global_idx / consistent_frag_sizes_[storage_idx][col_logical_idx];
1150  local_idx = global_idx % consistent_frag_sizes_[storage_idx][col_logical_idx];
1151  } else {
1152  std::tie(frag_id, local_idx) = get_frag_id_and_local_idx(
1153  frag_offsets_[storage_idx], col_logical_idx, global_idx);
1154  CHECK_LE(local_idx, global_idx);
1155  }
1156  CHECK_GE(frag_id, int64_t(0));
1157  CHECK_LT(static_cast<size_t>(frag_id), col_buffers_[storage_idx].size());
1158  global_idx = local_idx;
1159  return col_buffers_[storage_idx][frag_id];
1160  } else {
1161  CHECK_EQ(size_t(1), col_buffers_[storage_idx].size());
1162  return col_buffers_[storage_idx][0];
1163  }
1164 }
#define CHECK_EQ(x, y)
Definition: Logger.h:205
#define CHECK_GE(x, y)
Definition: Logger.h:210
#define CHECK_LT(x, y)
Definition: Logger.h:207
#define CHECK_LE(x, y)
Definition: Logger.h:208
std::vector< std::vector< std::vector< const int8_t * > > > col_buffers_
Definition: ResultSet.h:840
std::vector< std::vector< int64_t > > consistent_frag_sizes_
Definition: ResultSet.h:842
std::vector< std::vector< std::vector< int64_t > > > frag_offsets_
Definition: ResultSet.h:841
std::pair< int64_t, int64_t > get_frag_id_and_local_idx(const std::vector< std::vector< T >> &frag_offsets, const size_t tab_or_col_idx, const int64_t global_idx)
+ Here is the call graph for this function:
+ Here is the caller graph for this function:

◆ getColumnInternal()

InternalTargetValue ResultSet::getColumnInternal ( const int8_t *  buff,
const size_t  entry_idx,
const size_t  target_logical_idx,
const StorageLookupResult storage_lookup_result 
) const
private

◆ getCurrentRowBufferIndex()

size_t ResultSet::getCurrentRowBufferIndex ( ) const

Definition at line 270 of file ResultSet.cpp.

References crt_row_buff_idx_.

270  {
271  if (crt_row_buff_idx_ == 0) {
272  throw std::runtime_error("current row buffer iteration index is undefined");
273  }
274  return crt_row_buff_idx_ - 1;
275 }
size_t crt_row_buff_idx_
Definition: ResultSet.h:824

◆ getDataManager()

Data_Namespace::DataMgr* ResultSet::getDataManager ( ) const
private

◆ getDeviceEstimatorBuffer()

int8_t * ResultSet::getDeviceEstimatorBuffer ( ) const

Definition at line 441 of file ResultSet.cpp.

References CHECK, device_type_, estimator_buffer_, and GPU.

441  {
443  return estimator_buffer_;
444 }
int8_t * estimator_buffer_
Definition: ResultSet.h:845
const ExecutorDeviceType device_type_
Definition: ResultSet.h:819
#define CHECK(condition)
Definition: Logger.h:197

◆ getDeviceId()

int ResultSet::getDeviceId ( ) const

Definition at line 488 of file ResultSet.cpp.

References device_id_.

488  {
489  return device_id_;
490 }
const int device_id_
Definition: ResultSet.h:820

◆ getDeviceType()

ExecutorDeviceType ResultSet::getDeviceType ( ) const

Definition at line 234 of file ResultSet.cpp.

References device_type_.

234  {
235  return device_type_;
236 }
const ExecutorDeviceType device_type_
Definition: ResultSet.h:819

◆ getDistinctBufferRefFromBufferRowwise()

int64_t ResultSet::getDistinctBufferRefFromBufferRowwise ( int8_t *  rowwise_target_ptr,
const TargetInfo target_info 
) const
private

◆ getEntryAt() [1/2]

template<typename ENTRY_TYPE , QueryDescriptionType QUERY_TYPE, bool COLUMNAR_FORMAT>
ENTRY_TYPE ResultSet::getEntryAt ( const size_t  row_idx,
const size_t  target_idx,
const size_t  slot_idx 
) const

◆ getEntryAt() [2/2]

template<typename ENTRY_TYPE , QueryDescriptionType QUERY_TYPE, bool COLUMNAR_FORMAT>
ENTRY_TYPE ResultSet::getEntryAt ( const size_t  row_idx,
const size_t  target_idx,
const size_t  slot_idx 
) const

Definition at line 1212 of file ResultSetIteration.cpp.

References GroupByBaselineHash, GroupByPerfectHash, and UNREACHABLE.

1214  {
1215  if constexpr (QUERY_TYPE == QueryDescriptionType::GroupByPerfectHash) { // NOLINT
1216  if constexpr (COLUMNAR_FORMAT) { // NOLINT
1217  return getColumnarPerfectHashEntryAt<ENTRY_TYPE>(row_idx, target_idx, slot_idx);
1218  } else {
1219  return getRowWisePerfectHashEntryAt<ENTRY_TYPE>(row_idx, target_idx, slot_idx);
1220  }
1221  } else if constexpr (QUERY_TYPE == QueryDescriptionType::GroupByBaselineHash) {
1222  if constexpr (COLUMNAR_FORMAT) { // NOLINT
1223  return getColumnarBaselineEntryAt<ENTRY_TYPE>(row_idx, target_idx, slot_idx);
1224  } else {
1225  return getRowWiseBaselineEntryAt<ENTRY_TYPE>(row_idx, target_idx, slot_idx);
1226  }
1227  } else {
1228  UNREACHABLE() << "Invalid query type is used";
1229  return 0;
1230  }
1231 }
#define UNREACHABLE()
Definition: Logger.h:241

◆ getGeoReturnType()

GeoReturnType ResultSet::getGeoReturnType ( ) const
inline

Definition at line 499 of file ResultSet.h.

499 { return geo_return_type_; }
GeoReturnType geo_return_type_
Definition: ResultSet.h:860

◆ getGpuCount()

int ResultSet::getGpuCount ( ) const
private

Referenced by sort().

+ Here is the caller graph for this function:

◆ getHostEstimatorBuffer()

int8_t * ResultSet::getHostEstimatorBuffer ( ) const

Definition at line 446 of file ResultSet.cpp.

References host_estimator_buffer_.

446  {
447  return host_estimator_buffer_;
448 }
int8_t * host_estimator_buffer_
Definition: ResultSet.h:846

◆ getLazyFetchInfo()

const std::vector<ColumnLazyFetchInfo>& ResultSet::getLazyFetchInfo ( ) const
inline

Definition at line 528 of file ResultSet.h.

528  {
529  return lazy_fetch_info_;
530  }
const std::vector< ColumnLazyFetchInfo > lazy_fetch_info_
Definition: ResultSet.h:839

◆ getLimit()

size_t ResultSet::getLimit ( ) const

Definition at line 896 of file ResultSet.cpp.

References keep_first_.

896  {
897  return keep_first_;
898 }
size_t keep_first_
Definition: ResultSet.h:827

◆ getNDVEstimator()

size_t ResultSet::getNDVEstimator ( ) const

Definition at line 22 of file CardinalityEstimator.cpp.

References bitmap_set_size(), CHECK, and CHECK_LE.

22  {
23  CHECK(dynamic_cast<const Analyzer::NDVEstimator*>(estimator_.get()));
25  auto bits_set = bitmap_set_size(host_estimator_buffer_, estimator_->getBufferSize());
26  const auto total_bits = estimator_->getBufferSize() * 8;
27  CHECK_LE(bits_set, total_bits);
28  const auto unset_bits = total_bits - bits_set;
29  const auto ratio = static_cast<double>(unset_bits) / total_bits;
30  if (ratio == 0.) {
31  throw std::runtime_error("Failed to get a high quality cardinality estimation");
32  }
33  return -static_cast<double>(total_bits) * log(ratio);
34 }
const std::shared_ptr< const Analyzer::Estimator > estimator_
Definition: ResultSet.h:844
#define CHECK_LE(x, y)
Definition: Logger.h:208
int8_t * host_estimator_buffer_
Definition: ResultSet.h:846
#define CHECK(condition)
Definition: Logger.h:197
size_t bitmap_set_size(const int8_t *bitmap, const size_t bitmap_byte_sz)
Definition: CountDistinct.h:37
+ Here is the call graph for this function:

◆ getNextRow()

std::vector< TargetValue > ResultSet::getNextRow ( const bool  translate_strings,
const bool  decimal_to_double 
) const

Definition at line 293 of file ResultSetIteration.cpp.

294  {
295  std::lock_guard<std::mutex> lock(row_iteration_mutex_);
296  if (!storage_ && !just_explain_) {
297  return {};
298  }
299  return getNextRowUnlocked(translate_strings, decimal_to_double);
300 }
std::mutex row_iteration_mutex_
Definition: ResultSet.h:857
double decimal_to_double(const SQLTypeInfo &otype, int64_t oval)
std::unique_ptr< ResultSetStorage > storage_
Definition: ResultSet.h:822
std::vector< TargetValue > getNextRowUnlocked(const bool translate_strings, const bool decimal_to_double) const
const bool just_explain_
Definition: ResultSet.h:855

◆ getNextRowImpl()

std::vector< TargetValue > ResultSet::getNextRowImpl ( const bool  translate_strings,
const bool  decimal_to_double 
) const
private

Definition at line 321 of file ResultSetIteration.cpp.

References CHECK, and CHECK_EQ.

322  {
323  auto entry_buff_idx = advanceCursorToNextEntry();
325  return {};
326  }
327 
328  if (crt_row_buff_idx_ >= entryCount()) {
330  return {};
331  }
332  auto row = getRowAt(entry_buff_idx, translate_strings, decimal_to_double, false);
333  CHECK(!row.empty());
335  ++fetched_so_far_;
336 
337  return row;
338 }
#define CHECK_EQ(x, y)
Definition: Logger.h:205
double decimal_to_double(const SQLTypeInfo &otype, int64_t oval)
size_t entryCount() const
size_t keep_first_
Definition: ResultSet.h:827
std::vector< TargetValue > getRowAt(const size_t index) const
size_t drop_first_
Definition: ResultSet.h:826
#define CHECK(condition)
Definition: Logger.h:197
size_t fetched_so_far_
Definition: ResultSet.h:825
size_t crt_row_buff_idx_
Definition: ResultSet.h:824
size_t advanceCursorToNextEntry() const

◆ getNextRowUnlocked()

std::vector< TargetValue > ResultSet::getNextRowUnlocked ( const bool  translate_strings,
const bool  decimal_to_double 
) const
private

Definition at line 302 of file ResultSetIteration.cpp.

Referenced by rowCount().

304  {
305  if (just_explain_) {
306  if (fetched_so_far_) {
307  return {};
308  }
309  fetched_so_far_ = 1;
310  return {explanation_};
311  }
312  while (fetched_so_far_ < drop_first_) {
313  const auto row = getNextRowImpl(translate_strings, decimal_to_double);
314  if (row.empty()) {
315  return row;
316  }
317  }
318  return getNextRowImpl(translate_strings, decimal_to_double);
319 }
std::vector< TargetValue > getNextRowImpl(const bool translate_strings, const bool decimal_to_double) const
double decimal_to_double(const SQLTypeInfo &otype, int64_t oval)
const bool just_explain_
Definition: ResultSet.h:855
size_t drop_first_
Definition: ResultSet.h:826
std::string explanation_
Definition: ResultSet.h:854
size_t fetched_so_far_
Definition: ResultSet.h:825
+ Here is the caller graph for this function:

◆ getOneColRow()

OneIntegerColumnRow ResultSet::getOneColRow ( const size_t  index) const

Definition at line 231 of file ResultSetIteration.cpp.

References align_to_int64(), CHECK, get_key_bytes_rowwise(), getRowAt(), and row_ptr_rowwise().

231  {
232  const auto storage_lookup_result = findStorage(global_entry_idx);
233  const auto storage = storage_lookup_result.storage_ptr;
234  const auto local_entry_idx = storage_lookup_result.fixedup_entry_idx;
235  if (storage->isEmptyEntry(local_entry_idx)) {
236  return {0, false};
237  }
238  const auto buff = storage->buff_;
239  CHECK(buff);
241  const auto keys_ptr = row_ptr_rowwise(buff, query_mem_desc_, local_entry_idx);
242  const auto key_bytes_with_padding =
244  const auto rowwise_target_ptr = keys_ptr + key_bytes_with_padding;
245  const auto tv = getTargetValueFromBufferRowwise(rowwise_target_ptr,
246  keys_ptr,
247  global_entry_idx,
248  targets_.front(),
249  0,
250  0,
251  false,
252  false,
253  false);
254  const auto scalar_tv = boost::get<ScalarTargetValue>(&tv);
255  CHECK(scalar_tv);
256  const auto ival_ptr = boost::get<int64_t>(scalar_tv);
257  CHECK(ival_ptr);
258  return {*ival_ptr, true};
259 }
QueryMemoryDescriptor query_mem_desc_
Definition: ResultSet.h:821
const std::vector< TargetInfo > targets_
Definition: ResultSet.h:818
StorageLookupResult findStorage(const size_t entry_idx) const
Definition: ResultSet.cpp:657
T row_ptr_rowwise(T buff, const QueryMemoryDescriptor &query_mem_desc, const size_t entry_idx)
TargetValue getTargetValueFromBufferRowwise(int8_t *rowwise_target_ptr, int8_t *keys_ptr, const size_t entry_buff_idx, const TargetInfo &target_info, const size_t target_logical_idx, const size_t slot_idx, const bool translate_strings, const bool decimal_to_double, const bool fixup_count_distinct_pointers) const
#define CHECK(condition)
Definition: Logger.h:197
size_t get_key_bytes_rowwise(const QueryMemoryDescriptor &query_mem_desc)
FORCE_INLINE HOST DEVICE T align_to_int64(T addr)
+ Here is the call graph for this function:

◆ getPaddedSlotWidthBytes()

const int8_t ResultSet::getPaddedSlotWidthBytes ( const size_t  slot_idx) const
inline

Definition at line 517 of file ResultSet.h.

References QueryMemoryDescriptor::getPaddedSlotWidthBytes(), and ResultSetStorage::query_mem_desc_.

517  {
518  return query_mem_desc_.getPaddedSlotWidthBytes(slot_idx);
519  }
QueryMemoryDescriptor query_mem_desc_
Definition: ResultSet.h:821
const int8_t getPaddedSlotWidthBytes(const size_t slot_idx) const
+ Here is the call graph for this function:

◆ getPermutationBuffer()

const std::vector< uint32_t > & ResultSet::getPermutationBuffer ( ) const

Definition at line 592 of file ResultSet.cpp.

References permutation_.

592  {
593  return permutation_;
594 }
std::vector< uint32_t > permutation_
Definition: ResultSet.h:829

◆ getQueryDescriptionType()

QueryDescriptionType ResultSet::getQueryDescriptionType ( ) const
inline

Definition at line 513 of file ResultSet.h.

References QueryMemoryDescriptor::getQueryDescriptionType(), and ResultSetStorage::query_mem_desc_.

513  {
515  }
QueryMemoryDescriptor query_mem_desc_
Definition: ResultSet.h:821
QueryDescriptionType getQueryDescriptionType() const
+ Here is the call graph for this function:

◆ getQueryMemDesc()

const QueryMemoryDescriptor & ResultSet::getQueryMemDesc ( ) const

Definition at line 427 of file ResultSet.cpp.

References CHECK, and storage_.

427  {
428  CHECK(storage_);
429  return storage_->query_mem_desc_;
430 }
std::unique_ptr< ResultSetStorage > storage_
Definition: ResultSet.h:822
#define CHECK(condition)
Definition: Logger.h:197

◆ getQueueTime()

int64_t ResultSet::getQueueTime ( ) const

Definition at line 467 of file ResultSet.cpp.

References queue_time_ms_.

467  {
468  return queue_time_ms_;
469 }
int64_t queue_time_ms_
Definition: ResultSet.h:830

◆ getRenderTime()

int64_t ResultSet::getRenderTime ( ) const

Definition at line 471 of file ResultSet.cpp.

References render_time_ms_.

471  {
472  return render_time_ms_;
473 }
int64_t render_time_ms_
Definition: ResultSet.h:831

◆ getRowAt() [1/3]

std::vector<TargetValue> ResultSet::getRowAt ( const size_t  index) const

Referenced by get_byteoff_of_slot(), and getOneColRow().

+ Here is the caller graph for this function:

◆ getRowAt() [2/3]

TargetValue ResultSet::getRowAt ( const size_t  row_idx,
const size_t  col_idx,
const bool  translate_strings,
const bool  decimal_to_double = true 
) const

◆ getRowAt() [3/3]

std::vector<TargetValue> ResultSet::getRowAt ( const size_t  index,
const bool  translate_strings,
const bool  decimal_to_double,
const bool  fixup_count_distinct_pointers,
const std::vector< bool > &  targets_to_skip = {} 
) const
private

◆ getRowAtNoTranslations()

std::vector< TargetValue > ResultSet::getRowAtNoTranslations ( const size_t  index,
const std::vector< bool > &  targets_to_skip = {} 
) const

Definition at line 270 of file ResultSetIteration.cpp.

272  {
273  if (logical_index >= entryCount()) {
274  return {};
275  }
276  const auto entry_idx =
277  permutation_.empty() ? logical_index : permutation_[logical_index];
278  return getRowAt(entry_idx, false, false, false, targets_to_skip);
279 }
size_t entryCount() const
std::vector< TargetValue > getRowAt(const size_t index) const
std::vector< uint32_t > permutation_
Definition: ResultSet.h:829

◆ getRowSetMemOwner()

std::shared_ptr<RowSetMemoryOwner> ResultSet::getRowSetMemOwner ( ) const
inline

Definition at line 473 of file ResultSet.h.

473  {
474  return row_set_mem_owner_;
475  }
std::shared_ptr< RowSetMemoryOwner > row_set_mem_owner_
Definition: ResultSet.h:828

◆ getRowWiseBaselineEntryAt() [1/2]

template<typename ENTRY_TYPE >
ENTRY_TYPE ResultSet::getRowWiseBaselineEntryAt ( const size_t  row_idx,
const size_t  target_idx,
const size_t  slot_idx 
) const
private

◆ getRowWiseBaselineEntryAt() [2/2]

template<typename ENTRY_TYPE >
ENTRY_TYPE ResultSet::getRowWiseBaselineEntryAt ( const size_t  row_idx,
const size_t  target_idx,
const size_t  slot_idx 
) const

Directly accesses the result set's storage buffer for a particular data type (row-wise output, baseline hash group by)

NOTE: Currently, only used in direct columnarization

Definition at line 1320 of file ResultSetIteration.cpp.

References CHECK_NE, row_ptr_rowwise(), and storage_.

1322  {
1323  CHECK_NE(storage_->query_mem_desc_.targetGroupbyIndicesSize(), size_t(0));
1324  const auto key_width = storage_->query_mem_desc_.getEffectiveKeyWidth();
1325  auto keys_ptr = row_ptr_rowwise(
1326  storage_->getUnderlyingBuffer(), storage_->query_mem_desc_, row_idx);
1327  const auto column_offset =
1328  (storage_->query_mem_desc_.getTargetGroupbyIndex(target_idx) < 0)
1329  ? storage_->query_mem_desc_.getColOffInBytes(slot_idx)
1330  : storage_->query_mem_desc_.getTargetGroupbyIndex(target_idx) * key_width;
1331  const auto storage_buffer = keys_ptr + column_offset;
1332  return *reinterpret_cast<const ENTRY_TYPE*>(storage_buffer);
1333 }
std::unique_ptr< ResultSetStorage > storage_
Definition: ResultSet.h:822
#define CHECK_NE(x, y)
Definition: Logger.h:206
T row_ptr_rowwise(T buff, const QueryMemoryDescriptor &query_mem_desc, const size_t entry_idx)
+ Here is the call graph for this function:

◆ getRowWisePerfectHashEntryAt() [1/2]

template<typename ENTRY_TYPE >
ENTRY_TYPE ResultSet::getRowWisePerfectHashEntryAt ( const size_t  row_idx,
const size_t  target_idx,
const size_t  slot_idx 
) const
private

◆ getRowWisePerfectHashEntryAt() [2/2]

template<typename ENTRY_TYPE >
ENTRY_TYPE ResultSet::getRowWisePerfectHashEntryAt ( const size_t  row_idx,
const size_t  target_idx,
const size_t  slot_idx 
) const

Directly accesses the result set's storage buffer for a particular data type (row-wise output, perfect hash group by)

NOTE: Currently, only used in direct columnarization

Definition at line 1303 of file ResultSetIteration.cpp.

References storage_.

1305  {
1306  const size_t row_offset = storage_->query_mem_desc_.getRowSize() * row_idx;
1307  const size_t column_offset = storage_->query_mem_desc_.getColOffInBytes(slot_idx);
1308  const int8_t* storage_buffer =
1309  storage_->getUnderlyingBuffer() + row_offset + column_offset;
1310  return *reinterpret_cast<const ENTRY_TYPE*>(storage_buffer);
1311 }
std::unique_ptr< ResultSetStorage > storage_
Definition: ResultSet.h:822

◆ getSingleSlotTargetBitmap()

std::tuple< std::vector< bool >, size_t > ResultSet::getSingleSlotTargetBitmap ( ) const

Definition at line 954 of file ResultSet.cpp.

References anonymous_namespace{RelAlgExecutor.cpp}::is_agg(), kAVG, and targets_.

Referenced by getSupportedSingleSlotTargetBitmap().

954  {
955  std::vector<bool> target_bitmap(targets_.size(), true);
956  size_t num_single_slot_targets = 0;
957  for (size_t target_idx = 0; target_idx < targets_.size(); target_idx++) {
958  const auto& sql_type = targets_[target_idx].sql_type;
959  if (targets_[target_idx].is_agg && targets_[target_idx].agg_kind == kAVG) {
960  target_bitmap[target_idx] = false;
961  } else if (sql_type.is_varlen()) {
962  target_bitmap[target_idx] = false;
963  } else {
964  num_single_slot_targets++;
965  }
966  }
967  return std::make_tuple(std::move(target_bitmap), num_single_slot_targets);
968 }
bool is_agg(const Analyzer::Expr *expr)
const std::vector< TargetInfo > targets_
Definition: ResultSet.h:818
Definition: sqldefs.h:72
+ Here is the call graph for this function:
+ Here is the caller graph for this function:

◆ getSlotIndicesForTargetIndices()

std::vector< size_t > ResultSet::getSlotIndicesForTargetIndices ( ) const

Definition at line 997 of file ResultSet.cpp.

References advance_slot(), and targets_.

997  {
998  std::vector<size_t> slot_indices(targets_.size(), 0);
999  size_t slot_index = 0;
1000  for (size_t target_idx = 0; target_idx < targets_.size(); target_idx++) {
1001  slot_indices[target_idx] = slot_index;
1002  slot_index = advance_slot(slot_index, targets_[target_idx], false);
1003  }
1004  return slot_indices;
1005 }
const std::vector< TargetInfo > targets_
Definition: ResultSet.h:818
size_t advance_slot(const size_t j, const TargetInfo &target_info, const bool separate_varlen_storage)
+ Here is the call graph for this function:

◆ getStorage()

const ResultSetStorage * ResultSet::getStorage ( ) const

Definition at line 308 of file ResultSet.cpp.

References storage_.

308  {
309  return storage_.get();
310 }
std::unique_ptr< ResultSetStorage > storage_
Definition: ResultSet.h:822

◆ getStorageIndex()

std::pair< size_t, size_t > ResultSet::getStorageIndex ( const size_t  entry_idx) const
private

Returns a (storageIdx, entryIdx) pair, where: storageIdx : 0 means the entry lives in storage_; any other value k means it lives in appended_storage_[k - 1]. entryIdx : the index local to that storage object.

Definition at line 635 of file ResultSet.cpp.

References appended_storage_, CHECK_NE, QueryMemoryDescriptor::getEntryCount(), query_mem_desc_, storage_, and UNREACHABLE.

Referenced by findStorage(), makeGeoTargetValue(), makeTargetValue(), and makeVarlenTargetValue().

635  {
636  size_t fixedup_entry_idx = entry_idx;
637  auto entry_count = storage_->query_mem_desc_.getEntryCount();
638  const bool is_rowwise_layout = !storage_->query_mem_desc_.didOutputColumnar();
639  if (fixedup_entry_idx < entry_count) {
640  return {0, fixedup_entry_idx};
641  }
642  fixedup_entry_idx -= entry_count;
643  for (size_t i = 0; i < appended_storage_.size(); ++i) {
644  const auto& desc = appended_storage_[i]->query_mem_desc_;
645  CHECK_NE(is_rowwise_layout, desc.didOutputColumnar());
646  entry_count = desc.getEntryCount();
647  if (fixedup_entry_idx < entry_count) {
648  return {i + 1, fixedup_entry_idx};
649  }
650  fixedup_entry_idx -= entry_count;
651  }
652  UNREACHABLE() << "entry_idx = " << entry_idx << ", query_mem_desc_.getEntryCount() = "
653                << query_mem_desc_.getEntryCount();
654  return {};
655 }
AppendedStorage appended_storage_
Definition: ResultSet.h:823
QueryMemoryDescriptor query_mem_desc_
Definition: ResultSet.h:821
#define UNREACHABLE()
Definition: Logger.h:241
std::unique_ptr< ResultSetStorage > storage_
Definition: ResultSet.h:822
#define CHECK_NE(x, y)
Definition: Logger.h:206
+ Here is the call graph for this function:
+ Here is the caller graph for this function:

◆ getStringDictionaryPayloadCopy()

std::shared_ptr< const std::vector< std::string > > ResultSet::getStringDictionaryPayloadCopy ( const int  dict_id) const

Definition at line 900 of file ResultSet.cpp.

References CHECK, executor_, and row_set_mem_owner_.

901  {
902  CHECK(executor_);
903  const auto sdp =
904  executor_->getStringDictionaryProxy(dict_id, row_set_mem_owner_, false);
905  return sdp->getDictionary()->copyStrings();
906 }
const Executor * executor_
Definition: ResultSet.h:832
std::shared_ptr< RowSetMemoryOwner > row_set_mem_owner_
Definition: ResultSet.h:828
#define CHECK(condition)
Definition: Logger.h:197

◆ getSupportedSingleSlotTargetBitmap()

std::tuple< std::vector< bool >, size_t > ResultSet::getSupportedSingleSlotTargetBitmap ( ) const

This function returns a bitmap, together with its population count, that denotes all supported single-column targets suitable for direct columnarization.

The final goal is to remove the need for such selection, but at the moment for any target that doesn't qualify for direct columnarization, we use the traditional result set's iteration to handle it (e.g., count distinct, approximate count distinct)

Definition at line 978 of file ResultSet.cpp.

References CHECK, CHECK_GE, getSingleSlotTargetBitmap(), is_distinct_target(), isDirectColumnarConversionPossible(), kFLOAT, kSAMPLE, and targets_.

979  {
980  CHECK(isDirectColumnarConversionPossible());
981  auto [single_slot_targets, num_single_slot_targets] = getSingleSlotTargetBitmap();
982 
983  for (size_t target_idx = 0; target_idx < single_slot_targets.size(); target_idx++) {
984  const auto& target = targets_[target_idx];
985  if (single_slot_targets[target_idx] &&
986  (is_distinct_target(target) ||
987  (target.is_agg && target.agg_kind == kSAMPLE && target.sql_type == kFLOAT))) {
988  single_slot_targets[target_idx] = false;
989  num_single_slot_targets--;
990  }
991  }
992  CHECK_GE(num_single_slot_targets, size_t(0));
993  return std::make_tuple(std::move(single_slot_targets), num_single_slot_targets);
994 }
#define CHECK_GE(x, y)
Definition: Logger.h:210
bool isDirectColumnarConversionPossible() const
Definition: ResultSet.cpp:923
const std::vector< TargetInfo > targets_
Definition: ResultSet.h:818
bool is_distinct_target(const TargetInfo &target_info)
Definition: TargetInfo.h:117
std::tuple< std::vector< bool >, size_t > getSingleSlotTargetBitmap() const
Definition: ResultSet.cpp:954
#define CHECK(condition)
Definition: Logger.h:197
+ Here is the call graph for this function:

◆ getTargetInfos()

const std::vector< TargetInfo > & ResultSet::getTargetInfos ( ) const

Definition at line 432 of file ResultSet.cpp.

References targets_.

432  {
433  return targets_;
434 }
const std::vector< TargetInfo > targets_
Definition: ResultSet.h:818

◆ getTargetInitVals()

const std::vector< int64_t > & ResultSet::getTargetInitVals ( ) const

Definition at line 436 of file ResultSet.cpp.

References CHECK, and storage_.

436  {
437  CHECK(storage_);
438  return storage_->target_init_vals_;
439 }
std::unique_ptr< ResultSetStorage > storage_
Definition: ResultSet.h:822
#define CHECK(condition)
Definition: Logger.h:197

◆ getTargetValueFromBufferColwise()

TargetValue ResultSet::getTargetValueFromBufferColwise ( const int8_t *  col_ptr,
const int8_t *  keys_ptr,
const QueryMemoryDescriptor query_mem_desc,
const size_t  local_entry_idx,
const size_t  global_entry_idx,
const TargetInfo target_info,
const size_t  target_logical_idx,
const size_t  slot_idx,
const bool  translate_strings,
const bool  decimal_to_double 
) const
private

Definition at line 1914 of file ResultSetIteration.cpp.

References advance_to_next_columnar_target_buff(), TargetInfo::agg_kind, CHECK, CHECK_GE, anonymous_namespace{ResultSetIteration.cpp}::columnar_elem_ptr(), QueryMemoryDescriptor::didOutputColumnar(), QueryMemoryDescriptor::getEffectiveKeyWidth(), QueryMemoryDescriptor::getEntryCount(), QueryMemoryDescriptor::getPaddedSlotWidthBytes(), QueryMemoryDescriptor::getTargetGroupbyIndex(), TargetInfo::is_agg, SQLTypeInfo::is_geometry(), is_real_str_or_array(), kAVG, anonymous_namespace{ResultSetIteration.cpp}::make_avg_target_value(), makeGeoTargetValue(), makeTargetValue(), makeVarlenTargetValue(), query_mem_desc_, TargetInfo::sql_type, and QueryMemoryDescriptor::targetGroupbyIndicesSize().

1924  {
1926  const auto col1_ptr = col_ptr;
1927  const auto compact_sz1 = query_mem_desc.getPaddedSlotWidthBytes(slot_idx);
1928  const auto next_col_ptr =
1929  advance_to_next_columnar_target_buff(col1_ptr, query_mem_desc, slot_idx);
1930  const auto col2_ptr = ((target_info.is_agg && target_info.agg_kind == kAVG) ||
1931  is_real_str_or_array(target_info))
1932  ? next_col_ptr
1933  : nullptr;
1934  const auto compact_sz2 = ((target_info.is_agg && target_info.agg_kind == kAVG) ||
1935  is_real_str_or_array(target_info))
1936  ? query_mem_desc.getPaddedSlotWidthBytes(slot_idx + 1)
1937  : 0;
1938 
1939  // TODO(Saman): add required logics for count distinct
1940  // geospatial target values:
1941  if (target_info.sql_type.is_geometry()) {
1942  return makeGeoTargetValue(
1943  col1_ptr, slot_idx, target_info, target_logical_idx, global_entry_idx);
1944  }
1945 
1946  const auto ptr1 = columnar_elem_ptr(local_entry_idx, col1_ptr, compact_sz1);
1947  if (target_info.agg_kind == kAVG || is_real_str_or_array(target_info)) {
1948  CHECK(col2_ptr);
1949  CHECK(compact_sz2);
1950  const auto ptr2 = columnar_elem_ptr(local_entry_idx, col2_ptr, compact_sz2);
1951  return target_info.agg_kind == kAVG
1952  ? make_avg_target_value(ptr1, compact_sz1, ptr2, compact_sz2, target_info)
1953  : makeVarlenTargetValue(ptr1,
1954  compact_sz1,
1955  ptr2,
1956  compact_sz2,
1957  target_info,
1958  target_logical_idx,
1959  translate_strings,
1960  global_entry_idx);
1961  }
1962  if (query_mem_desc_.targetGroupbyIndicesSize() == 0 ||
1963      query_mem_desc_.getTargetGroupbyIndex(target_logical_idx) < 0) {
1964  return makeTargetValue(ptr1,
1965  compact_sz1,
1966  target_info,
1967  target_logical_idx,
1968  translate_strings,
1969  decimal_to_double,
1970  global_entry_idx);
1971  }
1972  const auto key_width = query_mem_desc_.getEffectiveKeyWidth();
1973  const auto key_idx = query_mem_desc_.getTargetGroupbyIndex(target_logical_idx);
1974  CHECK_GE(key_idx, 0);
1975  auto key_col_ptr = keys_ptr + key_idx * query_mem_desc_.getEntryCount() * key_width;
1976  return makeTargetValue(columnar_elem_ptr(local_entry_idx, key_col_ptr, key_width),
1977  key_width,
1978  target_info,
1979  target_logical_idx,
1980  translate_strings,
1981  decimal_to_double,
1982  global_entry_idx);
1983 }
ssize_t getTargetGroupbyIndex(const size_t target_idx) const
double decimal_to_double(const SQLTypeInfo &otype, int64_t oval)
T advance_to_next_columnar_target_buff(T target_ptr, const QueryMemoryDescriptor &query_mem_desc, const size_t target_slot_idx)
SQLTypeInfo sql_type
Definition: TargetInfo.h:42
QueryMemoryDescriptor query_mem_desc_
Definition: ResultSet.h:821
#define CHECK_GE(x, y)
Definition: Logger.h:210
TargetValue make_avg_target_value(const int8_t *ptr1, const int8_t compact_sz1, const int8_t *ptr2, const int8_t compact_sz2, const TargetInfo &target_info)
const int8_t getPaddedSlotWidthBytes(const size_t slot_idx) const
bool is_agg
Definition: TargetInfo.h:40
size_t targetGroupbyIndicesSize() const
SQLAgg agg_kind
Definition: TargetInfo.h:41
bool is_geometry() const
Definition: sqltypes.h:421
bool is_real_str_or_array(const TargetInfo &target_info)
TargetValue makeTargetValue(const int8_t *ptr, const int8_t compact_sz, const TargetInfo &target_info, const size_t target_logical_idx, const bool translate_strings, const bool decimal_to_double, const size_t entry_buff_idx) const
TargetValue makeVarlenTargetValue(const int8_t *ptr1, const int8_t compact_sz1, const int8_t *ptr2, const int8_t compact_sz2, const TargetInfo &target_info, const size_t target_logical_idx, const bool translate_strings, const size_t entry_buff_idx) const
#define CHECK(condition)
Definition: Logger.h:197
TargetValue makeGeoTargetValue(const int8_t *geo_target_ptr, const size_t slot_idx, const TargetInfo &target_info, const size_t target_logical_idx, const size_t entry_buff_idx) const
Definition: sqldefs.h:72
const int8_t * columnar_elem_ptr(const size_t entry_idx, const int8_t *col1_ptr, const int8_t compact_sz1)
size_t getEffectiveKeyWidth() const
+ Here is the call graph for this function:

◆ getTargetValueFromBufferRowwise()

TargetValue ResultSet::getTargetValueFromBufferRowwise ( int8_t *  rowwise_target_ptr,
int8_t *  keys_ptr,
const size_t  entry_buff_idx,
const TargetInfo target_info,
const size_t  target_logical_idx,
const size_t  slot_idx,
const bool  translate_strings,
const bool  decimal_to_double,
const bool  fixup_count_distinct_pointers 
) const
private

Definition at line 1987 of file ResultSetIteration.cpp.

References TargetInfo::agg_kind, CHECK, checked_malloc(), QueryMemoryDescriptor::count_distinct_descriptors_, SQLTypeInfo::get_compression(), QueryMemoryDescriptor::getEffectiveKeyWidth(), QueryMemoryDescriptor::getLogicalSlotWidthBytes(), QueryMemoryDescriptor::getPaddedSlotWidthBytes(), QueryMemoryDescriptor::getTargetGroupbyIndex(), QueryMemoryDescriptor::hasKeylessHash(), TargetInfo::is_agg, SQLTypeInfo::is_array(), is_distinct_target(), SQLTypeInfo::is_geometry(), is_real_str_or_array(), SQLTypeInfo::is_string(), QueryMemoryDescriptor::isSingleColumnGroupByWithPerfectHash(), kAVG, kENCODING_NONE, anonymous_namespace{ResultSetIteration.cpp}::make_avg_target_value(), makeGeoTargetValue(), makeTargetValue(), makeVarlenTargetValue(), query_mem_desc_, row_set_mem_owner_, separate_varlen_storage_valid_, TargetInfo::sql_type, storage_, QueryMemoryDescriptor::targetGroupbyIndicesSize(), and UNLIKELY.

1996  {
1997  if (UNLIKELY(fixup_count_distinct_pointers)) {
1998  if (is_distinct_target(target_info)) {
1999  auto count_distinct_ptr_ptr = reinterpret_cast<int64_t*>(rowwise_target_ptr);
2000  const auto remote_ptr = *count_distinct_ptr_ptr;
2001  if (remote_ptr) {
2002  const auto ptr = storage_->mappedPtr(remote_ptr);
2003  if (ptr) {
2004  *count_distinct_ptr_ptr = ptr;
2005  } else {
2006  // need to create a zero filled buffer for this remote_ptr
2007  const auto& count_distinct_desc =
2008  query_mem_desc_.count_distinct_descriptors_[target_logical_idx];
2009  const auto bitmap_byte_sz = count_distinct_desc.sub_bitmap_count == 1
2010  ? count_distinct_desc.bitmapSizeBytes()
2011  : count_distinct_desc.bitmapPaddedSizeBytes();
2012  auto count_distinct_buffer =
2013  static_cast<int8_t*>(checked_malloc(bitmap_byte_sz));
2014  memset(count_distinct_buffer, 0, bitmap_byte_sz);
2015  row_set_mem_owner_->addCountDistinctBuffer(
2016  count_distinct_buffer, bitmap_byte_sz, true);
2017  *count_distinct_ptr_ptr = reinterpret_cast<int64_t>(count_distinct_buffer);
2018  }
2019  }
2020  }
2021  return int64_t(0);
2022  }
2023  if (target_info.sql_type.is_geometry()) {
2024  return makeGeoTargetValue(
2025  rowwise_target_ptr, slot_idx, target_info, target_logical_idx, entry_buff_idx);
2026  }
2027 
2028  auto ptr1 = rowwise_target_ptr;
2029  int8_t compact_sz1 = query_mem_desc_.getPaddedSlotWidthBytes(slot_idx);
2030  if (query_mem_desc_.isSingleColumnGroupByWithPerfectHash() &&
2031      !query_mem_desc_.hasKeylessHash() && !target_info.is_agg) {
2032  // Single column perfect hash group by can utilize one slot for both the key and the
2033  // target value if both values fit in 8 bytes. Use the target value actual size for
2034  // this case. If they don't, the target value should be 8 bytes, so we can still use
2035  // the actual size rather than the compact size.
2036  compact_sz1 = query_mem_desc_.getLogicalSlotWidthBytes(slot_idx);
2037  }
2038 
2039  // logic for deciding width of column
2040  if (target_info.agg_kind == kAVG || is_real_str_or_array(target_info)) {
2041  const auto ptr2 =
2042  rowwise_target_ptr + query_mem_desc_.getPaddedSlotWidthBytes(slot_idx);
2043  int8_t compact_sz2 = 0;
2044  // Skip reading the second slot if we have a none encoded string and are using
2045  // the none encoded strings buffer attached to ResultSetStorage
2046  if (!(separate_varlen_storage_valid_ &&
2047        (target_info.sql_type.is_array() ||
2048  (target_info.sql_type.is_string() &&
2049  target_info.sql_type.get_compression() == kENCODING_NONE)))) {
2050  compact_sz2 = query_mem_desc_.getPaddedSlotWidthBytes(slot_idx + 1);
2051  }
2052  if (separate_varlen_storage_valid_ && target_info.is_agg) {
2053  compact_sz2 = 8; // TODO(adb): is there a better way to do this?
2054  }
2055  CHECK(ptr2);
2056  return target_info.agg_kind == kAVG
2057  ? make_avg_target_value(ptr1, compact_sz1, ptr2, compact_sz2, target_info)
2058  : makeVarlenTargetValue(ptr1,
2059  compact_sz1,
2060  ptr2,
2061  compact_sz2,
2062  target_info,
2063  target_logical_idx,
2064  translate_strings,
2065  entry_buff_idx);
2066  }
2067  if (query_mem_desc_.targetGroupbyIndicesSize() == 0 ||
2068      query_mem_desc_.getTargetGroupbyIndex(target_logical_idx) < 0) {
2069  return makeTargetValue(ptr1,
2070  compact_sz1,
2071  target_info,
2072  target_logical_idx,
2073  translate_strings,
2074  decimal_to_double,
2075  entry_buff_idx);
2076  }
2077  const auto key_width = query_mem_desc_.getEffectiveKeyWidth();
2078  ptr1 = keys_ptr + query_mem_desc_.getTargetGroupbyIndex(target_logical_idx) * key_width;
2079  return makeTargetValue(ptr1,
2080  key_width,
2081  target_info,
2082  target_logical_idx,
2083  translate_strings,
2084  decimal_to_double,
2085  entry_buff_idx);
2086 }
bool is_array() const
Definition: sqltypes.h:417
bool is_string() const
Definition: sqltypes.h:409
ssize_t getTargetGroupbyIndex(const size_t target_idx) const
double decimal_to_double(const SQLTypeInfo &otype, int64_t oval)
SQLTypeInfo sql_type
Definition: TargetInfo.h:42
QueryMemoryDescriptor query_mem_desc_
Definition: ResultSet.h:821
std::unique_ptr< ResultSetStorage > storage_
Definition: ResultSet.h:822
TargetValue make_avg_target_value(const int8_t *ptr1, const int8_t compact_sz1, const int8_t *ptr2, const int8_t compact_sz2, const TargetInfo &target_info)
HOST DEVICE EncodingType get_compression() const
Definition: sqltypes.h:266
const int8_t getPaddedSlotWidthBytes(const size_t slot_idx) const
std::shared_ptr< RowSetMemoryOwner > row_set_mem_owner_
Definition: ResultSet.h:828
bool is_agg
Definition: TargetInfo.h:40
void * checked_malloc(const size_t size)
Definition: checked_alloc.h:44
CountDistinctDescriptors count_distinct_descriptors_
bool is_distinct_target(const TargetInfo &target_info)
Definition: TargetInfo.h:117
size_t targetGroupbyIndicesSize() const
SQLAgg agg_kind
Definition: TargetInfo.h:41
#define UNLIKELY(x)
Definition: likely.h:20
bool is_real_str_or_array(const TargetInfo &target_info)
bool is_geometry() const
Definition: sqltypes.h:421
TargetValue makeTargetValue(const int8_t *ptr, const int8_t compact_sz, const TargetInfo &target_info, const size_t target_logical_idx, const bool translate_strings, const bool decimal_to_double, const size_t entry_buff_idx) const
TargetValue makeVarlenTargetValue(const int8_t *ptr1, const int8_t compact_sz1, const int8_t *ptr2, const int8_t compact_sz2, const TargetInfo &target_info, const size_t target_logical_idx, const bool translate_strings, const size_t entry_buff_idx) const
#define CHECK(condition)
Definition: Logger.h:197
const int8_t getLogicalSlotWidthBytes(const size_t slot_idx) const
bool separate_varlen_storage_valid_
Definition: ResultSet.h:853
TargetValue makeGeoTargetValue(const int8_t *geo_target_ptr, const size_t slot_idx, const TargetInfo &target_info, const size_t target_logical_idx, const size_t entry_buff_idx) const
Definition: sqldefs.h:72
bool isSingleColumnGroupByWithPerfectHash() const
size_t getEffectiveKeyWidth() const
+ Here is the call graph for this function:

◆ getVarlenOrderEntry()

InternalTargetValue ResultSet::getVarlenOrderEntry ( const int64_t  str_ptr,
const size_t  str_len 
) const
private

Definition at line 627 of file ResultSetIteration.cpp.

References CHECK, copy_from_gpu(), CPU, device_id_, device_type_, QueryMemoryDescriptor::getExecutor(), GPU, query_mem_desc_, and row_set_mem_owner_.

628  {
629  char* host_str_ptr{nullptr};
630  std::vector<int8_t> cpu_buffer;
631  if (device_type_ == ExecutorDeviceType::GPU) {
632  cpu_buffer.resize(str_len);
633  const auto executor = query_mem_desc_.getExecutor();
634  CHECK(executor);
635  auto& data_mgr = executor->catalog_->getDataMgr();
636  copy_from_gpu(&data_mgr,
637  &cpu_buffer[0],
638  static_cast<CUdeviceptr>(str_ptr),
639  str_len,
640  device_id_);
641  host_str_ptr = reinterpret_cast<char*>(&cpu_buffer[0]);
642  } else {
643  CHECK(device_type_ == ExecutorDeviceType::CPU);
644  host_str_ptr = reinterpret_cast<char*>(str_ptr);
645  }
646  std::string str(host_str_ptr, str_len);
647  return InternalTargetValue(row_set_mem_owner_->addString(str));
648 }
QueryMemoryDescriptor query_mem_desc_
Definition: ResultSet.h:821
const Executor * getExecutor() const
std::shared_ptr< RowSetMemoryOwner > row_set_mem_owner_
Definition: ResultSet.h:828
void copy_from_gpu(Data_Namespace::DataMgr *data_mgr, void *dst, const CUdeviceptr src, const size_t num_bytes, const int device_id)
const ExecutorDeviceType device_type_
Definition: ResultSet.h:819
#define CHECK(condition)
Definition: Logger.h:197
const int device_id_
Definition: ResultSet.h:820
+ Here is the call graph for this function:

◆ holdChunkIterators()

void ResultSet::holdChunkIterators ( const std::shared_ptr< std::list< ChunkIter >>  chunk_iters)
inline

Definition at line 466 of file ResultSet.h.

466  {
467  chunk_iters_.push_back(chunk_iters);
468  }
std::vector< std::shared_ptr< std::list< ChunkIter > > > chunk_iters_
Definition: ResultSet.h:835

◆ holdChunks()

void ResultSet::holdChunks ( const std::list< std::shared_ptr< Chunk_NS::Chunk >> &  chunks)
inline

Definition at line 463 of file ResultSet.h.

463  {
464  chunks_ = chunks;
465  }
std::list< std::shared_ptr< Chunk_NS::Chunk > > chunks_
Definition: ResultSet.h:834

◆ holdLiterals()

void ResultSet::holdLiterals ( std::vector< int8_t > &  literal_buff)
inline

Definition at line 469 of file ResultSet.h.

469  {
470  literal_buffers_.push_back(std::move(literal_buff));
471  }
std::vector< std::vector< int8_t > > literal_buffers_
Definition: ResultSet.h:838

◆ initializeStorage()

void ResultSet::initializeStorage ( ) const

Definition at line 989 of file ResultSetReduction.cpp.

References QueryMemoryDescriptor::didOutputColumnar(), and ResultSetStorage::query_mem_desc_.

989  {
990  if (storage_->query_mem_desc_.didOutputColumnar()) {
991  storage_->initializeColWise();
992  } else {
993  storage_->initializeRowWise();
994  }
995 }
QueryMemoryDescriptor query_mem_desc_
Definition: ResultSet.h:821
std::unique_ptr< ResultSetStorage > storage_
Definition: ResultSet.h:822
+ Here is the call graph for this function:

◆ initPermutationBuffer()

std::vector< uint32_t > ResultSet::initPermutationBuffer ( const size_t  start,
const size_t  step 
)
private

Definition at line 574 of file ResultSet.cpp.

References CHECK, CHECK_NE, findStorage(), QueryMemoryDescriptor::getEntryCount(), and query_mem_desc_.

Referenced by parallelTop(), and sort().

575  {
576  CHECK_NE(size_t(0), step);
577  std::vector<uint32_t> permutation;
578  const auto total_entries = query_mem_desc_.getEntryCount();
579  permutation.reserve(total_entries / step);
580  for (size_t i = start; i < total_entries; i += step) {
581  const auto storage_lookup_result = findStorage(i);
582  const auto lhs_storage = storage_lookup_result.storage_ptr;
583  const auto off = storage_lookup_result.fixedup_entry_idx;
584  CHECK(lhs_storage);
585  if (!lhs_storage->isEmptyEntry(off)) {
586  permutation.push_back(i);
587  }
588  }
589  return permutation;
590 }
QueryMemoryDescriptor query_mem_desc_
Definition: ResultSet.h:821
#define CHECK_NE(x, y)
Definition: Logger.h:206
StorageLookupResult findStorage(const size_t entry_idx) const
Definition: ResultSet.cpp:657
#define CHECK(condition)
Definition: Logger.h:197
+ Here is the call graph for this function:
+ Here is the caller graph for this function:

◆ isDirectColumnarConversionPossible()

bool ResultSet::isDirectColumnarConversionPossible ( ) const

Determines if it is possible to directly form a ColumnarResults class from this result set, bypassing the default columnarization.

NOTE: If there exists a permutation vector (i.e., in some ORDER BY queries), it becomes equivalent to the row-wise columnarization.

Definition at line 923 of file ResultSet.cpp.

References QueryMemoryDescriptor::didOutputColumnar(), g_enable_direct_columnarization, QueryMemoryDescriptor::getQueryDescriptionType(), GroupByBaselineHash, GroupByPerfectHash, permutation_, Projection, and query_mem_desc_.

Referenced by copyColumnIntoBuffer(), and getSupportedSingleSlotTargetBitmap().

+ Here is the call graph for this function:
+ Here is the caller graph for this function:

◆ isExplain()

bool ResultSet::isExplain ( ) const

Definition at line 484 of file ResultSet.cpp.

References just_explain_.

484  {
485  return just_explain_;
486 }
const bool just_explain_
Definition: ResultSet.h:855

◆ isGeoColOnGpu()

bool ResultSet::isGeoColOnGpu ( const size_t  col_idx) const

Definition at line 1483 of file ResultSetIteration.cpp.

References CHECK_LT, device_type_, GPU, IS_GEO, lazy_fetch_info_, separate_varlen_storage_valid_, targets_, and to_string().

1483  {
1484  // This should match the logic in makeGeoTargetValue which ultimately calls
1485  // fetch_data_from_gpu when the geo column is on the device.
1486  // TODO(croot): somehow find a way to refactor this and makeGeoTargetValue to use a
1487  // utility function that handles this logic in one place
1488  CHECK_LT(col_idx, targets_.size());
1489  if (!IS_GEO(targets_[col_idx].sql_type.get_type())) {
1490  throw std::runtime_error("Column target at index " + std::to_string(col_idx) +
1491  " is not a geo column. It is of type " +
1492  targets_[col_idx].sql_type.get_type_name() + ".");
1493  }
1494 
1495  const auto& target_info = targets_[col_idx];
1496  if (separate_varlen_storage_valid_ && !target_info.is_agg) {
1497  return false;
1498  }
1499 
1500  if (!lazy_fetch_info_.empty()) {
1501  CHECK_LT(col_idx, lazy_fetch_info_.size());
1502  if (lazy_fetch_info_[col_idx].is_lazily_fetched) {
1503  return false;
1504  }
1505  }
1506 
1507  return device_type_ == ExecutorDeviceType::GPU;
1508 }
std::string to_string(char const *&&v)
const std::vector< TargetInfo > targets_
Definition: ResultSet.h:818
const std::vector< ColumnLazyFetchInfo > lazy_fetch_info_
Definition: ResultSet.h:839
#define CHECK_LT(x, y)
Definition: Logger.h:207
const ExecutorDeviceType device_type_
Definition: ResultSet.h:819
bool separate_varlen_storage_valid_
Definition: ResultSet.h:853
#define IS_GEO(T)
Definition: sqltypes.h:173
+ Here is the call graph for this function:

◆ isNull()

bool ResultSet::isNull ( const SQLTypeInfo ti,
const InternalTargetValue val,
const bool  float_argument_input 
)
staticprivate

Definition at line 2225 of file ResultSetIteration.cpp.

References CHECK, SQLTypeInfo::get_notnull(), InternalTargetValue::i1, InternalTargetValue::i2, InternalTargetValue::isInt(), InternalTargetValue::isNull(), InternalTargetValue::isPair(), InternalTargetValue::isStr(), NULL_DOUBLE, null_val_bit_pattern(), and pair_to_double().

Referenced by ResultSet::ResultSetComparator< BUFFER_ITERATOR_TYPE >::operator()().

2227  {
2228  if (ti.get_notnull()) {
2229  return false;
2230  }
2231  if (val.isInt()) {
2232  return val.i1 == null_val_bit_pattern(ti, float_argument_input);
2233  }
2234  if (val.isPair()) {
2235  return !val.i2 ||
2236  pair_to_double({val.i1, val.i2}, ti, float_argument_input) == NULL_DOUBLE;
2237  }
2238  if (val.isStr()) {
2239  return !val.i1;
2240  }
2241  CHECK(val.isNull());
2242  return true;
2243 }
#define NULL_DOUBLE
Definition: sqltypes.h:185
bool isNull() const
Definition: TargetValue.h:69
bool isPair() const
Definition: TargetValue.h:67
double pair_to_double(const std::pair< int64_t, int64_t > &fp_pair, const SQLTypeInfo &ti, const bool float_argument_input)
int64_t null_val_bit_pattern(const SQLTypeInfo &ti, const bool float_argument_input)
bool isStr() const
Definition: TargetValue.h:71
HOST DEVICE bool get_notnull() const
Definition: sqltypes.h:265
bool isInt() const
Definition: TargetValue.h:65
#define CHECK(condition)
Definition: Logger.h:197
+ Here is the call graph for this function:
+ Here is the caller graph for this function:

◆ isPermutationBufferEmpty()

const bool ResultSet::isPermutationBufferEmpty ( ) const
inline

Definition at line 478 of file ResultSet.h.

478 { return permutation_.empty(); };
std::vector< uint32_t > permutation_
Definition: ResultSet.h:829

◆ isRowAtEmpty()

bool ResultSet::isRowAtEmpty ( const size_t  logical_index ) const

Definition at line 281 of file ResultSetIteration.cpp.

Referenced by parallelRowCount().

281  {
282  if (logical_index >= entryCount()) {
283  return true;
284  }
285  const auto entry_idx =
286  permutation_.empty() ? logical_index : permutation_[logical_index];
287  const auto storage_lookup_result = findStorage(entry_idx);
288  const auto storage = storage_lookup_result.storage_ptr;
289  const auto local_entry_idx = storage_lookup_result.fixedup_entry_idx;
290  return storage->isEmptyEntry(local_entry_idx);
291 }
size_t entryCount() const
std::vector< uint32_t > permutation_
Definition: ResultSet.h:829
StorageLookupResult findStorage(const size_t entry_idx) const
Definition: ResultSet.cpp:657
+ Here is the caller graph for this function:

◆ isTruncated()

bool ResultSet::isTruncated ( ) const

Definition at line 480 of file ResultSet.cpp.

References drop_first_, and keep_first_.

480  {
481  return keep_first_ + drop_first_;
482 }
size_t keep_first_
Definition: ResultSet.h:827
size_t drop_first_
Definition: ResultSet.h:826

◆ isZeroCopyColumnarConversionPossible()

bool ResultSet::isZeroCopyColumnarConversionPossible ( size_t  column_idx) const

Definition at line 941 of file ResultSet.cpp.

References appended_storage_, QueryMemoryDescriptor::didOutputColumnar(), QueryMemoryDescriptor::getQueryDescriptionType(), lazy_fetch_info_, Projection, query_mem_desc_, and storage_.

Referenced by getColumnarBuffer().

941  {
942  return query_mem_desc_.didOutputColumnar() &&
943         query_mem_desc_.getQueryDescriptionType() == QueryDescriptionType::Projection &&
944  appended_storage_.empty() && storage_ &&
945  (lazy_fetch_info_.empty() || !lazy_fetch_info_[column_idx].is_lazily_fetched);
946 }
AppendedStorage appended_storage_
Definition: ResultSet.h:823
QueryMemoryDescriptor query_mem_desc_
Definition: ResultSet.h:821
std::unique_ptr< ResultSetStorage > storage_
Definition: ResultSet.h:822
const std::vector< ColumnLazyFetchInfo > lazy_fetch_info_
Definition: ResultSet.h:839
QueryDescriptionType getQueryDescriptionType() const
+ Here is the call graph for this function:
+ Here is the caller graph for this function:

◆ keepFirstN()

void ResultSet::keepFirstN ( const size_t  n)

Definition at line 90 of file ResultSet.cpp.

References CHECK_EQ.

90  {
91  CHECK_EQ(-1, cached_row_count_);
92  keep_first_ = n;
93 }
#define CHECK_EQ(x, y)
Definition: Logger.h:205
size_t keep_first_
Definition: ResultSet.h:827
std::atomic< ssize_t > cached_row_count_
Definition: ResultSet.h:856

◆ lazyReadInt()

int64_t ResultSet::lazyReadInt ( const int64_t  ival,
const size_t  target_logical_idx,
const StorageLookupResult storage_lookup_result 
) const
private

Definition at line 650 of file ResultSetIteration.cpp.

References CHECK, CHECK_LT, ChunkIter_get_nth(), col_buffers_, ResultSet::StorageLookupResult::fixedup_entry_idx, getColumnFrag(), VarlenDatum::is_null, kENCODING_NONE, lazy_decode(), lazy_fetch_info_, VarlenDatum::length, VarlenDatum::pointer, row_set_mem_owner_, ResultSet::StorageLookupResult::storage_idx, and targets_.

652  {
653  if (!lazy_fetch_info_.empty()) {
654  CHECK_LT(target_logical_idx, lazy_fetch_info_.size());
655  const auto& col_lazy_fetch = lazy_fetch_info_[target_logical_idx];
656  if (col_lazy_fetch.is_lazily_fetched) {
657  CHECK_LT(static_cast<size_t>(storage_lookup_result.storage_idx),
658  col_buffers_.size());
659  int64_t ival_copy = ival;
660  auto& frag_col_buffers =
661  getColumnFrag(static_cast<size_t>(storage_lookup_result.storage_idx),
662  target_logical_idx,
663  ival_copy);
664  auto& frag_col_buffer = frag_col_buffers[col_lazy_fetch.local_col_id];
665  CHECK_LT(target_logical_idx, targets_.size());
666  const TargetInfo& target_info = targets_[target_logical_idx];
667  CHECK(!target_info.is_agg);
668  if (target_info.sql_type.is_string() &&
669  target_info.sql_type.get_compression() == kENCODING_NONE) {
670  VarlenDatum vd;
671  bool is_end{false};
672  ChunkIter_get_nth(
673  reinterpret_cast<ChunkIter*>(const_cast<int8_t*>(frag_col_buffer)),
674  storage_lookup_result.fixedup_entry_idx,
675  false,
676  &vd,
677  &is_end);
678  CHECK(!is_end);
679  if (vd.is_null) {
680  return 0;
681  }
682  std::string fetched_str(reinterpret_cast<char*>(vd.pointer), vd.length);
683  return reinterpret_cast<int64_t>(row_set_mem_owner_->addString(fetched_str));
684  }
685  return lazy_decode(col_lazy_fetch, frag_col_buffer, ival_copy);
686  }
687  }
688  return ival;
689 }
const std::vector< const int8_t * > & getColumnFrag(const size_t storge_idx, const size_t col_logical_idx, int64_t &global_idx) const
bool is_null
Definition: sqltypes.h:74
int64_t lazy_decode(const ColumnLazyFetchInfo &col_lazy_fetch, const int8_t *byte_stream, const int64_t pos)
DEVICE void ChunkIter_get_nth(ChunkIter *it, int n, bool uncompress, VarlenDatum *result, bool *is_end)
Definition: ChunkIter.cpp:181
int8_t * pointer
Definition: sqltypes.h:73
const std::vector< TargetInfo > targets_
Definition: ResultSet.h:818
std::shared_ptr< RowSetMemoryOwner > row_set_mem_owner_
Definition: ResultSet.h:828
const std::vector< ColumnLazyFetchInfo > lazy_fetch_info_
Definition: ResultSet.h:839
#define CHECK_LT(x, y)
Definition: Logger.h:207
std::vector< std::vector< std::vector< const int8_t * > > > col_buffers_
Definition: ResultSet.h:840
#define CHECK(condition)
Definition: Logger.h:197
size_t length
Definition: sqltypes.h:72
+ Here is the call graph for this function:

◆ makeGeoTargetValue()

TargetValue ResultSet::makeGeoTargetValue ( const int8_t *  geo_target_ptr,
const size_t  slot_idx,
const TargetInfo target_info,
const size_t  target_logical_idx,
const size_t  entry_buff_idx 
) const
private

Definition at line 1514 of file ResultSetIteration.cpp.

References advance_to_next_columnar_target_buff(), CHECK, CHECK_EQ, CHECK_LT, col_buffers_, device_id_, device_type_, QueryMemoryDescriptor::didOutputColumnar(), findStorage(), geo_return_type_, SQLTypeInfo::get_type(), SQLTypeInfo::get_type_name(), getColumnFrag(), QueryMemoryDescriptor::getExecutor(), QueryMemoryDescriptor::getPaddedColWidthForRange(), QueryMemoryDescriptor::getPaddedSlotWidthBytes(), getStorageIndex(), GPU, TargetInfo::is_agg, SQLTypeInfo::is_geometry(), ColumnLazyFetchInfo::is_lazily_fetched, kLINESTRING, kMULTIPOLYGON, kPOINT, kPOLYGON, lazy_fetch_info_, ColumnLazyFetchInfo::local_col_id, query_mem_desc_, read_int_from_buff(), separate_varlen_storage_valid_, serialized_varlen_buffer_, TargetInfo::sql_type, and UNREACHABLE.

Referenced by getTargetValueFromBufferColwise(), and getTargetValueFromBufferRowwise().

1518  {
1519  CHECK(target_info.sql_type.is_geometry());
1520 
1521  auto getNextTargetBufferRowWise = [&](const size_t slot_idx, const size_t range) {
1522  return geo_target_ptr + query_mem_desc_.getPaddedColWidthForRange(slot_idx, range);
1523  };
1524 
1525  auto getNextTargetBufferColWise = [&](const size_t slot_idx, const size_t range) {
1526  const auto storage_info = findStorage(entry_buff_idx);
1527  auto crt_geo_col_ptr = geo_target_ptr;
1528  for (size_t i = slot_idx; i < slot_idx + range; i++) {
1529  crt_geo_col_ptr = advance_to_next_columnar_target_buff(
1530  crt_geo_col_ptr, storage_info.storage_ptr->query_mem_desc_, i);
1531  }
1532  // adjusting the column pointer to represent a pointer to the geo target value
1533  return crt_geo_col_ptr +
1534  storage_info.fixedup_entry_idx *
1535  storage_info.storage_ptr->query_mem_desc_.getPaddedSlotWidthBytes(
1536  slot_idx + range);
1537  };
1538 
1539  auto getNextTargetBuffer = [&](const size_t slot_idx, const size_t range) {
1541  ? getNextTargetBufferColWise(slot_idx, range)
1542  : getNextTargetBufferRowWise(slot_idx, range);
1543  };
1544 
1545  auto getCoordsDataPtr = [&](const int8_t* geo_target_ptr) {
1546  return read_int_from_buff(getNextTargetBuffer(slot_idx, 0),
1548  };
1549 
1550  auto getCoordsLength = [&](const int8_t* geo_target_ptr) {
1551  return read_int_from_buff(getNextTargetBuffer(slot_idx, 1),
1553  };
1554 
1555  auto getRingSizesPtr = [&](const int8_t* geo_target_ptr) {
1556  return read_int_from_buff(getNextTargetBuffer(slot_idx, 2),
1558  };
1559 
1560  auto getRingSizesLength = [&](const int8_t* geo_target_ptr) {
1561  return read_int_from_buff(getNextTargetBuffer(slot_idx, 3),
1563  };
1564 
1565  auto getPolyRingsPtr = [&](const int8_t* geo_target_ptr) {
1566  return read_int_from_buff(getNextTargetBuffer(slot_idx, 4),
1568  };
1569 
1570  auto getPolyRingsLength = [&](const int8_t* geo_target_ptr) {
1571  return read_int_from_buff(getNextTargetBuffer(slot_idx, 5),
1573  };
1574 
1575  auto getFragColBuffers = [&]() -> decltype(auto) {
1576  const auto storage_idx = getStorageIndex(entry_buff_idx);
1577  CHECK_LT(storage_idx.first, col_buffers_.size());
1578  auto global_idx = getCoordsDataPtr(geo_target_ptr);
1579  return getColumnFrag(storage_idx.first, target_logical_idx, global_idx);
1580  };
1581 
1582  const bool is_gpu_fetch = device_type_ == ExecutorDeviceType::GPU;
1583 
1584  auto getDataMgr = [&]() {
1585  auto executor = query_mem_desc_.getExecutor();
1586  CHECK(executor);
1587  auto& data_mgr = executor->catalog_->getDataMgr();
1588  return &data_mgr;
1589  };
1590 
1591  auto getSeparateVarlenStorage = [&]() -> decltype(auto) {
1592  const auto storage_idx = getStorageIndex(entry_buff_idx);
1593  CHECK_LT(storage_idx.first, serialized_varlen_buffer_.size());
1594  const auto& varlen_buffer = serialized_varlen_buffer_[storage_idx.first];
1595  return varlen_buffer;
1596  };
1597 
1598  if (separate_varlen_storage_valid_ && getCoordsDataPtr(geo_target_ptr) < 0) {
1599  CHECK_EQ(-1, getCoordsDataPtr(geo_target_ptr));
1600  return TargetValue(nullptr);
1601  }
1602 
1603  const ColumnLazyFetchInfo* col_lazy_fetch = nullptr;
1604  if (!lazy_fetch_info_.empty()) {
1605  CHECK_LT(target_logical_idx, lazy_fetch_info_.size());
1606  col_lazy_fetch = &lazy_fetch_info_[target_logical_idx];
1607  }
1608 
1609  switch (target_info.sql_type.get_type()) {
1610  case kPOINT: {
1611  if (separate_varlen_storage_valid_ && !target_info.is_agg) {
1612  const auto& varlen_buffer = getSeparateVarlenStorage();
1613  CHECK_LT(static_cast<size_t>(getCoordsDataPtr(geo_target_ptr)),
1614  varlen_buffer.size());
1615 
1616  return GeoTargetValueBuilder<kPOINT, GeoQueryOutputFetchHandler>::build(
1617  target_info.sql_type,
1619  nullptr,
1620  false,
1621  device_id_,
1622  reinterpret_cast<int64_t>(
1623  varlen_buffer[getCoordsDataPtr(geo_target_ptr)].data()),
1624  static_cast<int64_t>(varlen_buffer[getCoordsDataPtr(geo_target_ptr)].size()));
1625  } else if (col_lazy_fetch && col_lazy_fetch->is_lazily_fetched) {
1626  const auto& frag_col_buffers = getFragColBuffers();
1627  return GeoTargetValueBuilder<kPOINT, GeoLazyFetchHandler>::build(
1628  target_info.sql_type,
1630  frag_col_buffers[col_lazy_fetch->local_col_id],
1631  getCoordsDataPtr(geo_target_ptr));
1632  } else {
1633  return GeoTargetValueBuilder<kPOINT, GeoQueryOutputFetchHandler>::build(
1634  target_info.sql_type,
1636  is_gpu_fetch ? getDataMgr() : nullptr,
1637  is_gpu_fetch,
1638  device_id_,
1639  getCoordsDataPtr(geo_target_ptr),
1640  getCoordsLength(geo_target_ptr));
1641  }
1642  break;
1643  }
1644  case kLINESTRING: {
1645  if (separate_varlen_storage_valid_ && !target_info.is_agg) {
1646  const auto& varlen_buffer = getSeparateVarlenStorage();
1647  CHECK_LT(static_cast<size_t>(getCoordsDataPtr(geo_target_ptr)),
1648  varlen_buffer.size());
1649 
1650  return GeoTargetValueBuilder<kLINESTRING, GeoQueryOutputFetchHandler>::build(
1651  target_info.sql_type,
1653  nullptr,
1654  false,
1655  device_id_,
1656  reinterpret_cast<int64_t>(
1657  varlen_buffer[getCoordsDataPtr(geo_target_ptr)].data()),
1658  static_cast<int64_t>(varlen_buffer[getCoordsDataPtr(geo_target_ptr)].size()));
1659  } else if (col_lazy_fetch && col_lazy_fetch->is_lazily_fetched) {
1660  const auto& frag_col_buffers = getFragColBuffers();
1661  return GeoTargetValueBuilder<kLINESTRING, GeoLazyFetchHandler>::build(
1662  target_info.sql_type,
1664  frag_col_buffers[col_lazy_fetch->local_col_id],
1665  getCoordsDataPtr(geo_target_ptr));
1666  } else {
1667  return GeoTargetValueBuilder<kLINESTRING, GeoQueryOutputFetchHandler>::build(
1668  target_info.sql_type,
1670  is_gpu_fetch ? getDataMgr() : nullptr,
1671  is_gpu_fetch,
1672  device_id_,
1673  getCoordsDataPtr(geo_target_ptr),
1674  getCoordsLength(geo_target_ptr));
1675  }
1676  break;
1677  }
1678  case kPOLYGON: {
1679  if (separate_varlen_storage_valid_ && !target_info.is_agg) {
1680  const auto& varlen_buffer = getSeparateVarlenStorage();
1681  CHECK_LT(static_cast<size_t>(getCoordsDataPtr(geo_target_ptr) + 1),
1682  varlen_buffer.size());
1683 
1684  return GeoTargetValueBuilder<kPOLYGON, GeoQueryOutputFetchHandler>::build(
1685  target_info.sql_type,
1687  nullptr,
1688  false,
1689  device_id_,
1690  reinterpret_cast<int64_t>(
1691  varlen_buffer[getCoordsDataPtr(geo_target_ptr)].data()),
1692  static_cast<int64_t>(varlen_buffer[getCoordsDataPtr(geo_target_ptr)].size()),
1693  reinterpret_cast<int64_t>(
1694  varlen_buffer[getCoordsDataPtr(geo_target_ptr) + 1].data()),
1695  static_cast<int64_t>(
1696  varlen_buffer[getCoordsDataPtr(geo_target_ptr) + 1].size()));
1697  } else if (col_lazy_fetch && col_lazy_fetch->is_lazily_fetched) {
1698  const auto& frag_col_buffers = getFragColBuffers();
1699 
1700  return GeoTargetValueBuilder<kPOLYGON, GeoLazyFetchHandler>::build(
1701  target_info.sql_type,
1703  frag_col_buffers[col_lazy_fetch->local_col_id],
1704  getCoordsDataPtr(geo_target_ptr),
1705  frag_col_buffers[col_lazy_fetch->local_col_id + 1],
1706  getCoordsDataPtr(geo_target_ptr));
1707  } else {
1708  return GeoTargetValueBuilder<kPOLYGON, GeoQueryOutputFetchHandler>::build(
1709  target_info.sql_type,
1711  is_gpu_fetch ? getDataMgr() : nullptr,
1712  is_gpu_fetch,
1713  device_id_,
1714  getCoordsDataPtr(geo_target_ptr),
1715  getCoordsLength(geo_target_ptr),
1716  getRingSizesPtr(geo_target_ptr),
1717  getRingSizesLength(geo_target_ptr) * 4);
1718  }
1719  break;
1720  }
1721  case kMULTIPOLYGON: {
1722  if (separate_varlen_storage_valid_ && !target_info.is_agg) {
1723  const auto& varlen_buffer = getSeparateVarlenStorage();
1724  CHECK_LT(static_cast<size_t>(getCoordsDataPtr(geo_target_ptr) + 2),
1725  varlen_buffer.size());
1726 
1727  return GeoTargetValueBuilder<kMULTIPOLYGON, GeoQueryOutputFetchHandler>::build(
1728  target_info.sql_type,
1730  nullptr,
1731  false,
1732  device_id_,
1733  reinterpret_cast<int64_t>(
1734  varlen_buffer[getCoordsDataPtr(geo_target_ptr)].data()),
1735  static_cast<int64_t>(varlen_buffer[getCoordsDataPtr(geo_target_ptr)].size()),
1736  reinterpret_cast<int64_t>(
1737  varlen_buffer[getCoordsDataPtr(geo_target_ptr) + 1].data()),
1738  static_cast<int64_t>(
1739  varlen_buffer[getCoordsDataPtr(geo_target_ptr) + 1].size()),
1740  reinterpret_cast<int64_t>(
1741  varlen_buffer[getCoordsDataPtr(geo_target_ptr) + 2].data()),
1742  static_cast<int64_t>(
1743  varlen_buffer[getCoordsDataPtr(geo_target_ptr) + 2].size()));
1744  } else if (col_lazy_fetch && col_lazy_fetch->is_lazily_fetched) {
1745  const auto& frag_col_buffers = getFragColBuffers();
1746 
1747  return GeoTargetValueBuilder<kMULTIPOLYGON, GeoLazyFetchHandler>::build(
1748  target_info.sql_type,
1750  frag_col_buffers[col_lazy_fetch->local_col_id],
1751  getCoordsDataPtr(geo_target_ptr),
1752  frag_col_buffers[col_lazy_fetch->local_col_id + 1],
1753  getCoordsDataPtr(geo_target_ptr),
1754  frag_col_buffers[col_lazy_fetch->local_col_id + 2],
1755  getCoordsDataPtr(geo_target_ptr));
1756  } else {
1757  return GeoTargetValueBuilder<kMULTIPOLYGON, GeoQueryOutputFetchHandler>::build(
1758  target_info.sql_type,
1760  is_gpu_fetch ? getDataMgr() : nullptr,
1761  is_gpu_fetch,
1762  device_id_,
1763  getCoordsDataPtr(geo_target_ptr),
1764  getCoordsLength(geo_target_ptr),
1765  getRingSizesPtr(geo_target_ptr),
1766  getRingSizesLength(geo_target_ptr) * 4,
1767  getPolyRingsPtr(geo_target_ptr),
1768  getPolyRingsLength(geo_target_ptr) * 4);
1769  }
1770  break;
1771  }
1772  default:
1773  throw std::runtime_error("Unknown Geometry type encountered: " +
1774  target_info.sql_type.get_type_name());
1775  }
1776  UNREACHABLE();
1777  return TargetValue(nullptr);
1778 }
#define CHECK_EQ(x, y)
Definition: Logger.h:205
const std::vector< const int8_t * > & getColumnFrag(const size_t storage_idx, const size_t col_logical_idx, int64_t &global_idx) const
GeoReturnType geo_return_type_
Definition: ResultSet.h:860
T advance_to_next_columnar_target_buff(T target_ptr, const QueryMemoryDescriptor &query_mem_desc, const size_t target_slot_idx)
SQLTypeInfo sql_type
Definition: TargetInfo.h:42
QueryMemoryDescriptor query_mem_desc_
Definition: ResultSet.h:821
#define UNREACHABLE()
Definition: Logger.h:241
int64_t read_int_from_buff(const int8_t *ptr, const int8_t compact_sz)
std::vector< SerializedVarlenBufferStorage > serialized_varlen_buffer_
Definition: ResultSet.h:852
const int8_t getPaddedSlotWidthBytes(const size_t slot_idx) const
const int local_col_id
Definition: ResultSet.h:235
const Executor * getExecutor() const
bool is_agg
Definition: TargetInfo.h:40
std::pair< size_t, size_t > getStorageIndex(const size_t entry_idx) const
Definition: ResultSet.cpp:635
StorageLookupResult findStorage(const size_t entry_idx) const
Definition: ResultSet.cpp:657
const std::vector< ColumnLazyFetchInfo > lazy_fetch_info_
Definition: ResultSet.h:839
size_t getPaddedColWidthForRange(const size_t offset, const size_t range) const
#define CHECK_LT(x, y)
Definition: Logger.h:207
bool is_geometry() const
Definition: sqltypes.h:421
std::string get_type_name() const
Definition: sqltypes.h:361
std::vector< std::vector< std::vector< const int8_t * > > > col_buffers_
Definition: ResultSet.h:840
const bool is_lazily_fetched
Definition: ResultSet.h:234
const ExecutorDeviceType device_type_
Definition: ResultSet.h:819
#define CHECK(condition)
Definition: Logger.h:197
HOST DEVICE SQLTypes get_type() const
Definition: sqltypes.h:258
bool separate_varlen_storage_valid_
Definition: ResultSet.h:853
boost::variant< ScalarTargetValue, ArrayTargetValue, GeoTargetValue, GeoTargetValuePtr > TargetValue
Definition: TargetValue.h:167
const int device_id_
Definition: ResultSet.h:820
+ Here is the call graph for this function:
+ Here is the caller graph for this function:

◆ makeTargetValue()

TargetValue ResultSet::makeTargetValue ( const int8_t *  ptr,
const int8_t  compact_sz,
const TargetInfo target_info,
const size_t  target_logical_idx,
const bool  translate_strings,
const bool  decimal_to_double,
const size_t  entry_buff_idx 
) const
private

Definition at line 1781 of file ResultSetIteration.cpp.

References TargetInfo::agg_kind, CHECK, CHECK_EQ, CHECK_GE, CHECK_LT, col_buffers_, count_distinct_set_size(), decimal_to_int_type(), executor_, exp_to_scale(), QueryMemoryDescriptor::forceFourByteFloat(), SQLTypeInfo::get_comp_param(), get_compact_type(), SQLTypeInfo::get_compression(), SQLTypeInfo::get_type(), getColumnFrag(), QueryMemoryDescriptor::getCountDistinctDescriptor(), getStorageIndex(), inline_int_null_val(), anonymous_namespace{ResultSetIteration.cpp}::int_resize_cast(), TargetInfo::is_agg, SQLTypeInfo::is_date_in_days(), is_distinct_target(), SQLTypeInfo::is_string(), QueryMemoryDescriptor::isLogicalSizedColumnsAllowed(), kAVG, kBIGINT, kENCODING_DICT, kFLOAT, kMAX, kMIN, kSINGLE_VALUE, kSUM, lazy_decode(), lazy_fetch_info_, NULL_DOUBLE, NULL_INT, query_mem_desc_, read_int_from_buff(), row_set_mem_owner_, and TargetInfo::sql_type.

Referenced by getTargetValueFromBufferColwise(), and getTargetValueFromBufferRowwise().

1787  {
1788  auto actual_compact_sz = compact_sz;
1789  if (target_info.sql_type.get_type() == kFLOAT &&
1792  actual_compact_sz = sizeof(float);
1793  } else {
1794  actual_compact_sz = sizeof(double);
1795  }
1796  if (target_info.is_agg &&
1797  (target_info.agg_kind == kAVG || target_info.agg_kind == kSUM ||
1798  target_info.agg_kind == kMIN || target_info.agg_kind == kMAX ||
1799  target_info.agg_kind == kSINGLE_VALUE)) {
1800  // The above listed aggregates use two floats in a single 8-byte slot. Set the
1801  // padded size to 4 bytes to properly read each value.
1802  actual_compact_sz = sizeof(float);
1803  }
1804  }
1805  if (get_compact_type(target_info).is_date_in_days()) {
1806  // Dates encoded in days are converted to 8 byte values on read.
1807  actual_compact_sz = sizeof(int64_t);
1808  }
1809 
1810  // String dictionary keys are read as 32-bit values regardless of encoding
1811  if (target_info.sql_type.is_string() &&
1812  target_info.sql_type.get_compression() == kENCODING_DICT &&
1813  target_info.sql_type.get_comp_param()) {
1814  actual_compact_sz = sizeof(int32_t);
1815  }
1816 
1817  auto ival = read_int_from_buff(ptr, actual_compact_sz);
1818  const auto& chosen_type = get_compact_type(target_info);
1819  if (!lazy_fetch_info_.empty()) {
1820  CHECK_LT(target_logical_idx, lazy_fetch_info_.size());
1821  const auto& col_lazy_fetch = lazy_fetch_info_[target_logical_idx];
1822  if (col_lazy_fetch.is_lazily_fetched) {
1823  CHECK_GE(ival, 0);
1824  const auto storage_idx = getStorageIndex(entry_buff_idx);
1825  CHECK_LT(storage_idx.first, col_buffers_.size());
1826  auto& frag_col_buffers = getColumnFrag(storage_idx.first, target_logical_idx, ival);
1827  CHECK_LT(size_t(col_lazy_fetch.local_col_id), frag_col_buffers.size());
1828  ival = lazy_decode(
1829  col_lazy_fetch, frag_col_buffers[col_lazy_fetch.local_col_id], ival);
1830  if (chosen_type.is_fp()) {
1831  const auto dval = *reinterpret_cast<const double*>(may_alias_ptr(&ival));
1832  if (chosen_type.get_type() == kFLOAT) {
1833  return ScalarTargetValue(static_cast<float>(dval));
1834  } else {
1835  return ScalarTargetValue(dval);
1836  }
1837  }
1838  }
1839  }
1840  if (chosen_type.is_fp()) {
1841  switch (actual_compact_sz) {
1842  case 8: {
1843  const auto dval = *reinterpret_cast<const double*>(ptr);
1844  return chosen_type.get_type() == kFLOAT
1845  ? ScalarTargetValue(static_cast<const float>(dval))
1846  : ScalarTargetValue(dval);
1847  }
1848  case 4: {
1849  CHECK_EQ(kFLOAT, chosen_type.get_type());
1850  return *reinterpret_cast<const float*>(ptr);
1851  }
1852  default:
1853  CHECK(false);
1854  }
1855  }
1856  if (chosen_type.is_integer() | chosen_type.is_boolean() || chosen_type.is_time() ||
1857  chosen_type.is_timeinterval()) {
1858  if (is_distinct_target(target_info)) {
1860  ival, query_mem_desc_.getCountDistinctDescriptor(target_logical_idx)));
1861  }
1862  // TODO(alex): remove int_resize_cast, make read_int_from_buff return the
1863  // right type instead
1864  if (inline_int_null_val(chosen_type) ==
1865  int_resize_cast(ival, chosen_type.get_logical_size())) {
1866  return inline_int_null_val(target_info.sql_type);
1867  }
1868  return ival;
1869  }
1870  if (chosen_type.is_string() && chosen_type.get_compression() == kENCODING_DICT) {
1871  if (translate_strings) {
1872  if (static_cast<int32_t>(ival) ==
1873  NULL_INT) { // TODO(alex): this isn't nice, fix it
1874  return NullableString(nullptr);
1875  }
1876  StringDictionaryProxy* sdp{nullptr};
1877  if (!chosen_type.get_comp_param()) {
1878  sdp = row_set_mem_owner_->getLiteralStringDictProxy();
1879  } else {
1880  sdp = executor_
1881  ? executor_->getStringDictionaryProxy(
1882  chosen_type.get_comp_param(), row_set_mem_owner_, false)
1883  : row_set_mem_owner_->getStringDictProxy(chosen_type.get_comp_param());
1884  }
1885  return NullableString(sdp->getString(ival));
1886  } else {
1887  return static_cast<int64_t>(static_cast<int32_t>(ival));
1888  }
1889  }
1890  if (chosen_type.is_decimal()) {
1891  if (decimal_to_double) {
1892  if (target_info.is_agg &&
1893  (target_info.agg_kind == kAVG || target_info.agg_kind == kSUM ||
1894  target_info.agg_kind == kMIN || target_info.agg_kind == kMAX) &&
1895  ival == inline_int_null_val(SQLTypeInfo(kBIGINT, false))) {
1896  return NULL_DOUBLE;
1897  }
1898  if (ival ==
1899  inline_int_null_val(SQLTypeInfo(decimal_to_int_type(chosen_type), false))) {
1900  return NULL_DOUBLE;
1901  }
1902  return static_cast<double>(ival) / exp_to_scale(chosen_type.get_scale());
1903  }
1904  return ival;
1905  }
1906  CHECK(false);
1907  return TargetValue(int64_t(0));
1908 }
#define CHECK_EQ(x, y)
Definition: Logger.h:205
#define NULL_DOUBLE
Definition: sqltypes.h:185
bool is_string() const
Definition: sqltypes.h:409
double decimal_to_double(const SQLTypeInfo &otype, int64_t oval)
const std::vector< const int8_t * > & getColumnFrag(const size_t storage_idx, const size_t col_logical_idx, int64_t &global_idx) const
SQLTypeInfo sql_type
Definition: TargetInfo.h:42
const Executor * executor_
Definition: ResultSet.h:832
HOST DEVICE int get_comp_param() const
Definition: sqltypes.h:267
QueryMemoryDescriptor query_mem_desc_
Definition: ResultSet.h:821
#define CHECK_GE(x, y)
Definition: Logger.h:210
int64_t read_int_from_buff(const int8_t *ptr, const int8_t compact_sz)
int64_t lazy_decode(const ColumnLazyFetchInfo &col_lazy_fetch, const int8_t *byte_stream, const int64_t pos)
HOST DEVICE EncodingType get_compression() const
Definition: sqltypes.h:266
Definition: sqldefs.h:73
const SQLTypeInfo get_compact_type(const TargetInfo &target)
std::shared_ptr< RowSetMemoryOwner > row_set_mem_owner_
Definition: ResultSet.h:828
bool is_agg
Definition: TargetInfo.h:40
std::pair< size_t, size_t > getStorageIndex(const size_t entry_idx) const
Definition: ResultSet.cpp:635
int64_t count_distinct_set_size(const int64_t set_handle, const CountDistinctDescriptor &count_distinct_desc)
Definition: CountDistinct.h:75
Definition: sqldefs.h:75
bool is_distinct_target(const TargetInfo &target_info)
Definition: TargetInfo.h:117
#define NULL_INT
Definition: sqltypes.h:182
const std::vector< ColumnLazyFetchInfo > lazy_fetch_info_
Definition: ResultSet.h:839
SQLAgg agg_kind
Definition: TargetInfo.h:41
SQLTypes decimal_to_int_type(const SQLTypeInfo &ti)
Definition: Datum.cpp:302
#define CHECK_LT(x, y)
Definition: Logger.h:207
int64_t int_resize_cast(const int64_t ival, const size_t sz)
std::vector< std::vector< std::vector< const int8_t * > > > col_buffers_
Definition: ResultSet.h:840
boost::variant< std::string, void * > NullableString
Definition: TargetValue.h:155
#define CHECK(condition)
Definition: Logger.h:197
HOST DEVICE SQLTypes get_type() const
Definition: sqltypes.h:258
uint64_t exp_to_scale(const unsigned exp)
int64_t inline_int_null_val(const SQL_TYPE_INFO &ti)
boost::variant< ScalarTargetValue, ArrayTargetValue, GeoTargetValue, GeoTargetValuePtr > TargetValue
Definition: TargetValue.h:167
const CountDistinctDescriptor & getCountDistinctDescriptor(const size_t idx) const
Definition: sqldefs.h:74
Definition: sqldefs.h:72
bool is_date_in_days() const
Definition: sqltypes.h:625
boost::variant< int64_t, double, float, NullableString > ScalarTargetValue
Definition: TargetValue.h:156
+ Here is the call graph for this function:
+ Here is the caller graph for this function:

◆ makeVarlenTargetValue()

TargetValue ResultSet::makeVarlenTargetValue ( const int8_t *  ptr1,
const int8_t  compact_sz1,
const int8_t *  ptr2,
const int8_t  compact_sz2,
const TargetInfo target_info,
const size_t  target_logical_idx,
const bool  translate_strings,
const size_t  entry_buff_idx 
) const
private

Definition at line 1357 of file ResultSetIteration.cpp.

References anonymous_namespace{ResultSetIteration.cpp}::build_array_target_value(), CHECK, CHECK_EQ, CHECK_GE, CHECK_GT, CHECK_LT, ChunkIter_get_nth(), col_buffers_, copy_from_gpu(), device_id_, device_type_, executor_, SQLTypeInfo::get_array_context_logical_size(), SQLTypeInfo::get_compression(), SQLTypeInfo::get_elem_type(), SQLTypeInfo::get_type(), getColumnFrag(), QueryMemoryDescriptor::getExecutor(), getStorageIndex(), GPU, TargetInfo::is_agg, SQLTypeInfo::is_array(), VarlenDatum::is_null, SQLTypeInfo::is_string(), kARRAY, kENCODING_NONE, lazy_fetch_info_, VarlenDatum::length, run_benchmark_import::optional, VarlenDatum::pointer, query_mem_desc_, read_int_from_buff(), row_set_mem_owner_, separate_varlen_storage_valid_, serialized_varlen_buffer_, and TargetInfo::sql_type.

Referenced by getTargetValueFromBufferColwise(), and getTargetValueFromBufferRowwise().

1364  {
1365  auto varlen_ptr = read_int_from_buff(ptr1, compact_sz1);
1366  if (separate_varlen_storage_valid_ && !target_info.is_agg) {
1367  if (varlen_ptr < 0) {
1368  CHECK_EQ(-1, varlen_ptr);
1369  if (target_info.sql_type.get_type() == kARRAY) {
1370  return ArrayTargetValue(boost::optional<std::vector<ScalarTargetValue>>{});
1371  }
1372  return TargetValue(nullptr);
1373  }
1374  const auto storage_idx = getStorageIndex(entry_buff_idx);
1375  if (target_info.sql_type.is_string()) {
1376  CHECK(target_info.sql_type.get_compression() == kENCODING_NONE);
1377  CHECK_LT(storage_idx.first, serialized_varlen_buffer_.size());
1378  const auto& varlen_buffer_for_storage =
1379  serialized_varlen_buffer_[storage_idx.first];
1380  CHECK_LT(static_cast<size_t>(varlen_ptr), varlen_buffer_for_storage.size());
1381  return varlen_buffer_for_storage[varlen_ptr];
1382  } else if (target_info.sql_type.get_type() == kARRAY) {
1383  CHECK_LT(storage_idx.first, serialized_varlen_buffer_.size());
1384  const auto& varlen_buffer = serialized_varlen_buffer_[storage_idx.first];
1385  CHECK_LT(static_cast<size_t>(varlen_ptr), varlen_buffer.size());
1386 
1387  return build_array_target_value(
1388  target_info.sql_type,
1389  reinterpret_cast<const int8_t*>(varlen_buffer[varlen_ptr].data()),
1390  varlen_buffer[varlen_ptr].size(),
1391  translate_strings,
1393  executor_);
1394  } else {
1395  CHECK(false);
1396  }
1397  }
1398  if (!lazy_fetch_info_.empty()) {
1399  CHECK_LT(target_logical_idx, lazy_fetch_info_.size());
1400  const auto& col_lazy_fetch = lazy_fetch_info_[target_logical_idx];
1401  if (col_lazy_fetch.is_lazily_fetched) {
1402  const auto storage_idx = getStorageIndex(entry_buff_idx);
1403  CHECK_LT(storage_idx.first, col_buffers_.size());
1404  auto& frag_col_buffers =
1405  getColumnFrag(storage_idx.first, target_logical_idx, varlen_ptr);
1406  bool is_end{false};
1407  if (target_info.sql_type.is_string()) {
1408  VarlenDatum vd;
1409  ChunkIter_get_nth(reinterpret_cast<ChunkIter*>(const_cast<int8_t*>(
1410  frag_col_buffers[col_lazy_fetch.local_col_id])),
1411  varlen_ptr,
1412  false,
1413  &vd,
1414  &is_end);
1415  CHECK(!is_end);
1416  if (vd.is_null) {
1417  return TargetValue(nullptr);
1418  }
1419  CHECK(vd.pointer);
1420  CHECK_GT(vd.length, 0u);
1421  std::string fetched_str(reinterpret_cast<char*>(vd.pointer), vd.length);
1422  return fetched_str;
1423  } else {
1424  CHECK(target_info.sql_type.is_array());
1425  ArrayDatum ad;
1426  ChunkIter_get_nth(reinterpret_cast<ChunkIter*>(const_cast<int8_t*>(
1427  frag_col_buffers[col_lazy_fetch.local_col_id])),
1428  varlen_ptr,
1429  &ad,
1430  &is_end);
1431  CHECK(!is_end);
1432  if (ad.is_null) {
1433  return ArrayTargetValue(boost::optional<std::vector<ScalarTargetValue>>{});
1434  }
1435  CHECK_GE(ad.length, 0u);
1436  if (ad.length > 0) {
1437  CHECK(ad.pointer);
1438  }
1439  return build_array_target_value(target_info.sql_type,
1440  ad.pointer,
1441  ad.length,
1442  translate_strings,
1444  executor_);
1445  }
1446  }
1447  }
1448  if (!varlen_ptr) {
1449  if (target_info.sql_type.is_array()) {
1450  return ArrayTargetValue(boost::optional<std::vector<ScalarTargetValue>>{});
1451  }
1452  return TargetValue(nullptr);
1453  }
1454  auto length = read_int_from_buff(ptr2, compact_sz2);
1455  if (target_info.sql_type.is_array()) {
1456  const auto& elem_ti = target_info.sql_type.get_elem_type();
1457  length *= elem_ti.get_array_context_logical_size();
1458  }
1459  std::vector<int8_t> cpu_buffer;
1460  if (varlen_ptr && device_type_ == ExecutorDeviceType::GPU) {
1461  cpu_buffer.resize(length);
1462  const auto executor = query_mem_desc_.getExecutor();
1463  CHECK(executor);
1464  auto& data_mgr = executor->catalog_->getDataMgr();
1465  copy_from_gpu(&data_mgr,
1466  &cpu_buffer[0],
1467  static_cast<CUdeviceptr>(varlen_ptr),
1468  length,
1469  device_id_);
1470  varlen_ptr = reinterpret_cast<int64_t>(&cpu_buffer[0]);
1471  }
1472  if (target_info.sql_type.is_array()) {
1473  return build_array_target_value(target_info.sql_type,
1474  reinterpret_cast<const int8_t*>(varlen_ptr),
1475  length,
1476  translate_strings,
1478  executor_);
1479  }
1480  return std::string(reinterpret_cast<char*>(varlen_ptr), length);
1481 }
#define CHECK_EQ(x, y)
Definition: Logger.h:205
bool is_array() const
Definition: sqltypes.h:417
bool is_string() const
Definition: sqltypes.h:409
const std::vector< const int8_t * > & getColumnFrag(const size_t storage_idx, const size_t col_logical_idx, int64_t &global_idx) const
bool is_null
Definition: sqltypes.h:74
SQLTypeInfo sql_type
Definition: TargetInfo.h:42
int get_array_context_logical_size() const
Definition: sqltypes.h:460
const Executor * executor_
Definition: ResultSet.h:832
QueryMemoryDescriptor query_mem_desc_
Definition: ResultSet.h:821
#define CHECK_GE(x, y)
Definition: Logger.h:210
int64_t read_int_from_buff(const int8_t *ptr, const int8_t compact_sz)
std::vector< SerializedVarlenBufferStorage > serialized_varlen_buffer_
Definition: ResultSet.h:852
HOST DEVICE EncodingType get_compression() const
Definition: sqltypes.h:266
#define CHECK_GT(x, y)
Definition: Logger.h:209
DEVICE void ChunkIter_get_nth(ChunkIter *it, int n, bool uncompress, VarlenDatum *result, bool *is_end)
Definition: ChunkIter.cpp:181
int8_t * pointer
Definition: sqltypes.h:73
const Executor * getExecutor() const
std::conditional_t< is_cuda_compiler(), DeviceArrayDatum, HostArrayDatum > ArrayDatum
Definition: sqltypes.h:129
std::shared_ptr< RowSetMemoryOwner > row_set_mem_owner_
Definition: ResultSet.h:828
bool is_agg
Definition: TargetInfo.h:40
std::pair< size_t, size_t > getStorageIndex(const size_t entry_idx) const
Definition: ResultSet.cpp:635
void copy_from_gpu(Data_Namespace::DataMgr *data_mgr, void *dst, const CUdeviceptr src, const size_t num_bytes, const int device_id)
boost::optional< std::vector< ScalarTargetValue > > ArrayTargetValue
Definition: TargetValue.h:157
const std::vector< ColumnLazyFetchInfo > lazy_fetch_info_
Definition: ResultSet.h:839
#define CHECK_LT(x, y)
Definition: Logger.h:207
std::vector< std::vector< std::vector< const int8_t * > > > col_buffers_
Definition: ResultSet.h:840
const ExecutorDeviceType device_type_
Definition: ResultSet.h:819
SQLTypeInfo get_elem_type() const
Definition: sqltypes.h:617
#define CHECK(condition)
Definition: Logger.h:197
HOST DEVICE SQLTypes get_type() const
Definition: sqltypes.h:258
bool separate_varlen_storage_valid_
Definition: ResultSet.h:853
boost::variant< ScalarTargetValue, ArrayTargetValue, GeoTargetValue, GeoTargetValuePtr > TargetValue
Definition: TargetValue.h:167
TargetValue build_array_target_value(const SQLTypeInfo &array_ti, const int8_t *buff, const size_t buff_sz, const bool translate_strings, std::shared_ptr< RowSetMemoryOwner > row_set_mem_owner, const Executor *executor)
size_t length
Definition: sqltypes.h:72
const int device_id_
Definition: ResultSet.h:820
+ Here is the call graph for this function:
+ Here is the caller graph for this function:

◆ moveToBegin()

void ResultSet::moveToBegin ( ) const

Definition at line 475 of file ResultSet.cpp.

References crt_row_buff_idx_, and fetched_so_far_.

Referenced by rowCount().

475  {
476  crt_row_buff_idx_ = 0;
477  fetched_so_far_ = 0;
478 }
size_t fetched_so_far_
Definition: ResultSet.h:825
size_t crt_row_buff_idx_
Definition: ResultSet.h:824
+ Here is the caller graph for this function:

◆ parallelRowCount()

size_t ResultSet::parallelRowCount ( ) const
private

Definition at line 385 of file ResultSet.cpp.

References cpu_threads(), drop_first_, entryCount(), g_use_tbb_pool, isRowAtEmpty(), and keep_first_.

Referenced by rowCount().

385  {
386  auto execute_parallel_row_count = [this](auto counter_threads) -> size_t {
387  const size_t worker_count = cpu_threads();
388  for (size_t i = 0,
389  start_entry = 0,
390  stride = (entryCount() + worker_count - 1) / worker_count;
391  i < worker_count && start_entry < entryCount();
392  ++i, start_entry += stride) {
393  const auto end_entry = std::min(start_entry + stride, entryCount());
394  counter_threads.append(
395  [this](const size_t start, const size_t end) {
396  size_t row_count{0};
397  for (size_t i = start; i < end; ++i) {
398  if (!isRowAtEmpty(i)) {
399  ++row_count;
400  }
401  }
402  return row_count;
403  },
404  start_entry,
405  end_entry);
406  }
407  const auto row_counts = counter_threads.join();
408  const size_t row_count = std::accumulate(row_counts.begin(), row_counts.end(), 0);
409  return row_count;
410  };
411  // will fall back to futures threadpool if TBB is not enabled
412  const auto row_count =
414  ? execute_parallel_row_count(threadpool::ThreadPool<size_t>())
415  : execute_parallel_row_count(threadpool::FuturesThreadPool<size_t>());
416  if (keep_first_ + drop_first_) {
417  const auto limited_row_count = std::min(keep_first_ + drop_first_, row_count);
418  return limited_row_count < drop_first_ ? 0 : limited_row_count - drop_first_;
419  }
420  return row_count;
421 }
size_t entryCount() const
size_t keep_first_
Definition: ResultSet.h:827
size_t drop_first_
Definition: ResultSet.h:826
bool g_use_tbb_pool
Definition: Execute.cpp:75
bool isRowAtEmpty(const size_t index) const
int cpu_threads()
Definition: thread_count.h:25
+ Here is the call graph for this function:
+ Here is the caller graph for this function:

◆ parallelTop()

void ResultSet::parallelTop ( const std::list< Analyzer::OrderEntry > &  order_entries,
const size_t  top_n 
)
private

Definition at line 596 of file ResultSet.cpp.

References cpu_threads(), createComparator(), initPermutationBuffer(), permutation_, and topPermutation().

Referenced by sort().

597  {
598  const size_t step = cpu_threads();
599  std::vector<std::vector<uint32_t>> strided_permutations(step);
600  std::vector<std::future<void>> init_futures;
601  for (size_t start = 0; start < step; ++start) {
602  init_futures.emplace_back(
603  std::async(std::launch::async, [this, start, step, &strided_permutations] {
604  strided_permutations[start] = initPermutationBuffer(start, step);
605  }));
606  }
607  for (auto& init_future : init_futures) {
608  init_future.wait();
609  }
610  for (auto& init_future : init_futures) {
611  init_future.get();
612  }
613  auto compare = createComparator(order_entries, true);
614  std::vector<std::future<void>> top_futures;
615  for (auto& strided_permutation : strided_permutations) {
616  top_futures.emplace_back(
617  std::async(std::launch::async, [&strided_permutation, &compare, top_n] {
618  topPermutation(strided_permutation, top_n, compare);
619  }));
620  }
621  for (auto& top_future : top_futures) {
622  top_future.wait();
623  }
624  for (auto& top_future : top_futures) {
625  top_future.get();
626  }
627  permutation_.reserve(strided_permutations.size() * top_n);
628  for (const auto& strided_permutation : strided_permutations) {
629  permutation_.insert(
630  permutation_.end(), strided_permutation.begin(), strided_permutation.end());
631  }
632  topPermutation(permutation_, top_n, compare);
633 }
std::vector< uint32_t > permutation_
Definition: ResultSet.h:829
std::vector< uint32_t > initPermutationBuffer(const size_t start, const size_t step)
Definition: ResultSet.cpp:574
static void topPermutation(std::vector< uint32_t > &to_sort, const size_t n, const std::function< bool(const uint32_t, const uint32_t)> compare)
Definition: ResultSet.cpp:787
std::function< bool(const uint32_t, const uint32_t)> createComparator(const std::list< Analyzer::OrderEntry > &order_entries, const bool use_heap)
Definition: ResultSet.h:757
int cpu_threads()
Definition: thread_count.h:25
+ Here is the call graph for this function:
+ Here is the caller graph for this function:

◆ radixSortOnCpu()

void ResultSet::radixSortOnCpu ( const std::list< Analyzer::OrderEntry > &  order_entries) const
private

Definition at line 841 of file ResultSet.cpp.

References apply_permutation_cpu(), CHECK, CHECK_EQ, QueryMemoryDescriptor::getColOffInBytes(), QueryMemoryDescriptor::getEntryCount(), QueryMemoryDescriptor::getPaddedSlotWidthBytes(), QueryMemoryDescriptor::getSlotCount(), QueryMemoryDescriptor::hasKeylessHash(), query_mem_desc_, sort_groups_cpu(), and storage_.

Referenced by sort().

842  {
844  std::vector<int64_t> tmp_buff(query_mem_desc_.getEntryCount());
845  std::vector<int32_t> idx_buff(query_mem_desc_.getEntryCount());
846  CHECK_EQ(size_t(1), order_entries.size());
847  auto buffer_ptr = storage_->getUnderlyingBuffer();
848  for (const auto& order_entry : order_entries) {
849  const auto target_idx = order_entry.tle_no - 1;
850  const auto sortkey_val_buff = reinterpret_cast<int64_t*>(
851  buffer_ptr + query_mem_desc_.getColOffInBytes(target_idx));
852  const auto chosen_bytes = query_mem_desc_.getPaddedSlotWidthBytes(target_idx);
853  sort_groups_cpu(sortkey_val_buff,
854  &idx_buff[0],
856  order_entry.is_desc,
857  chosen_bytes);
858  apply_permutation_cpu(reinterpret_cast<int64_t*>(buffer_ptr),
859  &idx_buff[0],
861  &tmp_buff[0],
862  sizeof(int64_t));
863  for (size_t target_idx = 0; target_idx < query_mem_desc_.getSlotCount();
864  ++target_idx) {
865  if (static_cast<int>(target_idx) == order_entry.tle_no - 1) {
866  continue;
867  }
868  const auto chosen_bytes = query_mem_desc_.getPaddedSlotWidthBytes(target_idx);
869  const auto satellite_val_buff = reinterpret_cast<int64_t*>(
870  buffer_ptr + query_mem_desc_.getColOffInBytes(target_idx));
871  apply_permutation_cpu(satellite_val_buff,
872  &idx_buff[0],
874  &tmp_buff[0],
875  chosen_bytes);
876  }
877  }
878 }
#define CHECK_EQ(x, y)
Definition: Logger.h:205
void sort_groups_cpu(int64_t *val_buff, int32_t *idx_buff, const uint64_t entry_count, const bool desc, const uint32_t chosen_bytes)
Definition: InPlaceSort.cpp:27
QueryMemoryDescriptor query_mem_desc_
Definition: ResultSet.h:821
std::unique_ptr< ResultSetStorage > storage_
Definition: ResultSet.h:822
void apply_permutation_cpu(int64_t *val_buff, int32_t *idx_buff, const uint64_t entry_count, int64_t *tmp_buff, const uint32_t chosen_bytes)
Definition: InPlaceSort.cpp:46
const int8_t getPaddedSlotWidthBytes(const size_t slot_idx) const
#define CHECK(condition)
Definition: Logger.h:197
size_t getColOffInBytes(const size_t col_idx) const
+ Here is the call graph for this function:
+ Here is the caller graph for this function:

◆ radixSortOnGpu()

void ResultSet::radixSortOnGpu ( const std::list< Analyzer::OrderEntry > &  order_entries) const
private

Definition at line 807 of file ResultSet.cpp.

References copy_group_by_buffers_from_gpu(), create_dev_group_by_buffers(), executor_, QueryMemoryDescriptor::getBufferSizeBytes(), GPU, inplace_sort_gpu(), KernelPerFragment, query_mem_desc_, and storage_.

Referenced by sort().

808  {
809  auto data_mgr = &executor_->catalog_->getDataMgr();
810  const int device_id{0};
811  CudaAllocator cuda_allocator(data_mgr, device_id);
812  std::vector<int64_t*> group_by_buffers(executor_->blockSize());
813  group_by_buffers[0] = reinterpret_cast<int64_t*>(storage_->getUnderlyingBuffer());
814  auto dev_group_by_buffers =
815  create_dev_group_by_buffers(&cuda_allocator,
816  group_by_buffers,
818  executor_->blockSize(),
819  executor_->gridSize(),
820  device_id,
822  -1,
823  true,
824  true,
825  false,
826  nullptr);
828  order_entries, query_mem_desc_, dev_group_by_buffers, data_mgr, device_id);
830  data_mgr,
831  group_by_buffers,
833  dev_group_by_buffers.second,
835  executor_->blockSize(),
836  executor_->gridSize(),
837  device_id,
838  false);
839 }
GpuGroupByBuffers create_dev_group_by_buffers(DeviceAllocator *cuda_allocator, const std::vector< int64_t *> &group_by_buffers, const QueryMemoryDescriptor &query_mem_desc, const unsigned block_size_x, const unsigned grid_size_x, const int device_id, const ExecutorDispatchMode dispatch_mode, const int64_t num_input_rows, const bool prepend_index_buffer, const bool always_init_group_by_on_host, const bool use_bump_allocator, Allocator *insitu_allocator)
Definition: GpuMemUtils.cpp:61
const Executor * executor_
Definition: ResultSet.h:832
QueryMemoryDescriptor query_mem_desc_
Definition: ResultSet.h:821
std::unique_ptr< ResultSetStorage > storage_
Definition: ResultSet.h:822
void inplace_sort_gpu(const std::list< Analyzer::OrderEntry > &order_entries, const QueryMemoryDescriptor &query_mem_desc, const GpuGroupByBuffers &group_by_buffers, Data_Namespace::DataMgr *data_mgr, const int device_id)
void copy_group_by_buffers_from_gpu(Data_Namespace::DataMgr *data_mgr, const std::vector< int64_t *> &group_by_buffers, const size_t groups_buffer_size, const CUdeviceptr group_by_dev_buffers_mem, const QueryMemoryDescriptor &query_mem_desc, const unsigned block_size_x, const unsigned grid_size_x, const int device_id, const bool prepend_index_buffer)
size_t getBufferSizeBytes(const RelAlgExecutionUnit &ra_exe_unit, const unsigned thread_count, const ExecutorDeviceType device_type) const
+ Here is the call graph for this function:
+ Here is the caller graph for this function:

◆ rowCount()

size_t ResultSet::rowCount ( const bool  force_parallel = false) const

Definition at line 325 of file ResultSet.cpp.

References binSearchRowCount(), cached_row_count_, CHECK_GE, drop_first_, entryCount(), getNextRowUnlocked(), QueryMemoryDescriptor::getQueryDescriptionType(), just_explain_, keep_first_, moveToBegin(), parallelRowCount(), permutation_, Projection, query_mem_desc_, row_iteration_mutex_, and storage_.

325  {
326  if (just_explain_) {
327  return 1;
328  }
329  if (!permutation_.empty()) {
330  const auto limited_row_count = keep_first_ + drop_first_;
331  return limited_row_count ? std::min(limited_row_count, permutation_.size())
332  : permutation_.size();
333  }
334  if (cached_row_count_ != -1) {
336  return cached_row_count_;
337  }
338  if (!storage_) {
339  return 0;
340  }
341  if (permutation_.empty() &&
343  return binSearchRowCount();
344  }
345  if (force_parallel || entryCount() > 20000) {
346  return parallelRowCount();
347  }
348  std::lock_guard<std::mutex> lock(row_iteration_mutex_);
349  moveToBegin();
350  size_t row_count{0};
351  while (true) {
352  auto crt_row = getNextRowUnlocked(false, false);
353  if (crt_row.empty()) {
354  break;
355  }
356  ++row_count;
357  }
358  moveToBegin();
359  return row_count;
360 }
std::mutex row_iteration_mutex_
Definition: ResultSet.h:857
size_t entryCount() const
QueryMemoryDescriptor query_mem_desc_
Definition: ResultSet.h:821
#define CHECK_GE(x, y)
Definition: Logger.h:210
std::unique_ptr< ResultSetStorage > storage_
Definition: ResultSet.h:822
size_t keep_first_
Definition: ResultSet.h:827
std::vector< TargetValue > getNextRowUnlocked(const bool translate_strings, const bool decimal_to_double) const
const bool just_explain_
Definition: ResultSet.h:855
std::vector< uint32_t > permutation_
Definition: ResultSet.h:829
size_t drop_first_
Definition: ResultSet.h:826
size_t binSearchRowCount() const
Definition: ResultSet.cpp:367
std::atomic< ssize_t > cached_row_count_
Definition: ResultSet.h:856
size_t parallelRowCount() const
Definition: ResultSet.cpp:385
QueryDescriptionType getQueryDescriptionType() const
void moveToBegin() const
Definition: ResultSet.cpp:475
+ Here is the call graph for this function:

◆ rowIterator() [1/2]

ResultSetRowIterator ResultSet::rowIterator ( size_t  from_logical_index,
bool  translate_strings,
bool  decimal_to_double 
) const
inline

Definition at line 336 of file ResultSet.h.

338  {
339  ResultSetRowIterator rowIterator(this, translate_strings, decimal_to_double);
340 
341  // move to first logical position
342  ++rowIterator;
343 
344  for (size_t index = 0; index < from_logical_index; index++) {
345  ++rowIterator;
346  }
347 
348  return rowIterator;
349  }
double decimal_to_double(const SQLTypeInfo &otype, int64_t oval)
ResultSetRowIterator rowIterator(size_t from_logical_index, bool translate_strings, bool decimal_to_double) const
Definition: ResultSet.h:336

◆ rowIterator() [2/2]

ResultSetRowIterator ResultSet::rowIterator ( bool  translate_strings,
bool  decimal_to_double 
) const
inline

Definition at line 351 of file ResultSet.h.

352  {
353  return rowIterator(0, translate_strings, decimal_to_double);
354  }
double decimal_to_double(const SQLTypeInfo &otype, int64_t oval)
ResultSetRowIterator rowIterator(size_t from_logical_index, bool translate_strings, bool decimal_to_double) const
Definition: ResultSet.h:336

◆ serialize()

void ResultSet::serialize ( TSerializedRows &  serialized_rows) const

◆ serializeCountDistinctColumns()

void ResultSet::serializeCountDistinctColumns ( TSerializedRows &  ) const
private

◆ serializeProjection()

void ResultSet::serializeProjection ( TSerializedRows &  serialized_rows) const
private

◆ serializeVarlenAggColumn()

void ResultSet::serializeVarlenAggColumn ( int8_t *  buf,
std::vector< std::string > &  varlen_bufer 
) const
private

◆ setCachedRowCount()

void ResultSet::setCachedRowCount ( const size_t  row_count) const

Definition at line 362 of file ResultSet.cpp.

References cached_row_count_, and CHECK.

362  {
363  CHECK(cached_row_count_ == -1 || cached_row_count_ == static_cast<ssize_t>(row_count));
364  cached_row_count_ = row_count;
365 }
std::atomic< ssize_t > cached_row_count_
Definition: ResultSet.h:856
#define CHECK(condition)
Definition: Logger.h:197

◆ setGeoReturnType()

void ResultSet::setGeoReturnType ( const GeoReturnType  val)
inline

Definition at line 500 of file ResultSet.h.

500 { geo_return_type_ = val; }
GeoReturnType geo_return_type_
Definition: ResultSet.h:860

◆ setQueueTime()

void ResultSet::setQueueTime ( const int64_t  queue_time)

Definition at line 463 of file ResultSet.cpp.

References queue_time_ms_.

463  {
464  queue_time_ms_ = queue_time;
465 }
int64_t queue_time_ms_
Definition: ResultSet.h:830

◆ setSeparateVarlenStorageValid()

void ResultSet::setSeparateVarlenStorageValid ( const bool  val)
inline

Definition at line 532 of file ResultSet.h.

References ResultSetStorage::binSearchRowCount().

532  {
534  }
bool separate_varlen_storage_valid_
Definition: ResultSet.h:853
+ Here is the call graph for this function:

◆ sort()

void ResultSet::sort ( const std::list< Analyzer::OrderEntry > &  order_entries,
const size_t  top_n 
)

Definition at line 504 of file ResultSet.cpp.

References Executor::baseline_threshold, baselineSort(), cached_row_count_, canUseFastBaselineSort(), CHECK, CHECK_EQ, CPU, createComparator(), DEBUG_TIMER, doBaselineSort(), entryCount(), g_enable_watchdog, QueryMemoryDescriptor::getEntryCount(), getGpuCount(), GPU, initPermutationBuffer(), LOG, parallelTop(), permutation_, query_mem_desc_, radixSortOnCpu(), radixSortOnGpu(), QueryMemoryDescriptor::sortOnGpu(), sortPermutation(), targets_, topPermutation(), and logger::WARNING.

Referenced by RelAlgExecutor::executeRelAlgQueryNoRetry(), RelAlgExecutor::executeRelAlgQuerySingleStep(), RelAlgExecutor::executeRelAlgStep(), and RelAlgExecutor::executeSort().

505  {
506  auto timer = DEBUG_TIMER(__func__);
508  CHECK(!targets_.empty());
509 #ifdef HAVE_CUDA
510  if (canUseFastBaselineSort(order_entries, top_n)) {
511  baselineSort(order_entries, top_n);
512  return;
513  }
514 #endif // HAVE_CUDA
515  if (query_mem_desc_.sortOnGpu()) {
516  try {
517  radixSortOnGpu(order_entries);
518  } catch (const OutOfMemory&) {
519  LOG(WARNING) << "Out of GPU memory during sort, finish on CPU";
520  radixSortOnCpu(order_entries);
521  } catch (const std::bad_alloc&) {
522  LOG(WARNING) << "Out of GPU memory during sort, finish on CPU";
523  radixSortOnCpu(order_entries);
524  }
525  return;
526  }
527  // This check isn't strictly required, but allows the index buffer to be 32-bit.
528  if (query_mem_desc_.getEntryCount() > std::numeric_limits<uint32_t>::max()) {
529  throw RowSortException("Sorting more than 4B elements not supported");
530  }
531 
532  CHECK(permutation_.empty());
533 
534  const bool use_heap{order_entries.size() == 1 && top_n};
535  if (use_heap && entryCount() > 100000) {
536  if (g_enable_watchdog && (entryCount() > 20000000)) {
537  throw WatchdogException("Sorting the result would be too slow");
538  }
539  parallelTop(order_entries, top_n);
540  return;
541  }
542 
544  throw WatchdogException("Sorting the result would be too slow");
545  }
546 
548 
549  auto compare = createComparator(order_entries, use_heap);
550 
551  if (use_heap) {
552  topPermutation(permutation_, top_n, compare);
553  } else {
554  sortPermutation(compare);
555  }
556 }
void baselineSort(const std::list< Analyzer::OrderEntry > &order_entries, const size_t top_n)
#define CHECK_EQ(x, y)
Definition: Logger.h:205
size_t entryCount() const
void radixSortOnCpu(const std::list< Analyzer::OrderEntry > &order_entries) const
Definition: ResultSet.cpp:841
#define LOG(tag)
Definition: Logger.h:188
static const size_t baseline_threshold
Definition: Execute.h:1000
QueryMemoryDescriptor query_mem_desc_
Definition: ResultSet.h:821
void radixSortOnGpu(const std::list< Analyzer::OrderEntry > &order_entries) const
Definition: ResultSet.cpp:807
std::vector< uint32_t > permutation_
Definition: ResultSet.h:829
std::vector< uint32_t > initPermutationBuffer(const size_t start, const size_t step)
Definition: ResultSet.cpp:574
const std::vector< TargetInfo > targets_
Definition: ResultSet.h:818
bool canUseFastBaselineSort(const std::list< Analyzer::OrderEntry > &order_entries, const size_t top_n)
std::atomic< ssize_t > cached_row_count_
Definition: ResultSet.h:856
static void topPermutation(std::vector< uint32_t > &to_sort, const size_t n, const std::function< bool(const uint32_t, const uint32_t)> compare)
Definition: ResultSet.cpp:787
void sortPermutation(const std::function< bool(const uint32_t, const uint32_t)> compare)
Definition: ResultSet.cpp:802
void parallelTop(const std::list< Analyzer::OrderEntry > &order_entries, const size_t top_n)
Definition: ResultSet.cpp:596
#define CHECK(condition)
Definition: Logger.h:197
#define DEBUG_TIMER(name)
Definition: Logger.h:313
bool g_enable_watchdog
Definition: Execute.cpp:73
std::function< bool(const uint32_t, const uint32_t)> createComparator(const std::list< Analyzer::OrderEntry > &order_entries, const bool use_heap)
Definition: ResultSet.h:757
+ Here is the call graph for this function:
+ Here is the caller graph for this function:

◆ sortPermutation()

void ResultSet::sortPermutation ( const std::function< bool(const uint32_t, const uint32_t)>  compare)
private

Definition at line 802 of file ResultSet.cpp.

References permutation_.

Referenced by sort().

803  {
804  std::sort(permutation_.begin(), permutation_.end(), compare);
805 }
std::vector< uint32_t > permutation_
Definition: ResultSet.h:829
+ Here is the caller graph for this function:

◆ syncEstimatorBuffer()

void ResultSet::syncEstimatorBuffer ( ) const

Definition at line 450 of file ResultSet.cpp.

References CHECK, CHECK_EQ, checked_calloc(), copy_from_gpu(), data_mgr_, device_id_, device_type_, estimator_, estimator_buffer_, GPU, and host_estimator_buffer_.

450  {
453  CHECK_EQ(size_t(0), estimator_->getBufferSize() % sizeof(int64_t));
455  static_cast<int8_t*>(checked_calloc(estimator_->getBufferSize(), 1));
458  reinterpret_cast<CUdeviceptr>(estimator_buffer_),
459  estimator_->getBufferSize(),
460  device_id_);
461 }
#define CHECK_EQ(x, y)
Definition: Logger.h:205
int8_t * estimator_buffer_
Definition: ResultSet.h:845
void copy_from_gpu(Data_Namespace::DataMgr *data_mgr, void *dst, const CUdeviceptr src, const size_t num_bytes, const int device_id)
void * checked_calloc(const size_t nmemb, const size_t size)
Definition: checked_alloc.h:52
Data_Namespace::DataMgr * data_mgr_
Definition: ResultSet.h:847
const std::shared_ptr< const Analyzer::Estimator > estimator_
Definition: ResultSet.h:844
int8_t * host_estimator_buffer_
Definition: ResultSet.h:846
const ExecutorDeviceType device_type_
Definition: ResultSet.h:819
#define CHECK(condition)
Definition: Logger.h:197
const int device_id_
Definition: ResultSet.h:820
+ Here is the call graph for this function:

◆ topPermutation()

void ResultSet::topPermutation ( std::vector< uint32_t > &  to_sort,
const size_t  n,
const std::function< bool(const uint32_t, const uint32_t)>  compare 
)
staticprivate

Definition at line 787 of file ResultSet.cpp.

Referenced by parallelTop(), and sort().

790  {
791  std::make_heap(to_sort.begin(), to_sort.end(), compare);
792  std::vector<uint32_t> permutation_top;
793  permutation_top.reserve(n);
794  for (size_t i = 0; i < n && !to_sort.empty(); ++i) {
795  permutation_top.push_back(to_sort.front());
796  std::pop_heap(to_sort.begin(), to_sort.end(), compare);
797  to_sort.pop_back();
798  }
799  to_sort.swap(permutation_top);
800 }
+ Here is the caller graph for this function:

◆ unserialize()

static std::unique_ptr<ResultSet> ResultSet::unserialize ( const TSerializedRows &  serialized_rows,
const Executor  
)
static

◆ unserializeCountDistinctColumns()

void ResultSet::unserializeCountDistinctColumns ( const TSerializedRows &  )
private

◆ updateStorageEntryCount()

void ResultSet::updateStorageEntryCount ( const size_t  new_entry_count)
inline

Definition at line 364 of file ResultSet.h.

References File_Namespace::append(), CHECK, anonymous_namespace{TypedDataAccessors.h}::decimal_to_double(), QueryMemoryDescriptor::getQueryDescriptionType(), Projection, ResultSetStorage::query_mem_desc_, ResultSetStorage::ResultSet, and QueryMemoryDescriptor::setEntryCount().

364  {
366  query_mem_desc_.setEntryCount(new_entry_count);
367  CHECK(storage_);
368  storage_->updateEntryCount(new_entry_count);
369  }
void setEntryCount(const size_t val)
QueryMemoryDescriptor query_mem_desc_
Definition: ResultSet.h:821
std::unique_ptr< ResultSetStorage > storage_
Definition: ResultSet.h:822
#define CHECK(condition)
Definition: Logger.h:197
QueryDescriptionType getQueryDescriptionType() const
+ Here is the call graph for this function:

Friends And Related Function Documentation

◆ ColumnarResults

friend class ColumnarResults
friend

Definition at line 869 of file ResultSet.h.

◆ ResultSetManager

friend class ResultSetManager
friend

Definition at line 867 of file ResultSet.h.

◆ ResultSetRowIterator

friend class ResultSetRowIterator
friend

Definition at line 868 of file ResultSet.h.

Member Data Documentation

◆ appended_storage_

◆ cached_row_count_

std::atomic<ssize_t> ResultSet::cached_row_count_
mutableprivate

Definition at line 856 of file ResultSet.h.

Referenced by append(), ResultSet(), rowCount(), setCachedRowCount(), and sort().

◆ chunk_iters_

std::vector<std::shared_ptr<std::list<ChunkIter> > > ResultSet::chunk_iters_
private

Definition at line 835 of file ResultSet.h.

Referenced by append().

◆ chunks_

std::list<std::shared_ptr<Chunk_NS::Chunk> > ResultSet::chunks_
private

Definition at line 834 of file ResultSet.h.

Referenced by append().

◆ col_buffers_

std::vector<std::vector<std::vector<const int8_t*> > > ResultSet::col_buffers_
private

◆ column_wise_comparator_

std::unique_ptr<ResultSetComparator<ColumnWiseTargetAccessor> > ResultSet::column_wise_comparator_
private

Definition at line 865 of file ResultSet.h.

◆ consistent_frag_sizes_

std::vector<std::vector<int64_t> > ResultSet::consistent_frag_sizes_
private

Definition at line 842 of file ResultSet.h.

Referenced by append(), getColumnFrag(), and ResultSet().

◆ crt_row_buff_idx_

size_t ResultSet::crt_row_buff_idx_
mutableprivate

◆ data_mgr_

Data_Namespace::DataMgr* ResultSet::data_mgr_
private

Definition at line 847 of file ResultSet.h.

Referenced by ResultSet(), and syncEstimatorBuffer().

◆ device_id_

const int ResultSet::device_id_
private

◆ device_type_

◆ drop_first_

size_t ResultSet::drop_first_
private

◆ estimator_

const std::shared_ptr<const Analyzer::Estimator> ResultSet::estimator_
private

Definition at line 844 of file ResultSet.h.

Referenced by definitelyHasNoRows(), ResultSet(), and syncEstimatorBuffer().

◆ estimator_buffer_

int8_t* ResultSet::estimator_buffer_
private

Definition at line 845 of file ResultSet.h.

Referenced by getDeviceEstimatorBuffer(), ResultSet(), syncEstimatorBuffer(), and ~ResultSet().

◆ executor_

◆ explanation_

std::string ResultSet::explanation_
private

Definition at line 854 of file ResultSet.h.

◆ fetched_so_far_

size_t ResultSet::fetched_so_far_
mutableprivate

Definition at line 825 of file ResultSet.h.

Referenced by moveToBegin(), and ResultSet().

◆ frag_offsets_

std::vector<std::vector<std::vector<int64_t> > > ResultSet::frag_offsets_
private

Definition at line 841 of file ResultSet.h.

Referenced by append(), getColumnFrag(), and ResultSet().

◆ geo_return_type_

GeoReturnType ResultSet::geo_return_type_
mutableprivate

Definition at line 860 of file ResultSet.h.

Referenced by makeGeoTargetValue(), and ResultSet().

◆ host_estimator_buffer_

int8_t* ResultSet::host_estimator_buffer_
mutableprivate

Definition at line 846 of file ResultSet.h.

Referenced by getHostEstimatorBuffer(), ResultSet(), syncEstimatorBuffer(), and ~ResultSet().

◆ just_explain_

const bool ResultSet::just_explain_
private

Definition at line 855 of file ResultSet.h.

Referenced by colCount(), definitelyHasNoRows(), getColType(), isExplain(), ResultSet(), and rowCount().

◆ keep_first_

size_t ResultSet::keep_first_
private

◆ lazy_fetch_info_

const std::vector<ColumnLazyFetchInfo> ResultSet::lazy_fetch_info_
private

◆ literal_buffers_

std::vector<std::vector<int8_t> > ResultSet::literal_buffers_
private

Definition at line 838 of file ResultSet.h.

Referenced by append().

◆ permutation_

std::vector<uint32_t> ResultSet::permutation_
private

◆ query_mem_desc_

◆ queue_time_ms_

int64_t ResultSet::queue_time_ms_
private

◆ render_time_ms_

int64_t ResultSet::render_time_ms_
private

Definition at line 831 of file ResultSet.h.

Referenced by getRenderTime(), and ResultSet().

◆ row_iteration_mutex_

std::mutex ResultSet::row_iteration_mutex_
mutableprivate

Definition at line 857 of file ResultSet.h.

Referenced by rowCount().

◆ row_set_mem_owner_

◆ row_wise_comparator_

std::unique_ptr<ResultSetComparator<RowWiseTargetAccessor> > ResultSet::row_wise_comparator_
private

Definition at line 864 of file ResultSet.h.

◆ separate_varlen_storage_valid_

bool ResultSet::separate_varlen_storage_valid_
private

◆ serialized_varlen_buffer_

std::vector<SerializedVarlenBufferStorage> ResultSet::serialized_varlen_buffer_
private

Definition at line 852 of file ResultSet.h.

Referenced by append(), makeGeoTargetValue(), and makeVarlenTargetValue().

◆ storage_

◆ targets_


The documentation for this class was generated from the following files: