OmniSciDB  c07336695a
ResultSet Class Reference

#include <ResultSet.h>

+ Collaboration diagram for ResultSet:

Classes

struct  ColumnWiseTargetAccessor
 
struct  ResultSetComparator
 
struct  RowWiseTargetAccessor
 
struct  StorageLookupResult
 
struct  TargetOffsets
 
struct  VarlenTargetPtrPair
 

Public Types

enum  GeoReturnType { GeoReturnType::GeoTargetValue, GeoReturnType::WktString, GeoReturnType::GeoTargetValuePtr, GeoReturnType::GeoTargetValueGpuPtr }
 

Public Member Functions

 ResultSet (const std::vector< TargetInfo > &targets, const ExecutorDeviceType device_type, const QueryMemoryDescriptor &query_mem_desc, const std::shared_ptr< RowSetMemoryOwner > row_set_mem_owner, const Executor *executor)
 
 ResultSet (const std::vector< TargetInfo > &targets, const std::vector< ColumnLazyFetchInfo > &lazy_fetch_info, const std::vector< std::vector< const int8_t *>> &col_buffers, const std::vector< std::vector< int64_t >> &frag_offsets, const std::vector< int64_t > &consistent_frag_sizes, const ExecutorDeviceType device_type, const int device_id, const QueryMemoryDescriptor &query_mem_desc, const std::shared_ptr< RowSetMemoryOwner > row_set_mem_owner, const Executor *executor)
 
 ResultSet (const std::shared_ptr< const Analyzer::Estimator >, const ExecutorDeviceType device_type, const int device_id, Data_Namespace::DataMgr *data_mgr)
 
 ResultSet (const std::string &explanation)
 
 ResultSet (int64_t queue_time_ms, int64_t render_time_ms, const std::shared_ptr< RowSetMemoryOwner > row_set_mem_owner)
 
 ~ResultSet ()
 
ResultSetRowIterator rowIterator (size_t from_logical_index, bool translate_strings, bool decimal_to_double) const
 
ResultSetRowIterator rowIterator (bool translate_strings, bool decimal_to_double) const
 
ExecutorDeviceType getDeviceType () const
 
const ResultSetStorage * allocateStorage () const
 
const ResultSetStorage * allocateStorage (int8_t *, const std::vector< int64_t > &) const
 
const ResultSetStorage * allocateStorage (const std::vector< int64_t > &) const
 
void updateStorageEntryCount (const size_t new_entry_count)
 
std::vector< TargetValue > getNextRow (const bool translate_strings, const bool decimal_to_double) const
 
size_t getCurrentRowBufferIndex () const
 
std::vector< TargetValue > getRowAt (const size_t index) const
 
TargetValue getRowAt (const size_t row_idx, const size_t col_idx, const bool translate_strings, const bool decimal_to_double=true) const
 
OneIntegerColumnRow getOneColRow (const size_t index) const
 
std::vector< TargetValue > getRowAtNoTranslations (const size_t index, const std::vector< bool > &targets_to_skip={}) const
 
bool isRowAtEmpty (const size_t index) const
 
void sort (const std::list< Analyzer::OrderEntry > &order_entries, const size_t top_n)
 
void keepFirstN (const size_t n)
 
void dropFirstN (const size_t n)
 
void append (ResultSet &that)
 
const ResultSetStorage * getStorage () const
 
size_t colCount () const
 
SQLTypeInfo getColType (const size_t col_idx) const
 
size_t rowCount (const bool force_parallel=false) const
 
void setCachedRowCount (const size_t row_count) const
 
size_t entryCount () const
 
size_t getBufferSizeBytes (const ExecutorDeviceType device_type) const
 
bool definitelyHasNoRows () const
 
const QueryMemoryDescriptor & getQueryMemDesc () const
 
const std::vector< TargetInfo > & getTargetInfos () const
 
const std::vector< int64_t > & getTargetInitVals () const
 
int8_t * getDeviceEstimatorBuffer () const
 
int8_t * getHostEstimatorBuffer () const
 
void syncEstimatorBuffer () const
 
size_t getNDVEstimator () const
 
void setQueueTime (const int64_t queue_time)
 
int64_t getQueueTime () const
 
int64_t getRenderTime () const
 
void moveToBegin () const
 
bool isTruncated () const
 
bool isExplain () const
 
bool isGeoColOnGpu (const size_t col_idx) const
 
int getDeviceId () const
 
void fillOneEntry (const std::vector< int64_t > &entry)
 
void initializeStorage () const
 
void holdChunks (const std::list< std::shared_ptr< Chunk_NS::Chunk >> &chunks)
 
void holdChunkIterators (const std::shared_ptr< std::list< ChunkIter >> chunk_iters)
 
void holdLiterals (std::vector< int8_t > &literal_buff)
 
std::shared_ptr< RowSetMemoryOwner > getRowSetMemOwner () const
 
const std::vector< uint32_t > & getPermutationBuffer () const
 
const bool isPermutationBufferEmpty () const
 
void serialize (TSerializedRows &serialized_rows) const
 
size_t getLimit ()
 
GeoReturnType getGeoReturnType () const
 
void setGeoReturnType (const GeoReturnType val)
 
void copyColumnIntoBuffer (const size_t column_idx, int8_t *output_buffer, const size_t output_buffer_size) const
 
bool isFastColumnarConversionPossible () const
 
const std::vector< ColumnLazyFetchInfo > & getLazyFetchInfo () const
 
void setSeparateVarlenStorageValid (const bool val)
 
std::shared_ptr< const std::vector< std::string > > getStringDictionaryPayloadCopy (const int dict_id) const
 

Static Public Member Functions

static QueryMemoryDescriptor fixupQueryMemoryDescriptor (const QueryMemoryDescriptor &)
 
static std::unique_ptr< ResultSet > unserialize (const TSerializedRows &serialized_rows, const Executor *)
 

Private Types

using BufferSet = std::set< int64_t >
 
using SerializedVarlenBufferStorage = std::vector< std::string >
 

Private Member Functions

void advanceCursorToNextEntry (ResultSetRowIterator &iter) const
 
std::vector< TargetValue > getNextRowImpl (const bool translate_strings, const bool decimal_to_double) const
 
std::vector< TargetValue > getNextRowUnlocked (const bool translate_strings, const bool decimal_to_double) const
 
std::vector< TargetValue > getRowAt (const size_t index, const bool translate_strings, const bool decimal_to_double, const bool fixup_count_distinct_pointers, const std::vector< bool > &targets_to_skip={}) const
 
size_t parallelRowCount () const
 
size_t advanceCursorToNextEntry () const
 
void radixSortOnGpu (const std::list< Analyzer::OrderEntry > &order_entries) const
 
void radixSortOnCpu (const std::list< Analyzer::OrderEntry > &order_entries) const
 
TargetValue getTargetValueFromBufferRowwise (int8_t *rowwise_target_ptr, int8_t *keys_ptr, const size_t entry_buff_idx, const TargetInfo &target_info, const size_t target_logical_idx, const size_t slot_idx, const bool translate_strings, const bool decimal_to_double, const bool fixup_count_distinct_pointers) const
 
TargetValue getTargetValueFromBufferColwise (const int8_t *col_ptr, const int8_t *keys_ptr, const QueryMemoryDescriptor &query_mem_desc, const size_t local_entry_idx, const size_t global_entry_idx, const TargetInfo &target_info, const size_t target_logical_idx, const size_t slot_idx, const bool translate_strings, const bool decimal_to_double) const
 
TargetValue makeTargetValue (const int8_t *ptr, const int8_t compact_sz, const TargetInfo &target_info, const size_t target_logical_idx, const bool translate_strings, const bool decimal_to_double, const size_t entry_buff_idx) const
 
TargetValue makeVarlenTargetValue (const int8_t *ptr1, const int8_t compact_sz1, const int8_t *ptr2, const int8_t compact_sz2, const TargetInfo &target_info, const size_t target_logical_idx, const bool translate_strings, const size_t entry_buff_idx) const
 
TargetValue makeGeoTargetValue (const int8_t *geo_target_ptr, const size_t slot_idx, const TargetInfo &target_info, const size_t target_logical_idx, const size_t entry_buff_idx) const
 
InternalTargetValue getColumnInternal (const int8_t *buff, const size_t entry_idx, const size_t target_logical_idx, const StorageLookupResult &storage_lookup_result) const
 
InternalTargetValue getVarlenOrderEntry (const int64_t str_ptr, const size_t str_len) const
 
int64_t lazyReadInt (const int64_t ival, const size_t target_logical_idx, const StorageLookupResult &storage_lookup_result) const
 
std::pair< ssize_t, size_t > getStorageIndex (const size_t entry_idx) const
 
const std::vector< const int8_t * > & getColumnFrag (const size_t storge_idx, const size_t col_logical_idx, int64_t &global_idx) const
 
StorageLookupResult findStorage (const size_t entry_idx) const
 
std::function< bool(const uint32_t, const uint32_t)> createComparator (const std::list< Analyzer::OrderEntry > &order_entries, const bool use_heap)
 
void sortPermutation (const std::function< bool(const uint32_t, const uint32_t)> compare)
 
std::vector< uint32_t > initPermutationBuffer (const size_t start, const size_t step)
 
void parallelTop (const std::list< Analyzer::OrderEntry > &order_entries, const size_t top_n)
 
void baselineSort (const std::list< Analyzer::OrderEntry > &order_entries, const size_t top_n)
 
void doBaselineSort (const ExecutorDeviceType device_type, const std::list< Analyzer::OrderEntry > &order_entries, const size_t top_n)
 
bool canUseFastBaselineSort (const std::list< Analyzer::OrderEntry > &order_entries, const size_t top_n)
 
Data_Namespace::DataMgr * getDataManager () const
 
int getGpuCount () const
 
void serializeProjection (TSerializedRows &serialized_rows) const
 
void serializeVarlenAggColumn (int8_t *buf, std::vector< std::string > &varlen_bufer) const
 
void serializeCountDistinctColumns (TSerializedRows &) const
 
void unserializeCountDistinctColumns (const TSerializedRows &)
 
void fixupCountDistinctPointers ()
 
void create_active_buffer_set (BufferSet &count_distinct_active_buffer_set) const
 
int64_t getDistinctBufferRefFromBufferRowwise (int8_t *rowwise_target_ptr, const TargetInfo &target_info) const
 

Static Private Member Functions

static bool isNull (const SQLTypeInfo &ti, const InternalTargetValue &val, const bool float_argument_input)
 
static void topPermutation (std::vector< uint32_t > &to_sort, const size_t n, const std::function< bool(const uint32_t, const uint32_t)> compare)
 

Private Attributes

const std::vector< TargetInfo > targets_
 
const ExecutorDeviceType device_type_
 
const int device_id_
 
QueryMemoryDescriptor query_mem_desc_
 
std::unique_ptr< ResultSetStorage > storage_
 
std::vector< std::unique_ptr< ResultSetStorage > > appended_storage_
 
size_t crt_row_buff_idx_
 
size_t fetched_so_far_
 
size_t drop_first_
 
size_t keep_first_
 
const std::shared_ptr< RowSetMemoryOwner > row_set_mem_owner_
 
std::vector< uint32_t > permutation_
 
int64_t queue_time_ms_
 
int64_t render_time_ms_
 
const Executor * executor_
 
std::list< std::shared_ptr< Chunk_NS::Chunk > > chunks_
 
std::vector< std::shared_ptr< std::list< ChunkIter > > > chunk_iters_
 
std::vector< std::vector< int8_t > > literal_buffers_
 
const std::vector< ColumnLazyFetchInfo > lazy_fetch_info_
 
std::vector< std::vector< std::vector< const int8_t * > > > col_buffers_
 
std::vector< std::vector< std::vector< int64_t > > > frag_offsets_
 
std::vector< std::vector< int64_t > > consistent_frag_sizes_
 
const std::shared_ptr< const Analyzer::Estimator > estimator_
 
int8_t * estimator_buffer_
 
int8_t * host_estimator_buffer_
 
Data_Namespace::DataMgr * data_mgr_
 
std::vector< SerializedVarlenBufferStorage > serialized_varlen_buffer_
 
bool separate_varlen_storage_valid_
 
std::string explanation_
 
const bool just_explain_
 
std::atomic< ssize_t > cached_row_count_
 
std::mutex row_iteration_mutex_
 
GeoReturnType geo_return_type_
 
std::unique_ptr< ResultSetComparator< RowWiseTargetAccessor > > row_wise_comparator_
 
std::unique_ptr< ResultSetComparator< ColumnWiseTargetAccessor > > column_wise_comparator_
 

Friends

class ResultSetManager
 
class ResultSetRowIterator
 

Detailed Description

Definition at line 298 of file ResultSet.h.

Member Typedef Documentation

◆ BufferSet

using ResultSet::BufferSet = std::set<int64_t>
private

Definition at line 765 of file ResultSet.h.

◆ SerializedVarlenBufferStorage

using ResultSet::SerializedVarlenBufferStorage = std::vector<std::string>
private

Definition at line 803 of file ResultSet.h.

Member Enumeration Documentation

◆ GeoReturnType

Geo return type options when accessing geo columns from a result set.

Enumerator
GeoTargetValue 

Copies the geo data into a struct of vectors - coords are uncompressed

WktString 

Returns the geo data as a WKT string

GeoTargetValuePtr 

Returns only the pointers of the underlying buffers for the geo data.

GeoTargetValueGpuPtr 

If geo data is currently on a device, keep the data on the device and return the device ptrs

Definition at line 484 of file ResultSet.h.

484  {
487  WktString,
490  GeoTargetValueGpuPtr
492  };
boost::variant< GeoPointTargetValue, GeoLineStringTargetValue, GeoPolyTargetValue, GeoMultiPolyTargetValue > GeoTargetValue
Definition: TargetValue.h:161
boost::variant< GeoPointTargetValuePtr, GeoLineStringTargetValuePtr, GeoPolyTargetValuePtr, GeoMultiPolyTargetValuePtr > GeoTargetValuePtr
Definition: TargetValue.h:165

Constructor & Destructor Documentation

◆ ResultSet() [1/5]

ResultSet::ResultSet ( const std::vector< TargetInfo > &  targets,
const ExecutorDeviceType  device_type,
const QueryMemoryDescriptor &  query_mem_desc,
const std::shared_ptr< RowSetMemoryOwner >  row_set_mem_owner,
const Executor *  executor 
)

Definition at line 91 of file ResultSet.cpp.

References CudaAllocator::alloc(), cached_row_count_, checked_calloc(), col_buffers_, consistent_frag_sizes_, crt_row_buff_idx_, data_mgr_, device_id_, device_type_, drop_first_, estimator_, estimator_buffer_, executor_, fetched_so_far_, frag_offsets_, geo_return_type_, Data_Namespace::DataMgr::getCudaMgr(), GPU, host_estimator_buffer_, just_explain_, keep_first_, lazy_fetch_info_, query_mem_desc_, queue_time_ms_, render_time_ms_, row_set_mem_owner_, separate_varlen_storage_valid_, targets_, WktString, and CudaMgr_Namespace::CudaMgr::zeroDeviceMem().

96  : targets_(targets)
97  , device_type_(device_type)
98  , device_id_(-1)
99  , query_mem_desc_(query_mem_desc)
100  , crt_row_buff_idx_(0)
101  , fetched_so_far_(0)
102  , drop_first_(0)
103  , keep_first_(0)
104  , row_set_mem_owner_(row_set_mem_owner)
105  , queue_time_ms_(0)
106  , render_time_ms_(0)
107  , executor_(executor)
108  , estimator_buffer_(nullptr)
109  , host_estimator_buffer_(nullptr)
110  , data_mgr_(nullptr)
112  , just_explain_(false)
113  , cached_row_count_(-1)
int8_t * estimator_buffer_
Definition: ResultSet.h:798
GeoReturnType geo_return_type_
Definition: ResultSet.h:813
const Executor * executor_
Definition: ResultSet.h:785
QueryMemoryDescriptor query_mem_desc_
Definition: ResultSet.h:774
size_t keep_first_
Definition: ResultSet.h:780
const bool just_explain_
Definition: ResultSet.h:808
const std::vector< TargetInfo > targets_
Definition: ResultSet.h:771
const std::shared_ptr< RowSetMemoryOwner > row_set_mem_owner_
Definition: ResultSet.h:781
size_t drop_first_
Definition: ResultSet.h:779
std::atomic< ssize_t > cached_row_count_
Definition: ResultSet.h:809
Data_Namespace::DataMgr * data_mgr_
Definition: ResultSet.h:800
int64_t queue_time_ms_
Definition: ResultSet.h:783
int8_t * host_estimator_buffer_
Definition: ResultSet.h:799
const ExecutorDeviceType device_type_
Definition: ResultSet.h:772
size_t fetched_so_far_
Definition: ResultSet.h:778
size_t crt_row_buff_idx_
Definition: ResultSet.h:777
bool separate_varlen_storage_valid_
Definition: ResultSet.h:806
int64_t render_time_ms_
Definition: ResultSet.h:784
const int device_id_
Definition: ResultSet.h:773
+ Here is the call graph for this function:

◆ ResultSet() [2/5]

ResultSet::ResultSet ( const std::vector< TargetInfo > &  targets,
const std::vector< ColumnLazyFetchInfo > &  lazy_fetch_info,
const std::vector< std::vector< const int8_t *>> &  col_buffers,
const std::vector< std::vector< int64_t >> &  frag_offsets,
const std::vector< int64_t > &  consistent_frag_sizes,
const ExecutorDeviceType  device_type,
const int  device_id,
const QueryMemoryDescriptor &  query_mem_desc,
const std::shared_ptr< RowSetMemoryOwner >  row_set_mem_owner,
const Executor *  executor 
)

◆ ResultSet() [3/5]

ResultSet::ResultSet ( const std::shared_ptr< const Analyzer::Estimator > ,
const ExecutorDeviceType  device_type,
const int  device_id,
Data_Namespace::DataMgr *  data_mgr 
)

◆ ResultSet() [4/5]

ResultSet::ResultSet ( const std::string &  explanation)

Definition at line 177 of file ResultSet.cpp.

179  , device_id_(-1)
180  , fetched_so_far_(0)
181  , queue_time_ms_(0)
182  , render_time_ms_(0)
183  , estimator_buffer_(nullptr)
184  , host_estimator_buffer_(nullptr)
186  , explanation_(explanation)
187  , just_explain_(true)
188  , cached_row_count_(-1)
int8_t * estimator_buffer_
Definition: ResultSet.h:798
GeoReturnType geo_return_type_
Definition: ResultSet.h:813
const bool just_explain_
Definition: ResultSet.h:808
std::atomic< ssize_t > cached_row_count_
Definition: ResultSet.h:809
int64_t queue_time_ms_
Definition: ResultSet.h:783
std::string explanation_
Definition: ResultSet.h:807
int8_t * host_estimator_buffer_
Definition: ResultSet.h:799
const ExecutorDeviceType device_type_
Definition: ResultSet.h:772
size_t fetched_so_far_
Definition: ResultSet.h:778
bool separate_varlen_storage_valid_
Definition: ResultSet.h:806
int64_t render_time_ms_
Definition: ResultSet.h:784
const int device_id_
Definition: ResultSet.h:773

◆ ResultSet() [5/5]

ResultSet::ResultSet ( int64_t  queue_time_ms,
int64_t  render_time_ms,
const std::shared_ptr< RowSetMemoryOwner >  row_set_mem_owner 
)

Definition at line 191 of file ResultSet.cpp.

195  , device_id_(-1)
196  , fetched_so_far_(0)
197  , queue_time_ms_(queue_time_ms)
198  , render_time_ms_(render_time_ms)
199  , estimator_buffer_(nullptr)
200  , host_estimator_buffer_(nullptr)
202  , just_explain_(true)
203  , cached_row_count_(-1)
int8_t * estimator_buffer_
Definition: ResultSet.h:798
GeoReturnType geo_return_type_
Definition: ResultSet.h:813
const bool just_explain_
Definition: ResultSet.h:808
std::atomic< ssize_t > cached_row_count_
Definition: ResultSet.h:809
int64_t queue_time_ms_
Definition: ResultSet.h:783
int8_t * host_estimator_buffer_
Definition: ResultSet.h:799
const ExecutorDeviceType device_type_
Definition: ResultSet.h:772
size_t fetched_so_far_
Definition: ResultSet.h:778
bool separate_varlen_storage_valid_
Definition: ResultSet.h:806
int64_t render_time_ms_
Definition: ResultSet.h:784
const int device_id_
Definition: ResultSet.h:773

◆ ~ResultSet()

ResultSet::~ResultSet ( )

Definition at line 206 of file ResultSet.cpp.

References appended_storage_, CHECK, CPU, device_type_, estimator_buffer_, host_estimator_buffer_, and storage_.

206  {
207  if (storage_) {
208  CHECK(storage_->getUnderlyingBuffer());
209  if (!storage_->buff_is_provided_) {
210  free(storage_->getUnderlyingBuffer());
211  }
212  }
213  for (auto& storage : appended_storage_) {
214  if (storage && !storage->buff_is_provided_) {
215  free(storage->getUnderlyingBuffer());
216  }
217  }
221  }
222 }
int8_t * estimator_buffer_
Definition: ResultSet.h:798
std::vector< std::unique_ptr< ResultSetStorage > > appended_storage_
Definition: ResultSet.h:776
std::unique_ptr< ResultSetStorage > storage_
Definition: ResultSet.h:775
int8_t * host_estimator_buffer_
Definition: ResultSet.h:799
const ExecutorDeviceType device_type_
Definition: ResultSet.h:772
#define CHECK(condition)
Definition: Logger.h:187

Member Function Documentation

◆ advanceCursorToNextEntry() [1/2]

void ResultSet::advanceCursorToNextEntry ( ResultSetRowIterator &  iter) const
private

Definition at line 679 of file ResultSetIteration.cpp.

References CHECK_LE, ResultSetRowIterator::crt_row_buff_idx_, drop_first_, entryCount(), ResultSetRowIterator::fetched_so_far_, findStorage(), ResultSetRowIterator::global_entry_idx_, ResultSetRowIterator::global_entry_idx_valid_, keep_first_, and permutation_.

679  {
681  iter.global_entry_idx_valid_ = false;
682  return;
683  }
684 
685  while (iter.crt_row_buff_idx_ < entryCount()) {
686  const auto entry_idx = permutation_.empty() ? iter.crt_row_buff_idx_
688  const auto storage_lookup_result = findStorage(entry_idx);
689  const auto storage = storage_lookup_result.storage_ptr;
690  const auto fixedup_entry_idx = storage_lookup_result.fixedup_entry_idx;
691  if (!storage->isEmptyEntry(fixedup_entry_idx)) {
692  if (iter.fetched_so_far_ < drop_first_) {
693  ++iter.fetched_so_far_;
694  } else {
695  break;
696  }
697  }
698  ++iter.crt_row_buff_idx_;
699  }
700  if (permutation_.empty()) {
702  } else {
704  iter.global_entry_idx_ = iter.crt_row_buff_idx_ == permutation_.size()
705  ? iter.crt_row_buff_idx_
707  }
708 
710 
711  if (iter.global_entry_idx_valid_) {
712  ++iter.crt_row_buff_idx_;
713  ++iter.fetched_so_far_;
714  }
715 }
size_t entryCount() const
size_t keep_first_
Definition: ResultSet.h:780
std::vector< uint32_t > permutation_
Definition: ResultSet.h:782
size_t global_entry_idx_
Definition: ResultSet.h:274
size_t drop_first_
Definition: ResultSet.h:779
StorageLookupResult findStorage(const size_t entry_idx) const
Definition: ResultSet.cpp:615
#define CHECK_LE(x, y)
Definition: Logger.h:198
size_t crt_row_buff_idx_
Definition: ResultSet.h:273
bool global_entry_idx_valid_
Definition: ResultSet.h:275
+ Here is the call graph for this function:

◆ advanceCursorToNextEntry() [2/2]

size_t ResultSet::advanceCursorToNextEntry ( ) const
private

Definition at line 719 of file ResultSetIteration.cpp.

References CHECK_LE, crt_row_buff_idx_, entryCount(), findStorage(), and permutation_.

719  {
720  while (crt_row_buff_idx_ < entryCount()) {
721  const auto entry_idx =
723  const auto storage_lookup_result = findStorage(entry_idx);
724  const auto storage = storage_lookup_result.storage_ptr;
725  const auto fixedup_entry_idx = storage_lookup_result.fixedup_entry_idx;
726  if (!storage->isEmptyEntry(fixedup_entry_idx)) {
727  break;
728  }
730  }
731  if (permutation_.empty()) {
732  return crt_row_buff_idx_;
733  }
737 }
size_t entryCount() const
std::vector< uint32_t > permutation_
Definition: ResultSet.h:782
StorageLookupResult findStorage(const size_t entry_idx) const
Definition: ResultSet.cpp:615
#define CHECK_LE(x, y)
Definition: Logger.h:198
size_t crt_row_buff_idx_
Definition: ResultSet.h:777
+ Here is the call graph for this function:

◆ allocateStorage() [1/3]

const ResultSetStorage * ResultSet::allocateStorage ( ) const

Definition at line 228 of file ResultSet.cpp.

References CHECK, checked_malloc(), device_type_, QueryMemoryDescriptor::getBufferSizeBytes(), query_mem_desc_, storage_, and targets_.

228  {
229  CHECK(!storage_);
230  auto buff = static_cast<int8_t*>(
232  storage_.reset(new ResultSetStorage(targets_, query_mem_desc_, buff, false));
233  return storage_.get();
234 }
QueryMemoryDescriptor query_mem_desc_
Definition: ResultSet.h:774
std::unique_ptr< ResultSetStorage > storage_
Definition: ResultSet.h:775
const std::vector< TargetInfo > targets_
Definition: ResultSet.h:771
void * checked_malloc(const size_t size)
Definition: checked_alloc.h:40
size_t getBufferSizeBytes(const RelAlgExecutionUnit &ra_exe_unit, const unsigned thread_count, const ExecutorDeviceType device_type) const
const ExecutorDeviceType device_type_
Definition: ResultSet.h:772
#define CHECK(condition)
Definition: Logger.h:187
+ Here is the call graph for this function:

◆ allocateStorage() [2/3]

const ResultSetStorage* ResultSet::allocateStorage ( int8_t *  ,
const std::vector< int64_t > &   
) const

◆ allocateStorage() [3/3]

const ResultSetStorage* ResultSet::allocateStorage ( const std::vector< int64_t > &  ) const

◆ append()

void ResultSet::append ( ResultSet &  that)

Definition at line 263 of file ResultSet.cpp.

References appended_storage_, cached_row_count_, CHECK, CHECK_EQ, chunk_iters_, chunks_, col_buffers_, consistent_frag_sizes_, frag_offsets_, QueryMemoryDescriptor::getEntryCount(), literal_buffers_, query_mem_desc_, separate_varlen_storage_valid_, serialized_varlen_buffer_, and QueryMemoryDescriptor::setEntryCount().

263  {
265  if (!that.storage_) {
266  return;
267  }
268  appended_storage_.push_back(std::move(that.storage_));
271  appended_storage_.back()->query_mem_desc_.getEntryCount());
272  chunks_.insert(chunks_.end(), that.chunks_.begin(), that.chunks_.end());
273  col_buffers_.insert(
274  col_buffers_.end(), that.col_buffers_.begin(), that.col_buffers_.end());
275  frag_offsets_.insert(
276  frag_offsets_.end(), that.frag_offsets_.begin(), that.frag_offsets_.end());
278  that.consistent_frag_sizes_.begin(),
279  that.consistent_frag_sizes_.end());
280  chunk_iters_.insert(
281  chunk_iters_.end(), that.chunk_iters_.begin(), that.chunk_iters_.end());
283  CHECK(that.separate_varlen_storage_valid_);
285  that.serialized_varlen_buffer_.begin(),
286  that.serialized_varlen_buffer_.end());
287  }
288  for (auto& buff : that.literal_buffers_) {
289  literal_buffers_.push_back(std::move(buff));
290  }
291 }
#define CHECK_EQ(x, y)
Definition: Logger.h:195
void setEntryCount(const size_t val)
std::vector< std::unique_ptr< ResultSetStorage > > appended_storage_
Definition: ResultSet.h:776
QueryMemoryDescriptor query_mem_desc_
Definition: ResultSet.h:774
std::vector< std::shared_ptr< std::list< ChunkIter > > > chunk_iters_
Definition: ResultSet.h:788
std::vector< SerializedVarlenBufferStorage > serialized_varlen_buffer_
Definition: ResultSet.h:805
std::list< std::shared_ptr< Chunk_NS::Chunk > > chunks_
Definition: ResultSet.h:787
std::vector< std::vector< int8_t > > literal_buffers_
Definition: ResultSet.h:791
std::atomic< ssize_t > cached_row_count_
Definition: ResultSet.h:809
std::vector< std::vector< std::vector< const int8_t * > > > col_buffers_
Definition: ResultSet.h:793
std::vector< std::vector< int64_t > > consistent_frag_sizes_
Definition: ResultSet.h:795
#define CHECK(condition)
Definition: Logger.h:187
std::vector< std::vector< std::vector< int64_t > > > frag_offsets_
Definition: ResultSet.h:794
bool separate_varlen_storage_valid_
Definition: ResultSet.h:806
+ Here is the call graph for this function:

◆ baselineSort()

void ResultSet::baselineSort ( const std::list< Analyzer::OrderEntry > &  order_entries,
const size_t  top_n 
)
private

Referenced by sort().

+ Here is the caller graph for this function:

◆ canUseFastBaselineSort()

bool ResultSet::canUseFastBaselineSort ( const std::list< Analyzer::OrderEntry > &  order_entries,
const size_t  top_n 
)
private

Referenced by sort().

+ Here is the caller graph for this function:

◆ colCount()

size_t ResultSet::colCount ( ) const

Definition at line 297 of file ResultSet.cpp.

References just_explain_, and targets_.

297  {
298  return just_explain_ ? 1 : targets_.size();
299 }
const bool just_explain_
Definition: ResultSet.h:808
const std::vector< TargetInfo > targets_
Definition: ResultSet.h:771

◆ copyColumnIntoBuffer()

void ResultSet::copyColumnIntoBuffer ( const size_t  column_idx,
int8_t *  output_buffer,
const size_t  output_buffer_size 
) const

For each specified column, this function goes through all available storages and copy its content into a contiguous output_buffer

Definition at line 1105 of file ResultSetIteration.cpp.

References appended_storage_, CHECK, CHECK_LT, QueryMemoryDescriptor::getPaddedSlotWidthBytes(), QueryMemoryDescriptor::getSlotCount(), isFastColumnarConversionPossible(), query_mem_desc_, and storage_.

1107  {
1109  CHECK_LT(column_idx, query_mem_desc_.getSlotCount());
1110  CHECK(output_buffer_size > 0);
1111  CHECK(output_buffer);
1112  const auto column_width_size = query_mem_desc_.getPaddedSlotWidthBytes(column_idx);
1113  size_t out_buff_offset = 0;
1114 
1115  // the main storage:
1116  const size_t crt_storage_row_count = storage_->query_mem_desc_.getEntryCount();
1117  const size_t crt_buffer_size = crt_storage_row_count * column_width_size;
1118  const size_t column_offset = storage_->query_mem_desc_.getColOffInBytes(column_idx);
1119  const int8_t* storage_buffer = storage_->getUnderlyingBuffer() + column_offset;
1120  CHECK(crt_buffer_size <= output_buffer_size);
1121  std::memcpy(output_buffer, storage_buffer, crt_buffer_size);
1122 
1123  out_buff_offset += crt_buffer_size;
1124 
1125  // the appended storages:
1126  for (size_t i = 0; i < appended_storage_.size(); i++) {
1127  CHECK_LT(out_buff_offset, output_buffer_size);
1128  const size_t crt_storage_row_count =
1129  appended_storage_[i]->query_mem_desc_.getEntryCount();
1130  const size_t crt_buffer_size = crt_storage_row_count * column_width_size;
1131  const size_t column_offset =
1132  appended_storage_[i]->query_mem_desc_.getColOffInBytes(column_idx);
1133  const int8_t* storage_buffer =
1134  appended_storage_[i]->getUnderlyingBuffer() + column_offset;
1135  CHECK(out_buff_offset + crt_buffer_size <= output_buffer_size);
1136  std::memcpy(output_buffer + out_buff_offset, storage_buffer, crt_buffer_size);
1137 
1138  out_buff_offset += crt_buffer_size;
1139  }
1140 }
std::vector< std::unique_ptr< ResultSetStorage > > appended_storage_
Definition: ResultSet.h:776
QueryMemoryDescriptor query_mem_desc_
Definition: ResultSet.h:774
std::unique_ptr< ResultSetStorage > storage_
Definition: ResultSet.h:775
const int8_t getPaddedSlotWidthBytes(const size_t slot_idx) const
#define CHECK_LT(x, y)
Definition: Logger.h:197
#define CHECK(condition)
Definition: Logger.h:187
bool isFastColumnarConversionPossible() const
Definition: ResultSet.h:507
+ Here is the call graph for this function:

◆ create_active_buffer_set()

void ResultSet::create_active_buffer_set ( BufferSet &  count_distinct_active_buffer_set) const
private

◆ createComparator()

std::function<bool(const uint32_t, const uint32_t)> ResultSet::createComparator ( const std::list< Analyzer::OrderEntry > &  order_entries,
const bool  use_heap 
)
inlineprivate

Definition at line 710 of file ResultSet.h.

References QueryMemoryDescriptor::didOutputColumnar(), and ResultSetStorage::query_mem_desc_.

Referenced by parallelTop(), and sort().

712  {
715  std::make_unique<ResultSetComparator<ColumnWiseTargetAccessor>>(
716  order_entries, use_heap, this);
717  return [this](const uint32_t lhs, const uint32_t rhs) -> bool {
718  return (*this->column_wise_comparator_)(lhs, rhs);
719  };
720  } else {
721  row_wise_comparator_ = std::make_unique<ResultSetComparator<RowWiseTargetAccessor>>(
722  order_entries, use_heap, this);
723  return [this](const uint32_t lhs, const uint32_t rhs) -> bool {
724  return (*this->row_wise_comparator_)(lhs, rhs);
725  };
726  }
727  }
std::unique_ptr< ResultSetComparator< ColumnWiseTargetAccessor > > column_wise_comparator_
Definition: ResultSet.h:818
QueryMemoryDescriptor query_mem_desc_
Definition: ResultSet.h:774
std::unique_ptr< ResultSetComparator< RowWiseTargetAccessor > > row_wise_comparator_
Definition: ResultSet.h:817
+ Here is the call graph for this function:
+ Here is the caller graph for this function:

◆ definitelyHasNoRows()

bool ResultSet::definitelyHasNoRows ( ) const

Definition at line 383 of file ResultSet.cpp.

References estimator_, just_explain_, and storage_.

383  {
384  return !storage_ && !estimator_ && !just_explain_;
385 }
std::unique_ptr< ResultSetStorage > storage_
Definition: ResultSet.h:775
const bool just_explain_
Definition: ResultSet.h:808
const std::shared_ptr< const Analyzer::Estimator > estimator_
Definition: ResultSet.h:797

◆ doBaselineSort()

void ResultSet::doBaselineSort ( const ExecutorDeviceType  device_type,
const std::list< Analyzer::OrderEntry > &  order_entries,
const size_t  top_n 
)
private

Referenced by sort().

+ Here is the caller graph for this function:

◆ dropFirstN()

void ResultSet::dropFirstN ( const size_t  n)

Definition at line 86 of file ResultSet.cpp.

References CHECK_EQ.

86  {
88  drop_first_ = n;
89 }
#define CHECK_EQ(x, y)
Definition: Logger.h:195
size_t drop_first_
Definition: ResultSet.h:779
std::atomic< ssize_t > cached_row_count_
Definition: ResultSet.h:809

◆ entryCount()

size_t ResultSet::entryCount ( ) const

Definition at line 739 of file ResultSetIteration.cpp.

References QueryMemoryDescriptor::getEntryCount(), permutation_, and query_mem_desc_.

Referenced by advanceCursorToNextEntry(), parallelRowCount(), rowCount(), and sort().

739  {
740  return permutation_.empty() ? query_mem_desc_.getEntryCount() : permutation_.size();
741 }
QueryMemoryDescriptor query_mem_desc_
Definition: ResultSet.h:774
std::vector< uint32_t > permutation_
Definition: ResultSet.h:782
+ Here is the call graph for this function:
+ Here is the caller graph for this function:

◆ fillOneEntry()

void ResultSet::fillOneEntry ( const std::vector< int64_t > &  entry)
inline

Definition at line 446 of file ResultSet.h.

References CHECK.

446  {
447  CHECK(storage_);
448  if (storage_->query_mem_desc_.didOutputColumnar()) {
449  storage_->fillOneEntryColWise(entry);
450  } else {
451  storage_->fillOneEntryRowWise(entry);
452  }
453  }
std::unique_ptr< ResultSetStorage > storage_
Definition: ResultSet.h:775
#define CHECK(condition)
Definition: Logger.h:187

◆ findStorage()

ResultSet::StorageLookupResult ResultSet::findStorage ( const size_t  entry_idx) const
private

Definition at line 615 of file ResultSet.cpp.

References appended_storage_, CHECK_LE, getStorageIndex(), and storage_.

Referenced by advanceCursorToNextEntry(), initPermutationBuffer(), and makeGeoTargetValue().

615  {
616  ssize_t stg_idx{-1};
617  size_t fixedup_entry_idx{entry_idx};
618  std::tie(stg_idx, fixedup_entry_idx) = getStorageIndex(entry_idx);
619  CHECK_LE(ssize_t(0), stg_idx);
620  return {stg_idx ? appended_storage_[stg_idx - 1].get() : storage_.get(),
621  fixedup_entry_idx,
622  static_cast<size_t>(stg_idx)};
623 }
std::vector< std::unique_ptr< ResultSetStorage > > appended_storage_
Definition: ResultSet.h:776
std::unique_ptr< ResultSetStorage > storage_
Definition: ResultSet.h:775
std::pair< ssize_t, size_t > getStorageIndex(const size_t entry_idx) const
Definition: ResultSet.cpp:594
#define CHECK_LE(x, y)
Definition: Logger.h:198
+ Here is the call graph for this function:
+ Here is the caller graph for this function:

◆ fixupCountDistinctPointers()

void ResultSet::fixupCountDistinctPointers ( )
private

◆ fixupQueryMemoryDescriptor()

QueryMemoryDescriptor ResultSet::fixupQueryMemoryDescriptor ( const QueryMemoryDescriptor query_mem_desc)
static

Definition at line 452 of file ResultSet.cpp.

References QueryMemoryDescriptor::didOutputColumnar(), and QueryMemoryDescriptor::resetGroupColWidths().

Referenced by QueryExecutionContext::groupBufferToDeinterleavedResults(), QueryMemoryInitializer::initGroups(), QueryMemoryInitializer::QueryMemoryInitializer(), and Executor::reduceMultiDeviceResults().

453  {
454  auto query_mem_desc_copy = query_mem_desc;
455  query_mem_desc_copy.resetGroupColWidths(
456  std::vector<int8_t>(query_mem_desc_copy.groupColWidthsSize(), 8));
457  if (query_mem_desc.didOutputColumnar()) {
458  return query_mem_desc_copy;
459  }
460  query_mem_desc_copy.alignPaddedSlots();
461  return query_mem_desc_copy;
462 }
void resetGroupColWidths(const std::vector< int8_t > &new_group_col_widths)
+ Here is the call graph for this function:
+ Here is the caller graph for this function:

◆ getBufferSizeBytes()

size_t ResultSet::getBufferSizeBytes ( const ExecutorDeviceType  device_type) const

Definition at line 743 of file ResultSetIteration.cpp.

References CHECK, and storage_.

743  {
744  CHECK(storage_);
745  return storage_->query_mem_desc_.getBufferSizeBytes(device_type);
746 }
std::unique_ptr< ResultSetStorage > storage_
Definition: ResultSet.h:775
#define CHECK(condition)
Definition: Logger.h:187

◆ getColType()

SQLTypeInfo ResultSet::getColType ( const size_t  col_idx) const

Definition at line 301 of file ResultSet.cpp.

References CHECK_LT, just_explain_, kAVG, kDOUBLE, kTEXT, and targets_.

301  {
302  if (just_explain_) {
303  return SQLTypeInfo(kTEXT, false);
304  }
305  CHECK_LT(col_idx, targets_.size());
306  return targets_[col_idx].agg_kind == kAVG ? SQLTypeInfo(kDOUBLE, false)
307  : targets_[col_idx].sql_type;
308 }
const bool just_explain_
Definition: ResultSet.h:808
const std::vector< TargetInfo > targets_
Definition: ResultSet.h:771
SQLTypeInfoCore< ArrayContextTypeSizer, ExecutorTypePackaging, DateTimeFacilities > SQLTypeInfo
Definition: sqltypes.h:819
#define CHECK_LT(x, y)
Definition: Logger.h:197
Definition: sqltypes.h:54
Definition: sqldefs.h:71

◆ getColumnFrag()

const std::vector< const int8_t * > & ResultSet::getColumnFrag ( const size_t  storage_idx,
const size_t  col_logical_idx,
int64_t &  global_idx 
) const
private

Definition at line 1076 of file ResultSetIteration.cpp.

References CHECK_EQ, CHECK_GE, CHECK_LE, CHECK_LT, col_buffers_, consistent_frag_sizes_, frag_offsets_, and anonymous_namespace{ResultSetIteration.cpp}::get_frag_id_and_local_idx().

Referenced by lazyReadInt(), makeGeoTargetValue(), makeTargetValue(), and makeVarlenTargetValue().

1078  {
1079  CHECK_LT(static_cast<size_t>(storage_idx), col_buffers_.size());
1080  if (col_buffers_[storage_idx].size() > 1) {
1081  int64_t frag_id = 0;
1082  int64_t local_idx = global_idx;
1083  if (consistent_frag_sizes_[storage_idx][col_logical_idx] != -1) {
1084  frag_id = global_idx / consistent_frag_sizes_[storage_idx][col_logical_idx];
1085  local_idx = global_idx % consistent_frag_sizes_[storage_idx][col_logical_idx];
1086  } else {
1087  std::tie(frag_id, local_idx) = get_frag_id_and_local_idx(
1088  frag_offsets_[storage_idx], col_logical_idx, global_idx);
1089  CHECK_LE(local_idx, global_idx);
1090  }
1091  CHECK_GE(frag_id, int64_t(0));
1092  CHECK_LT(static_cast<size_t>(frag_id), col_buffers_[storage_idx].size());
1093  global_idx = local_idx;
1094  return col_buffers_[storage_idx][frag_id];
1095  } else {
1096  CHECK_EQ(size_t(1), col_buffers_[storage_idx].size());
1097  return col_buffers_[storage_idx][0];
1098  }
1099 }
#define CHECK_EQ(x, y)
Definition: Logger.h:195
#define CHECK_GE(x, y)
Definition: Logger.h:200
#define CHECK_LT(x, y)
Definition: Logger.h:197
#define CHECK_LE(x, y)
Definition: Logger.h:198
std::vector< std::vector< std::vector< const int8_t * > > > col_buffers_
Definition: ResultSet.h:793
std::vector< std::vector< int64_t > > consistent_frag_sizes_
Definition: ResultSet.h:795
std::vector< std::vector< std::vector< int64_t > > > frag_offsets_
Definition: ResultSet.h:794
std::pair< int64_t, int64_t > get_frag_id_and_local_idx(const std::vector< std::vector< T >> &frag_offsets, const size_t tab_or_col_idx, const int64_t global_idx)
+ Here is the call graph for this function:
+ Here is the caller graph for this function:

◆ getColumnInternal()

InternalTargetValue ResultSet::getColumnInternal ( const int8_t *  buff,
const size_t  entry_idx,
const size_t  target_logical_idx,
const StorageLookupResult storage_lookup_result 
) const
private

◆ getCurrentRowBufferIndex()

size_t ResultSet::getCurrentRowBufferIndex ( ) const

Definition at line 256 of file ResultSet.cpp.

References crt_row_buff_idx_.

256  {
257  if (crt_row_buff_idx_ == 0) {
258  throw std::runtime_error("current row buffer iteration index is undefined");
259  }
260  return crt_row_buff_idx_ - 1;
261 }
size_t crt_row_buff_idx_
Definition: ResultSet.h:777

◆ getDataManager()

Data_Namespace::DataMgr* ResultSet::getDataManager ( ) const
private

◆ getDeviceEstimatorBuffer()

int8_t * ResultSet::getDeviceEstimatorBuffer ( ) const

Definition at line 401 of file ResultSet.cpp.

References CHECK, device_type_, estimator_buffer_, and GPU.

401  {
402  CHECK(device_type_ == ExecutorDeviceType::GPU);
403  return estimator_buffer_;
404 }
int8_t * estimator_buffer_
Definition: ResultSet.h:798
const ExecutorDeviceType device_type_
Definition: ResultSet.h:772
#define CHECK(condition)
Definition: Logger.h:187

◆ getDeviceId()

int ResultSet::getDeviceId ( ) const

Definition at line 448 of file ResultSet.cpp.

References device_id_.

448  {
449  return device_id_;
450 }
const int device_id_
Definition: ResultSet.h:773

◆ getDeviceType()

ExecutorDeviceType ResultSet::getDeviceType ( ) const

Definition at line 224 of file ResultSet.cpp.

References device_type_.

224  {
225  return device_type_;
226 }
const ExecutorDeviceType device_type_
Definition: ResultSet.h:772

◆ getDistinctBufferRefFromBufferRowwise()

int64_t ResultSet::getDistinctBufferRefFromBufferRowwise ( int8_t *  rowwise_target_ptr,
const TargetInfo target_info 
) const
private

◆ getGeoReturnType()

GeoReturnType ResultSet::getGeoReturnType ( ) const
inline

Definition at line 493 of file ResultSet.h.

493 { return geo_return_type_; }
GeoReturnType geo_return_type_
Definition: ResultSet.h:813

◆ getGpuCount()

int ResultSet::getGpuCount ( ) const
private

Referenced by sort().

+ Here is the caller graph for this function:

◆ getHostEstimatorBuffer()

int8_t * ResultSet::getHostEstimatorBuffer ( ) const

Definition at line 406 of file ResultSet.cpp.

References host_estimator_buffer_.

406  {
407  return host_estimator_buffer_;
408 }
int8_t * host_estimator_buffer_
Definition: ResultSet.h:799

◆ getLazyFetchInfo()

const std::vector<ColumnLazyFetchInfo>& ResultSet::getLazyFetchInfo ( ) const
inline

Definition at line 512 of file ResultSet.h.

512  {
513  return lazy_fetch_info_;
514  }
const std::vector< ColumnLazyFetchInfo > lazy_fetch_info_
Definition: ResultSet.h:792

◆ getLimit()

size_t ResultSet::getLimit ( )

Definition at line 857 of file ResultSet.cpp.

References keep_first_.

857  {
858  return keep_first_;
859 }
size_t keep_first_
Definition: ResultSet.h:780

◆ getNDVEstimator()

size_t ResultSet::getNDVEstimator ( ) const

Definition at line 22 of file CardinalityEstimator.cpp.

References bitmap_set_size(), CHECK, and CHECK_LE.

22  {
23  CHECK(dynamic_cast<const Analyzer::NDVEstimator*>(estimator_.get()));
24  CHECK(host_estimator_buffer_);
25  auto bits_set = bitmap_set_size(host_estimator_buffer_, estimator_->getBufferSize());
26  const auto total_bits = estimator_->getBufferSize() * 8;
27  CHECK_LE(bits_set, total_bits);
28  const auto unset_bits = total_bits - bits_set;
29  const auto ratio = static_cast<double>(unset_bits) / total_bits;
30  if (ratio == 0.) {
31  throw std::runtime_error("Failed to get a high quality cardinality estimation");
32  }
33  return -static_cast<double>(total_bits) * log(ratio);
34 }
const std::shared_ptr< const Analyzer::Estimator > estimator_
Definition: ResultSet.h:797
#define CHECK_LE(x, y)
Definition: Logger.h:198
int8_t * host_estimator_buffer_
Definition: ResultSet.h:799
#define CHECK(condition)
Definition: Logger.h:187
size_t bitmap_set_size(const int8_t *bitmap, const size_t bitmap_byte_sz)
Definition: CountDistinct.h:37
+ Here is the call graph for this function:

◆ getNextRow()

std::vector< TargetValue > ResultSet::getNextRow ( const bool  translate_strings,
const bool  decimal_to_double 
) const

Definition at line 279 of file ResultSetIteration.cpp.

280  {
281  std::lock_guard<std::mutex> lock(row_iteration_mutex_);
282  if (!storage_ && !just_explain_) {
283  return {};
284  }
285  return getNextRowUnlocked(translate_strings, decimal_to_double);
286 }
std::mutex row_iteration_mutex_
Definition: ResultSet.h:810
double decimal_to_double(const SQLTypeInfo &otype, int64_t oval)
std::unique_ptr< ResultSetStorage > storage_
Definition: ResultSet.h:775
std::vector< TargetValue > getNextRowUnlocked(const bool translate_strings, const bool decimal_to_double) const
const bool just_explain_
Definition: ResultSet.h:808

◆ getNextRowImpl()

std::vector< TargetValue > ResultSet::getNextRowImpl ( const bool  translate_strings,
const bool  decimal_to_double 
) const
private

Definition at line 307 of file ResultSetIteration.cpp.

References CHECK, and CHECK_EQ.

308  {
309  auto entry_buff_idx = advanceCursorToNextEntry();
310  if (keep_first_ && fetched_so_far_ >= drop_first_ + keep_first_) {
311  return {};
312  }
313 
314  if (crt_row_buff_idx_ >= entryCount()) {
315  CHECK_EQ(entryCount(), crt_row_buff_idx_);
316  return {};
317  }
318  auto row = getRowAt(entry_buff_idx, translate_strings, decimal_to_double, false);
319  CHECK(!row.empty());
320  ++crt_row_buff_idx_;
321  ++fetched_so_far_;
322 
323  return row;
324 }
#define CHECK_EQ(x, y)
Definition: Logger.h:195
double decimal_to_double(const SQLTypeInfo &otype, int64_t oval)
size_t entryCount() const
size_t keep_first_
Definition: ResultSet.h:780
std::vector< TargetValue > getRowAt(const size_t index) const
size_t drop_first_
Definition: ResultSet.h:779
#define CHECK(condition)
Definition: Logger.h:187
size_t fetched_so_far_
Definition: ResultSet.h:778
size_t crt_row_buff_idx_
Definition: ResultSet.h:777
size_t advanceCursorToNextEntry() const

◆ getNextRowUnlocked()

std::vector< TargetValue > ResultSet::getNextRowUnlocked ( const bool  translate_strings,
const bool  decimal_to_double 
) const
private

Definition at line 288 of file ResultSetIteration.cpp.

Referenced by rowCount().

290  {
291  if (just_explain_) {
292  if (fetched_so_far_) {
293  return {};
294  }
295  fetched_so_far_ = 1;
296  return {explanation_};
297  }
298  while (fetched_so_far_ < drop_first_) {
299  const auto row = getNextRowImpl(translate_strings, decimal_to_double);
300  if (row.empty()) {
301  return row;
302  }
303  }
304  return getNextRowImpl(translate_strings, decimal_to_double);
305 }
std::vector< TargetValue > getNextRowImpl(const bool translate_strings, const bool decimal_to_double) const
double decimal_to_double(const SQLTypeInfo &otype, int64_t oval)
const bool just_explain_
Definition: ResultSet.h:808
size_t drop_first_
Definition: ResultSet.h:779
std::string explanation_
Definition: ResultSet.h:807
size_t fetched_so_far_
Definition: ResultSet.h:778
+ Here is the caller graph for this function:

◆ getOneColRow()

OneIntegerColumnRow ResultSet::getOneColRow ( const size_t  index) const

Definition at line 217 of file ResultSetIteration.cpp.

References align_to_int64(), CHECK, get_key_bytes_rowwise(), getRowAt(), and row_ptr_rowwise().

217  {
218  const auto storage_lookup_result = findStorage(global_entry_idx);
219  const auto storage = storage_lookup_result.storage_ptr;
220  const auto local_entry_idx = storage_lookup_result.fixedup_entry_idx;
221  if (storage->isEmptyEntry(local_entry_idx)) {
222  return {0, false};
223  }
224  const auto buff = storage->buff_;
225  CHECK(buff);
226  CHECK(!query_mem_desc_.didOutputColumnar());
227  const auto keys_ptr = row_ptr_rowwise(buff, query_mem_desc_, local_entry_idx);
228  const auto key_bytes_with_padding =
229  align_to_int64(get_key_bytes_rowwise(query_mem_desc_));
230  const auto rowwise_target_ptr = keys_ptr + key_bytes_with_padding;
231  const auto tv = getTargetValueFromBufferRowwise(rowwise_target_ptr,
232  keys_ptr,
233  global_entry_idx,
234  targets_.front(),
235  0,
236  0,
237  false,
238  false,
239  false);
240  const auto scalar_tv = boost::get<ScalarTargetValue>(&tv);
241  CHECK(scalar_tv);
242  const auto ival_ptr = boost::get<int64_t>(scalar_tv);
243  CHECK(ival_ptr);
244  return {*ival_ptr, true};
245 }
QueryMemoryDescriptor query_mem_desc_
Definition: ResultSet.h:774
const std::vector< TargetInfo > targets_
Definition: ResultSet.h:771
StorageLookupResult findStorage(const size_t entry_idx) const
Definition: ResultSet.cpp:615
T row_ptr_rowwise(T buff, const QueryMemoryDescriptor &query_mem_desc, const size_t entry_idx)
TargetValue getTargetValueFromBufferRowwise(int8_t *rowwise_target_ptr, int8_t *keys_ptr, const size_t entry_buff_idx, const TargetInfo &target_info, const size_t target_logical_idx, const size_t slot_idx, const bool translate_strings, const bool decimal_to_double, const bool fixup_count_distinct_pointers) const
#define CHECK(condition)
Definition: Logger.h:187
size_t get_key_bytes_rowwise(const QueryMemoryDescriptor &query_mem_desc)
FORCE_INLINE HOST DEVICE T align_to_int64(T addr)
+ Here is the call graph for this function:

◆ getPermutationBuffer()

const std::vector< uint32_t > & ResultSet::getPermutationBuffer ( ) const

Definition at line 551 of file ResultSet.cpp.

References permutation_.

551  {
552  return permutation_;
553 }
std::vector< uint32_t > permutation_
Definition: ResultSet.h:782

◆ getQueryMemDesc()

const QueryMemoryDescriptor & ResultSet::getQueryMemDesc ( ) const

Definition at line 387 of file ResultSet.cpp.

References CHECK, and storage_.

387  {
388  CHECK(storage_);
389  return storage_->query_mem_desc_;
390 }
std::unique_ptr< ResultSetStorage > storage_
Definition: ResultSet.h:775
#define CHECK(condition)
Definition: Logger.h:187

◆ getQueueTime()

int64_t ResultSet::getQueueTime ( ) const

Definition at line 427 of file ResultSet.cpp.

References queue_time_ms_.

427  {
428  return queue_time_ms_;
429 }
int64_t queue_time_ms_
Definition: ResultSet.h:783

◆ getRenderTime()

int64_t ResultSet::getRenderTime ( ) const

Definition at line 431 of file ResultSet.cpp.

References render_time_ms_.

431  {
432  return render_time_ms_;
433 }
int64_t render_time_ms_
Definition: ResultSet.h:784

◆ getRowAt() [1/3]

std::vector<TargetValue> ResultSet::getRowAt ( const size_t  index) const

Referenced by get_byteoff_of_slot(), and getOneColRow().

+ Here is the caller graph for this function:

◆ getRowAt() [2/3]

TargetValue ResultSet::getRowAt ( const size_t  row_idx,
const size_t  col_idx,
const bool  translate_strings,
const bool  decimal_to_double = true 
) const

◆ getRowAt() [3/3]

std::vector<TargetValue> ResultSet::getRowAt ( const size_t  index,
const bool  translate_strings,
const bool  decimal_to_double,
const bool  fixup_count_distinct_pointers,
const std::vector< bool > &  targets_to_skip = {} 
) const
private

◆ getRowAtNoTranslations()

std::vector< TargetValue > ResultSet::getRowAtNoTranslations ( const size_t  index,
const std::vector< bool > &  targets_to_skip = {} 
) const

Definition at line 256 of file ResultSetIteration.cpp.

258  {
259  if (logical_index >= entryCount()) {
260  return {};
261  }
262  const auto entry_idx =
263  permutation_.empty() ? logical_index : permutation_[logical_index];
264  return getRowAt(entry_idx, false, false, false, targets_to_skip);
265 }
size_t entryCount() const
std::vector< TargetValue > getRowAt(const size_t index) const
std::vector< uint32_t > permutation_
Definition: ResultSet.h:782

◆ getRowSetMemOwner()

std::shared_ptr<RowSetMemoryOwner> ResultSet::getRowSetMemOwner ( ) const
inline

Definition at line 467 of file ResultSet.h.

467  {
468  return row_set_mem_owner_;
469  }
const std::shared_ptr< RowSetMemoryOwner > row_set_mem_owner_
Definition: ResultSet.h:781

◆ getStorage()

const ResultSetStorage * ResultSet::getStorage ( ) const

Definition at line 293 of file ResultSet.cpp.

References storage_.

293  {
294  return storage_.get();
295 }
std::unique_ptr< ResultSetStorage > storage_
Definition: ResultSet.h:775

◆ getStorageIndex()

std::pair< ssize_t, size_t > ResultSet::getStorageIndex ( const size_t  entry_idx) const
private

Definition at line 594 of file ResultSet.cpp.

References appended_storage_, CHECK, CHECK_NE, and storage_.

Referenced by findStorage(), makeGeoTargetValue(), makeTargetValue(), and makeVarlenTargetValue().

594  {
595  size_t fixedup_entry_idx = entry_idx;
596  auto entry_count = storage_->query_mem_desc_.getEntryCount();
597  const bool is_rowwise_layout = !storage_->query_mem_desc_.didOutputColumnar();
598  if (fixedup_entry_idx < entry_count) {
599  return {0, fixedup_entry_idx};
600  }
601  fixedup_entry_idx -= entry_count;
602  for (size_t i = 0; i < appended_storage_.size(); ++i) {
603  const auto& desc = appended_storage_[i]->query_mem_desc_;
604  CHECK_NE(is_rowwise_layout, desc.didOutputColumnar());
605  entry_count = desc.getEntryCount();
606  if (fixedup_entry_idx < entry_count) {
607  return {i + 1, fixedup_entry_idx};
608  }
609  fixedup_entry_idx -= entry_count;
610  }
611  CHECK(false);
612  return {-1, entry_idx};
613 }
std::vector< std::unique_ptr< ResultSetStorage > > appended_storage_
Definition: ResultSet.h:776
std::unique_ptr< ResultSetStorage > storage_
Definition: ResultSet.h:775
#define CHECK_NE(x, y)
Definition: Logger.h:196
#define CHECK(condition)
Definition: Logger.h:187
+ Here is the caller graph for this function:

◆ getStringDictionaryPayloadCopy()

std::shared_ptr< const std::vector< std::string > > ResultSet::getStringDictionaryPayloadCopy ( const int  dict_id) const

Definition at line 861 of file ResultSet.cpp.

References CHECK, executor_, and row_set_mem_owner_.

862  {
863  CHECK(executor_);
864  const auto sdp =
865  executor_->getStringDictionaryProxy(dict_id, row_set_mem_owner_, false);
866  return sdp->getDictionary()->copyStrings();
867 }
const Executor * executor_
Definition: ResultSet.h:785
const std::shared_ptr< RowSetMemoryOwner > row_set_mem_owner_
Definition: ResultSet.h:781
#define CHECK(condition)
Definition: Logger.h:187

◆ getTargetInfos()

const std::vector< TargetInfo > & ResultSet::getTargetInfos ( ) const

Definition at line 392 of file ResultSet.cpp.

References targets_.

392  {
393  return targets_;
394 }
const std::vector< TargetInfo > targets_
Definition: ResultSet.h:771

◆ getTargetInitVals()

const std::vector< int64_t > & ResultSet::getTargetInitVals ( ) const

Definition at line 396 of file ResultSet.cpp.

References CHECK, and storage_.

396  {
397  CHECK(storage_);
398  return storage_->target_init_vals_;
399 }
std::unique_ptr< ResultSetStorage > storage_
Definition: ResultSet.h:775
#define CHECK(condition)
Definition: Logger.h:187

◆ getTargetValueFromBufferColwise()

TargetValue ResultSet::getTargetValueFromBufferColwise ( const int8_t *  col_ptr,
const int8_t *  keys_ptr,
const QueryMemoryDescriptor query_mem_desc,
const size_t  local_entry_idx,
const size_t  global_entry_idx,
const TargetInfo target_info,
const size_t  target_logical_idx,
const size_t  slot_idx,
const bool  translate_strings,
const bool  decimal_to_double 
) const
private

Definition at line 1692 of file ResultSetIteration.cpp.

References advance_to_next_columnar_target_buff(), TargetInfo::agg_kind, CHECK, CHECK_GE, anonymous_namespace{ResultSetIteration.cpp}::columnar_elem_ptr(), QueryMemoryDescriptor::didOutputColumnar(), QueryMemoryDescriptor::getEffectiveKeyWidth(), QueryMemoryDescriptor::getEntryCount(), QueryMemoryDescriptor::getPaddedSlotWidthBytes(), QueryMemoryDescriptor::getTargetGroupbyIndex(), TargetInfo::is_agg, SQLTypeInfoCore< TYPE_FACET_PACK >::is_geometry(), is_real_str_or_array(), kAVG, anonymous_namespace{ResultSetIteration.cpp}::make_avg_target_value(), makeGeoTargetValue(), makeTargetValue(), makeVarlenTargetValue(), query_mem_desc_, TargetInfo::sql_type, and QueryMemoryDescriptor::targetGroupbyIndicesSize().

1702  {
1703  CHECK(query_mem_desc_.didOutputColumnar());
1704  const auto col1_ptr = col_ptr;
1705  const auto compact_sz1 = query_mem_desc.getPaddedSlotWidthBytes(slot_idx);
1706  const auto next_col_ptr =
1707  advance_to_next_columnar_target_buff(col1_ptr, query_mem_desc, slot_idx);
1708  const auto col2_ptr = ((target_info.is_agg && target_info.agg_kind == kAVG) ||
1709  is_real_str_or_array(target_info))
1710  ? next_col_ptr
1711  : nullptr;
1712  const auto compact_sz2 = ((target_info.is_agg && target_info.agg_kind == kAVG) ||
1713  is_real_str_or_array(target_info))
1714  ? query_mem_desc.getPaddedSlotWidthBytes(slot_idx + 1)
1715  : 0;
1716 
1717  // TODO(Saman): add required logics for count distinct
1718  // geospatial target values:
1719  if (target_info.sql_type.is_geometry()) {
1720  return makeGeoTargetValue(
1721  col1_ptr, slot_idx, target_info, target_logical_idx, global_entry_idx);
1722  }
1723 
1724  const auto ptr1 = columnar_elem_ptr(local_entry_idx, col1_ptr, compact_sz1);
1725  if (target_info.agg_kind == kAVG || is_real_str_or_array(target_info)) {
1726  CHECK(col2_ptr);
1727  CHECK(compact_sz2);
1728  const auto ptr2 = columnar_elem_ptr(local_entry_idx, col2_ptr, compact_sz2);
1729  return target_info.agg_kind == kAVG
1730  ? make_avg_target_value(ptr1, compact_sz1, ptr2, compact_sz2, target_info)
1731  : makeVarlenTargetValue(ptr1,
1732  compact_sz1,
1733  ptr2,
1734  compact_sz2,
1735  target_info,
1736  target_logical_idx,
1737  translate_strings,
1738  global_entry_idx);
1739  }
1740  if (query_mem_desc_.targetGroupbyIndicesSize() == 0 ||
1741  query_mem_desc_.getTargetGroupbyIndex(target_logical_idx) < 0) {
1742  return makeTargetValue(ptr1,
1743  compact_sz1,
1744  target_info,
1745  target_logical_idx,
1746  translate_strings,
1747  decimal_to_double,
1748  global_entry_idx);
1749  }
1750  const auto key_width = query_mem_desc_.getEffectiveKeyWidth();
1751  const auto key_idx = query_mem_desc_.getTargetGroupbyIndex(target_logical_idx);
1752  CHECK_GE(key_idx, 0);
1753  auto key_col_ptr = keys_ptr + key_idx * query_mem_desc_.getEntryCount() * key_width;
1754  return makeTargetValue(columnar_elem_ptr(local_entry_idx, key_col_ptr, key_width),
1755  key_width,
1756  target_info,
1757  target_logical_idx,
1758  translate_strings,
1759  decimal_to_double,
1760  global_entry_idx);
1761 }
ssize_t getTargetGroupbyIndex(const size_t target_idx) const
double decimal_to_double(const SQLTypeInfo &otype, int64_t oval)
T advance_to_next_columnar_target_buff(T target_ptr, const QueryMemoryDescriptor &query_mem_desc, const size_t target_slot_idx)
SQLTypeInfo sql_type
Definition: TargetInfo.h:42
QueryMemoryDescriptor query_mem_desc_
Definition: ResultSet.h:774
#define CHECK_GE(x, y)
Definition: Logger.h:200
TargetValue make_avg_target_value(const int8_t *ptr1, const int8_t compact_sz1, const int8_t *ptr2, const int8_t compact_sz2, const TargetInfo &target_info)
const int8_t getPaddedSlotWidthBytes(const size_t slot_idx) const
bool is_agg
Definition: TargetInfo.h:40
size_t targetGroupbyIndicesSize() const
SQLAgg agg_kind
Definition: TargetInfo.h:41
bool is_real_str_or_array(const TargetInfo &target_info)
TargetValue makeTargetValue(const int8_t *ptr, const int8_t compact_sz, const TargetInfo &target_info, const size_t target_logical_idx, const bool translate_strings, const bool decimal_to_double, const size_t entry_buff_idx) const
TargetValue makeVarlenTargetValue(const int8_t *ptr1, const int8_t compact_sz1, const int8_t *ptr2, const int8_t compact_sz2, const TargetInfo &target_info, const size_t target_logical_idx, const bool translate_strings, const size_t entry_buff_idx) const
#define CHECK(condition)
Definition: Logger.h:187
bool is_geometry() const
Definition: sqltypes.h:458
TargetValue makeGeoTargetValue(const int8_t *geo_target_ptr, const size_t slot_idx, const TargetInfo &target_info, const size_t target_logical_idx, const size_t entry_buff_idx) const
Definition: sqldefs.h:71
const int8_t * columnar_elem_ptr(const size_t entry_idx, const int8_t *col1_ptr, const int8_t compact_sz1)
size_t getEffectiveKeyWidth() const
+ Here is the call graph for this function:

◆ getTargetValueFromBufferRowwise()

TargetValue ResultSet::getTargetValueFromBufferRowwise ( int8_t *  rowwise_target_ptr,
int8_t *  keys_ptr,
const size_t  entry_buff_idx,
const TargetInfo target_info,
const size_t  target_logical_idx,
const size_t  slot_idx,
const bool  translate_strings,
const bool  decimal_to_double,
const bool  fixup_count_distinct_pointers 
) const
private

Definition at line 1765 of file ResultSetIteration.cpp.

References TargetInfo::agg_kind, CHECK, checked_malloc(), QueryMemoryDescriptor::count_distinct_descriptors_, SQLTypeInfoCore< TYPE_FACET_PACK >::get_compression(), QueryMemoryDescriptor::getEffectiveKeyWidth(), QueryMemoryDescriptor::getLogicalSlotWidthBytes(), QueryMemoryDescriptor::getPaddedSlotWidthBytes(), QueryMemoryDescriptor::getTargetGroupbyIndex(), QueryMemoryDescriptor::hasKeylessHash(), TargetInfo::is_agg, SQLTypeInfoCore< TYPE_FACET_PACK >::is_array(), is_distinct_target(), SQLTypeInfoCore< TYPE_FACET_PACK >::is_geometry(), is_real_str_or_array(), SQLTypeInfoCore< TYPE_FACET_PACK >::is_string(), QueryMemoryDescriptor::isSingleColumnGroupByWithPerfectHash(), kAVG, kENCODING_NONE, anonymous_namespace{ResultSetIteration.cpp}::make_avg_target_value(), makeGeoTargetValue(), makeTargetValue(), makeVarlenTargetValue(), query_mem_desc_, row_set_mem_owner_, separate_varlen_storage_valid_, TargetInfo::sql_type, storage_, QueryMemoryDescriptor::targetGroupbyIndicesSize(), and UNLIKELY.

1774  {
1775  if (UNLIKELY(fixup_count_distinct_pointers)) {
1776  if (is_distinct_target(target_info)) {
1777  auto count_distinct_ptr_ptr = reinterpret_cast<int64_t*>(rowwise_target_ptr);
1778  const auto remote_ptr = *count_distinct_ptr_ptr;
1779  if (remote_ptr) {
1780  const auto ptr = storage_->mappedPtr(remote_ptr);
1781  if (ptr) {
1782  *count_distinct_ptr_ptr = ptr;
1783  } else {
1784  // need to create a zero filled buffer for this remote_ptr
1785  const auto& count_distinct_desc =
1786  query_mem_desc_.count_distinct_descriptors_[target_logical_idx];
1787  const auto bitmap_byte_sz = count_distinct_desc.sub_bitmap_count == 1
1788  ? count_distinct_desc.bitmapSizeBytes()
1789  : count_distinct_desc.bitmapPaddedSizeBytes();
1790  auto count_distinct_buffer =
1791  static_cast<int8_t*>(checked_malloc(bitmap_byte_sz));
1792  memset(count_distinct_buffer, 0, bitmap_byte_sz);
1793  row_set_mem_owner_->addCountDistinctBuffer(
1794  count_distinct_buffer, bitmap_byte_sz, true);
1795  *count_distinct_ptr_ptr = reinterpret_cast<int64_t>(count_distinct_buffer);
1796  }
1797  }
1798  }
1799  return int64_t(0);
1800  }
1801  if (target_info.sql_type.is_geometry()) {
1802  return makeGeoTargetValue(
1803  rowwise_target_ptr, slot_idx, target_info, target_logical_idx, entry_buff_idx);
1804  }
1805 
1806  auto ptr1 = rowwise_target_ptr;
1807  int8_t compact_sz1 = query_mem_desc_.getPaddedSlotWidthBytes(slot_idx);
1808  if (query_mem_desc_.isSingleColumnGroupByWithPerfectHash() &&
1809  !query_mem_desc_.hasKeylessHash() && !target_info.is_agg) {
1810  // Single column perfect hash group by can utilize one slot for both the key and the
1811  // target value if both values fit in 8 bytes. Use the target value actual size for
1812  // this case. If they don't, the target value should be 8 bytes, so we can still use
1813  // the actual size rather than the compact size.
1814  compact_sz1 = query_mem_desc_.getLogicalSlotWidthBytes(slot_idx);
1815  }
1816 
1817  // logic for deciding width of column
1818  if (target_info.agg_kind == kAVG || is_real_str_or_array(target_info)) {
1819  const auto ptr2 =
1820  rowwise_target_ptr + query_mem_desc_.getPaddedSlotWidthBytes(slot_idx);
1821  int8_t compact_sz2 = 0;
1822  // Skip reading the second slot if we have a none encoded string and are using
1823  // the none encoded strings buffer attached to ResultSetStorage
1824  if (!(separate_varlen_storage_valid_ &&
1825  (target_info.sql_type.is_array() ||
1826  (target_info.sql_type.is_string() &&
1827  target_info.sql_type.get_compression() == kENCODING_NONE)))) {
1828  compact_sz2 = query_mem_desc_.getPaddedSlotWidthBytes(slot_idx + 1);
1829  }
1830  if (separate_varlen_storage_valid_ && target_info.is_agg) {
1831  compact_sz2 = 8; // TODO(adb): is there a better way to do this?
1832  }
1833  CHECK(ptr2);
1834  return target_info.agg_kind == kAVG
1835  ? make_avg_target_value(ptr1, compact_sz1, ptr2, compact_sz2, target_info)
1836  : makeVarlenTargetValue(ptr1,
1837  compact_sz1,
1838  ptr2,
1839  compact_sz2,
1840  target_info,
1841  target_logical_idx,
1842  translate_strings,
1843  entry_buff_idx);
1844  }
1845  if (query_mem_desc_.targetGroupbyIndicesSize() == 0 ||
1846  query_mem_desc_.getTargetGroupbyIndex(target_logical_idx) < 0) {
1847  return makeTargetValue(ptr1,
1848  compact_sz1,
1849  target_info,
1850  target_logical_idx,
1851  translate_strings,
1852  decimal_to_double,
1853  entry_buff_idx);
1854  }
1855  const auto key_width = query_mem_desc_.getEffectiveKeyWidth();
1856  ptr1 = keys_ptr + query_mem_desc_.getTargetGroupbyIndex(target_logical_idx) * key_width;
1857  return makeTargetValue(ptr1,
1858  key_width,
1859  target_info,
1860  target_logical_idx,
1861  translate_strings,
1862  decimal_to_double,
1863  entry_buff_idx);
1864 }
ssize_t getTargetGroupbyIndex(const size_t target_idx) const
double decimal_to_double(const SQLTypeInfo &otype, int64_t oval)
SQLTypeInfo sql_type
Definition: TargetInfo.h:42
QueryMemoryDescriptor query_mem_desc_
Definition: ResultSet.h:774
std::unique_ptr< ResultSetStorage > storage_
Definition: ResultSet.h:775
TargetValue make_avg_target_value(const int8_t *ptr1, const int8_t compact_sz1, const int8_t *ptr2, const int8_t compact_sz2, const TargetInfo &target_info)
HOST DEVICE EncodingType get_compression() const
Definition: sqltypes.h:327
const int8_t getPaddedSlotWidthBytes(const size_t slot_idx) const
bool is_array() const
Definition: sqltypes.h:454
const std::shared_ptr< RowSetMemoryOwner > row_set_mem_owner_
Definition: ResultSet.h:781
bool is_agg
Definition: TargetInfo.h:40
void * checked_malloc(const size_t size)
Definition: checked_alloc.h:40
CountDistinctDescriptors count_distinct_descriptors_
bool is_distinct_target(const TargetInfo &target_info)
Definition: TargetInfo.h:116
size_t targetGroupbyIndicesSize() const
SQLAgg agg_kind
Definition: TargetInfo.h:41
#define UNLIKELY(x)
Definition: likely.h:20
bool is_real_str_or_array(const TargetInfo &target_info)
TargetValue makeTargetValue(const int8_t *ptr, const int8_t compact_sz, const TargetInfo &target_info, const size_t target_logical_idx, const bool translate_strings, const bool decimal_to_double, const size_t entry_buff_idx) const
TargetValue makeVarlenTargetValue(const int8_t *ptr1, const int8_t compact_sz1, const int8_t *ptr2, const int8_t compact_sz2, const TargetInfo &target_info, const size_t target_logical_idx, const bool translate_strings, const size_t entry_buff_idx) const
#define CHECK(condition)
Definition: Logger.h:187
bool is_geometry() const
Definition: sqltypes.h:458
const int8_t getLogicalSlotWidthBytes(const size_t slot_idx) const
bool separate_varlen_storage_valid_
Definition: ResultSet.h:806
TargetValue makeGeoTargetValue(const int8_t *geo_target_ptr, const size_t slot_idx, const TargetInfo &target_info, const size_t target_logical_idx, const size_t entry_buff_idx) const
Definition: sqldefs.h:71
bool is_string() const
Definition: sqltypes.h:446
bool isSingleColumnGroupByWithPerfectHash() const
size_t getEffectiveKeyWidth() const
+ Here is the call graph for this function:

◆ getVarlenOrderEntry()

InternalTargetValue ResultSet::getVarlenOrderEntry ( const int64_t  str_ptr,
const size_t  str_len 
) const
private

Definition at line 613 of file ResultSetIteration.cpp.

References CHECK, copy_from_gpu(), CPU, device_id_, device_type_, QueryMemoryDescriptor::getExecutor(), GPU, query_mem_desc_, and row_set_mem_owner_.

614  {
615  char* host_str_ptr{nullptr};
616  std::vector<int8_t> cpu_buffer;
618  cpu_buffer.resize(str_len);
619  const auto executor = query_mem_desc_.getExecutor();
620  CHECK(executor);
621  auto& data_mgr = executor->catalog_->getDataMgr();
622  copy_from_gpu(&data_mgr,
623  &cpu_buffer[0],
624  static_cast<CUdeviceptr>(str_ptr),
625  str_len,
626  device_id_);
627  host_str_ptr = reinterpret_cast<char*>(&cpu_buffer[0]);
628  } else {
630  host_str_ptr = reinterpret_cast<char*>(str_ptr);
631  }
632  std::string str(host_str_ptr, str_len);
633  return InternalTargetValue(row_set_mem_owner_->addString(str));
634 }
QueryMemoryDescriptor query_mem_desc_
Definition: ResultSet.h:774
const Executor * getExecutor() const
const std::shared_ptr< RowSetMemoryOwner > row_set_mem_owner_
Definition: ResultSet.h:781
void copy_from_gpu(Data_Namespace::DataMgr *data_mgr, void *dst, const CUdeviceptr src, const size_t num_bytes, const int device_id)
const ExecutorDeviceType device_type_
Definition: ResultSet.h:772
#define CHECK(condition)
Definition: Logger.h:187
const int device_id_
Definition: ResultSet.h:773
+ Here is the call graph for this function:

◆ holdChunkIterators()

void ResultSet::holdChunkIterators ( const std::shared_ptr< std::list< ChunkIter >>  chunk_iters)
inline

Definition at line 460 of file ResultSet.h.

460  {
461  chunk_iters_.push_back(chunk_iters);
462  }
std::vector< std::shared_ptr< std::list< ChunkIter > > > chunk_iters_
Definition: ResultSet.h:788

◆ holdChunks()

void ResultSet::holdChunks ( const std::list< std::shared_ptr< Chunk_NS::Chunk >> &  chunks)
inline

Definition at line 457 of file ResultSet.h.

457  {
458  chunks_ = chunks;
459  }
std::list< std::shared_ptr< Chunk_NS::Chunk > > chunks_
Definition: ResultSet.h:787

◆ holdLiterals()

void ResultSet::holdLiterals ( std::vector< int8_t > &  literal_buff)
inline

Definition at line 463 of file ResultSet.h.

463  {
464  literal_buffers_.push_back(std::move(literal_buff));
465  }
std::vector< std::vector< int8_t > > literal_buffers_
Definition: ResultSet.h:791

◆ initializeStorage()

void ResultSet::initializeStorage ( ) const

Definition at line 1121 of file ResultSetReduction.cpp.

References QueryMemoryDescriptor::didOutputColumnar(), and ResultSetStorage::query_mem_desc_.

1121  {
1123  storage_->initializeColWise();
1124  } else {
1125  storage_->initializeRowWise();
1126  }
1127 }
QueryMemoryDescriptor query_mem_desc_
Definition: ResultSet.h:774
std::unique_ptr< ResultSetStorage > storage_
Definition: ResultSet.h:775
+ Here is the call graph for this function:

◆ initPermutationBuffer()

std::vector< uint32_t > ResultSet::initPermutationBuffer ( const size_t  start,
const size_t  step 
)
private

Definition at line 533 of file ResultSet.cpp.

References CHECK, CHECK_NE, findStorage(), QueryMemoryDescriptor::getEntryCount(), and query_mem_desc_.

Referenced by parallelTop(), and sort().

534  {
535  CHECK_NE(size_t(0), step);
536  std::vector<uint32_t> permutation;
537  const auto total_entries = query_mem_desc_.getEntryCount();
538  permutation.reserve(total_entries / step);
539  for (size_t i = start; i < total_entries; i += step) {
540  const auto storage_lookup_result = findStorage(i);
541  const auto lhs_storage = storage_lookup_result.storage_ptr;
542  const auto off = storage_lookup_result.fixedup_entry_idx;
543  CHECK(lhs_storage);
544  if (!lhs_storage->isEmptyEntry(off)) {
545  permutation.push_back(i);
546  }
547  }
548  return permutation;
549 }
QueryMemoryDescriptor query_mem_desc_
Definition: ResultSet.h:774
#define CHECK_NE(x, y)
Definition: Logger.h:196
StorageLookupResult findStorage(const size_t entry_idx) const
Definition: ResultSet.cpp:615
#define CHECK(condition)
Definition: Logger.h:187
+ Here is the call graph for this function:
+ Here is the caller graph for this function:

◆ isExplain()

bool ResultSet::isExplain ( ) const

Definition at line 444 of file ResultSet.cpp.

References just_explain_.

444  {
445  return just_explain_;
446 }
const bool just_explain_
Definition: ResultSet.h:808

◆ isFastColumnarConversionPossible()

bool ResultSet::isFastColumnarConversionPossible ( ) const
inline

Definition at line 507 of file ResultSet.h.

References QueryMemoryDescriptor::didOutputColumnar(), QueryMemoryDescriptor::getQueryDescriptionType(), Projection, and ResultSetStorage::query_mem_desc_.

Referenced by copyColumnIntoBuffer().

507  {
508  return query_mem_desc_.didOutputColumnar() && permutation_.empty() &&
510  }
QueryMemoryDescriptor query_mem_desc_
Definition: ResultSet.h:774
std::vector< uint32_t > permutation_
Definition: ResultSet.h:782
QueryDescriptionType getQueryDescriptionType() const
+ Here is the call graph for this function:
+ Here is the caller graph for this function:

◆ isGeoColOnGpu()

bool ResultSet::isGeoColOnGpu ( const size_t  col_idx) const

Definition at line 1269 of file ResultSetIteration.cpp.

References CHECK_LT, device_type_, GPU, IS_GEO, lazy_fetch_info_, separate_varlen_storage_valid_, targets_, and to_string().

1269  {
1270  // This should match the logic in makeGeoTargetValue which ultimately calls
1271  // fetch_data_from_gpu when the geo column is on the device.
1272  // TODO(croot): somehow find a way to refactor this and makeGeoTargetValue to use a
1273  // utility function that handles this logic in one place
1274  CHECK_LT(col_idx, targets_.size());
1275  if (!IS_GEO(targets_[col_idx].sql_type.get_type())) {
1276  throw std::runtime_error("Column target at index " + std::to_string(col_idx) +
1277  " is not a geo column. It is of type " +
1278  targets_[col_idx].sql_type.get_type_name() + ".");
1279  }
1280 
1281  const auto& target_info = targets_[col_idx];
1282  if (separate_varlen_storage_valid_ && !target_info.is_agg) {
1283  return false;
1284  }
1285 
1286  if (!lazy_fetch_info_.empty()) {
1287  CHECK_LT(col_idx, lazy_fetch_info_.size());
1288  if (lazy_fetch_info_[col_idx].is_lazily_fetched) {
1289  return false;
1290  }
1291  }
1292 
1294 }
std::string to_string(char const *&&v)
const std::vector< TargetInfo > targets_
Definition: ResultSet.h:771
const std::vector< ColumnLazyFetchInfo > lazy_fetch_info_
Definition: ResultSet.h:792
#define CHECK_LT(x, y)
Definition: Logger.h:197
const ExecutorDeviceType device_type_
Definition: ResultSet.h:772
bool separate_varlen_storage_valid_
Definition: ResultSet.h:806
#define IS_GEO(T)
Definition: sqltypes.h:165
+ Here is the call graph for this function:

◆ isNull()

bool ResultSet::isNull ( const SQLTypeInfo ti,
const InternalTargetValue val,
const bool  float_argument_input 
)
staticprivate

Definition at line 1959 of file ResultSetIteration.cpp.

References CHECK, SQLTypeInfoCore< TYPE_FACET_PACK >::get_notnull(), InternalTargetValue::i1, InternalTargetValue::i2, InternalTargetValue::isInt(), InternalTargetValue::isNull(), InternalTargetValue::isPair(), InternalTargetValue::isStr(), NULL_DOUBLE, null_val_bit_pattern(), and pair_to_double().

Referenced by ResultSet::ResultSetComparator< BUFFER_ITERATOR_TYPE >::operator()().

1961  {
1962  if (ti.get_notnull()) {
1963  return false;
1964  }
1965  if (val.isInt()) {
1966  return val.i1 == null_val_bit_pattern(ti, float_argument_input);
1967  }
1968  if (val.isPair()) {
1969  return !val.i2 ||
1970  pair_to_double({val.i1, val.i2}, ti, float_argument_input) == NULL_DOUBLE;
1971  }
1972  if (val.isStr()) {
1973  return !val.i1;
1974  }
1975  CHECK(val.isNull());
1976  return true;
1977 }
#define NULL_DOUBLE
Definition: sqltypes.h:177
bool isNull() const
Definition: TargetValue.h:69
bool isPair() const
Definition: TargetValue.h:67
HOST DEVICE bool get_notnull() const
Definition: sqltypes.h:326
double pair_to_double(const std::pair< int64_t, int64_t > &fp_pair, const SQLTypeInfo &ti, const bool float_argument_input)
int64_t null_val_bit_pattern(const SQLTypeInfo &ti, const bool float_argument_input)
bool isStr() const
Definition: TargetValue.h:71
bool isInt() const
Definition: TargetValue.h:65
#define CHECK(condition)
Definition: Logger.h:187
+ Here is the call graph for this function:
+ Here is the caller graph for this function:

◆ isPermutationBufferEmpty()

const bool ResultSet::isPermutationBufferEmpty ( ) const
inline

Definition at line 472 of file ResultSet.h.

472 { return permutation_.empty(); };
std::vector< uint32_t > permutation_
Definition: ResultSet.h:782

◆ isRowAtEmpty()

bool ResultSet::isRowAtEmpty ( const size_t  index) const

Definition at line 267 of file ResultSetIteration.cpp.

Referenced by parallelRowCount().

267  {
268  if (logical_index >= entryCount()) {
269  return true;
270  }
271  const auto entry_idx =
272  permutation_.empty() ? logical_index : permutation_[logical_index];
273  const auto storage_lookup_result = findStorage(entry_idx);
274  const auto storage = storage_lookup_result.storage_ptr;
275  const auto local_entry_idx = storage_lookup_result.fixedup_entry_idx;
276  return storage->isEmptyEntry(local_entry_idx);
277 }
size_t entryCount() const
std::vector< uint32_t > permutation_
Definition: ResultSet.h:782
StorageLookupResult findStorage(const size_t entry_idx) const
Definition: ResultSet.cpp:615
+ Here is the caller graph for this function:

◆ isTruncated()

bool ResultSet::isTruncated ( ) const

Definition at line 440 of file ResultSet.cpp.

References drop_first_, and keep_first_.

440  {
441  return keep_first_ + drop_first_;
442 }
size_t keep_first_
Definition: ResultSet.h:780
size_t drop_first_
Definition: ResultSet.h:779

◆ keepFirstN()

void ResultSet::keepFirstN ( const size_t  n)

Definition at line 81 of file ResultSet.cpp.

References CHECK_EQ.

81  {
83  keep_first_ = n;
84 }
#define CHECK_EQ(x, y)
Definition: Logger.h:195
size_t keep_first_
Definition: ResultSet.h:780
std::atomic< ssize_t > cached_row_count_
Definition: ResultSet.h:809

◆ lazyReadInt()

int64_t ResultSet::lazyReadInt ( const int64_t  ival,
const size_t  target_logical_idx,
const StorageLookupResult storage_lookup_result 
) const
private

Definition at line 636 of file ResultSetIteration.cpp.

References CHECK, CHECK_LT, ChunkIter_get_nth(), col_buffers_, ResultSet::StorageLookupResult::fixedup_entry_idx, getColumnFrag(), VarlenDatum::is_null, kENCODING_NONE, lazy_decode(), lazy_fetch_info_, VarlenDatum::length, VarlenDatum::pointer, row_set_mem_owner_, ResultSet::StorageLookupResult::storage_idx, and targets_.

638  {
639  if (!lazy_fetch_info_.empty()) {
640  CHECK_LT(target_logical_idx, lazy_fetch_info_.size());
641  const auto& col_lazy_fetch = lazy_fetch_info_[target_logical_idx];
642  if (col_lazy_fetch.is_lazily_fetched) {
643  CHECK_LT(static_cast<size_t>(storage_lookup_result.storage_idx),
644  col_buffers_.size());
645  int64_t ival_copy = ival;
646  auto& frag_col_buffers =
647  getColumnFrag(static_cast<size_t>(storage_lookup_result.storage_idx),
648  target_logical_idx,
649  ival_copy);
650  auto& frag_col_buffer = frag_col_buffers[col_lazy_fetch.local_col_id];
651  CHECK_LT(target_logical_idx, targets_.size());
652  const TargetInfo& target_info = targets_[target_logical_idx];
653  CHECK(!target_info.is_agg);
654  if (target_info.sql_type.is_string() &&
655  target_info.sql_type.get_compression() == kENCODING_NONE) {
656  VarlenDatum vd;
657  bool is_end{false};
659  reinterpret_cast<ChunkIter*>(const_cast<int8_t*>(frag_col_buffer)),
660  storage_lookup_result.fixedup_entry_idx,
661  false,
662  &vd,
663  &is_end);
664  CHECK(!is_end);
665  if (vd.is_null) {
666  return 0;
667  }
668  std::string fetched_str(reinterpret_cast<char*>(vd.pointer), vd.length);
669  return reinterpret_cast<int64_t>(row_set_mem_owner_->addString(fetched_str));
670  }
671  return lazy_decode(col_lazy_fetch, frag_col_buffer, ival_copy);
672  }
673  }
674  return ival;
675 }
const std::vector< const int8_t * > & getColumnFrag(const size_t storge_idx, const size_t col_logical_idx, int64_t &global_idx) const
bool is_null
Definition: sqltypes.h:73
int64_t lazy_decode(const ColumnLazyFetchInfo &col_lazy_fetch, const int8_t *byte_stream, const int64_t pos)
DEVICE void ChunkIter_get_nth(ChunkIter *it, int n, bool uncompress, VarlenDatum *result, bool *is_end)
Definition: ChunkIter.cpp:181
int8_t * pointer
Definition: sqltypes.h:72
const std::vector< TargetInfo > targets_
Definition: ResultSet.h:771
const std::shared_ptr< RowSetMemoryOwner > row_set_mem_owner_
Definition: ResultSet.h:781
const std::vector< ColumnLazyFetchInfo > lazy_fetch_info_
Definition: ResultSet.h:792
#define CHECK_LT(x, y)
Definition: Logger.h:197
std::vector< std::vector< std::vector< const int8_t * > > > col_buffers_
Definition: ResultSet.h:793
#define CHECK(condition)
Definition: Logger.h:187
size_t length
Definition: sqltypes.h:71
+ Here is the call graph for this function:

◆ makeGeoTargetValue()

TargetValue ResultSet::makeGeoTargetValue ( const int8_t *  geo_target_ptr,
const size_t  slot_idx,
const TargetInfo target_info,
const size_t  target_logical_idx,
const size_t  entry_buff_idx 
) const
private

Definition at line 1300 of file ResultSetIteration.cpp.

References advance_to_next_columnar_target_buff(), CHECK, CHECK_EQ, CHECK_LT, col_buffers_, device_id_, device_type_, QueryMemoryDescriptor::didOutputColumnar(), findStorage(), geo_return_type_, SQLTypeInfoCore< TYPE_FACET_PACK >::get_type(), SQLTypeInfoCore< TYPE_FACET_PACK >::get_type_name(), getColumnFrag(), QueryMemoryDescriptor::getExecutor(), QueryMemoryDescriptor::getPaddedColWidthForRange(), QueryMemoryDescriptor::getPaddedSlotWidthBytes(), getStorageIndex(), GPU, TargetInfo::is_agg, SQLTypeInfoCore< TYPE_FACET_PACK >::is_geometry(), ColumnLazyFetchInfo::is_lazily_fetched, kLINESTRING, kMULTIPOLYGON, kPOINT, kPOLYGON, lazy_fetch_info_, ColumnLazyFetchInfo::local_col_id, query_mem_desc_, read_int_from_buff(), separate_varlen_storage_valid_, serialized_varlen_buffer_, TargetInfo::sql_type, and UNREACHABLE.

Referenced by getTargetValueFromBufferColwise(), and getTargetValueFromBufferRowwise().

1304  {
1305  CHECK(target_info.sql_type.is_geometry());
1306 
1307  auto getNextTargetBufferRowWise = [&](const size_t slot_idx, const size_t range) {
1308  return geo_target_ptr + query_mem_desc_.getPaddedColWidthForRange(slot_idx, range);
1309  };
1310 
1311  auto getNextTargetBufferColWise = [&](const size_t slot_idx, const size_t range) {
1312  const auto storage_info = findStorage(entry_buff_idx);
1313  auto crt_geo_col_ptr = geo_target_ptr;
1314  for (size_t i = slot_idx; i < slot_idx + range; i++) {
1315  crt_geo_col_ptr = advance_to_next_columnar_target_buff(
1316  crt_geo_col_ptr, storage_info.storage_ptr->query_mem_desc_, i);
1317  }
1318  // adjusting the column pointer to represent a pointer to the geo target value
1319  return crt_geo_col_ptr +
1320  storage_info.fixedup_entry_idx *
1321  storage_info.storage_ptr->query_mem_desc_.getPaddedSlotWidthBytes(
1322  slot_idx + range);
1323  };
1324 
1325  auto getNextTargetBuffer = [&](const size_t slot_idx, const size_t range) {
1327  ? getNextTargetBufferColWise(slot_idx, range)
1328  : getNextTargetBufferRowWise(slot_idx, range);
1329  };
1330 
1331  auto getCoordsDataPtr = [&](const int8_t* geo_target_ptr) {
1332  return read_int_from_buff(getNextTargetBuffer(slot_idx, 0),
1334  };
1335 
1336  auto getCoordsLength = [&](const int8_t* geo_target_ptr) {
1337  return read_int_from_buff(getNextTargetBuffer(slot_idx, 1),
1339  };
1340 
1341  auto getRingSizesPtr = [&](const int8_t* geo_target_ptr) {
1342  return read_int_from_buff(getNextTargetBuffer(slot_idx, 2),
1344  };
1345 
1346  auto getRingSizesLength = [&](const int8_t* geo_target_ptr) {
1347  return read_int_from_buff(getNextTargetBuffer(slot_idx, 3),
1349  };
1350 
1351  auto getPolyRingsPtr = [&](const int8_t* geo_target_ptr) {
1352  return read_int_from_buff(getNextTargetBuffer(slot_idx, 4),
1354  };
1355 
1356  auto getPolyRingsLength = [&](const int8_t* geo_target_ptr) {
1357  return read_int_from_buff(getNextTargetBuffer(slot_idx, 5),
1359  };
1360 
1361  auto getFragColBuffers = [&]() -> decltype(auto) {
1362  const auto storage_idx = getStorageIndex(entry_buff_idx);
1363  CHECK_LT(static_cast<size_t>(storage_idx.first), col_buffers_.size());
1364  auto global_idx = getCoordsDataPtr(geo_target_ptr);
1365  return getColumnFrag(storage_idx.first, target_logical_idx, global_idx);
1366  };
1367 
1368  const bool is_gpu_fetch = device_type_ == ExecutorDeviceType::GPU;
1369 
1370  auto getDataMgr = [&]() {
1371  auto executor = query_mem_desc_.getExecutor();
1372  CHECK(executor);
1373  auto& data_mgr = executor->catalog_->getDataMgr();
1374  return &data_mgr;
1375  };
1376 
1377  auto getSeparateVarlenStorage = [&]() -> decltype(auto) {
1378  const auto storage_idx = getStorageIndex(entry_buff_idx);
1379  CHECK_LT(static_cast<size_t>(storage_idx.first), serialized_varlen_buffer_.size());
1380  const auto& varlen_buffer = serialized_varlen_buffer_[storage_idx.first];
1381  return varlen_buffer;
1382  };
1383 
1384  if (separate_varlen_storage_valid_ && getCoordsDataPtr(geo_target_ptr) < 0) {
1385  CHECK_EQ(-1, getCoordsDataPtr(geo_target_ptr));
1386  return TargetValue(nullptr);
1387  }
1388 
1389  const ColumnLazyFetchInfo* col_lazy_fetch = nullptr;
1390  if (!lazy_fetch_info_.empty()) {
1391  CHECK_LT(target_logical_idx, lazy_fetch_info_.size());
1392  col_lazy_fetch = &lazy_fetch_info_[target_logical_idx];
1393  }
1394 
1395  switch (target_info.sql_type.get_type()) {
1396  case kPOINT: {
1397  if (separate_varlen_storage_valid_ && !target_info.is_agg) {
1398  const auto& varlen_buffer = getSeparateVarlenStorage();
1399  CHECK_LT(static_cast<size_t>(getCoordsDataPtr(geo_target_ptr)),
1400  varlen_buffer.size());
1401 
1402  return GeoTargetValueBuilder<kPOINT, GeoQueryOutputFetchHandler>::build(
1403  target_info.sql_type,
1405  nullptr,
1406  false,
1407  device_id_,
1408  reinterpret_cast<int64_t>(
1409  varlen_buffer[getCoordsDataPtr(geo_target_ptr)].data()),
1410  static_cast<int64_t>(varlen_buffer[getCoordsDataPtr(geo_target_ptr)].size()));
1411  } else if (col_lazy_fetch && col_lazy_fetch->is_lazily_fetched) {
1412  const auto& frag_col_buffers = getFragColBuffers();
1413  return GeoTargetValueBuilder<kPOINT, GeoLazyFetchHandler>::build(
1414  target_info.sql_type,
1416  frag_col_buffers[col_lazy_fetch->local_col_id],
1417  getCoordsDataPtr(geo_target_ptr));
1418  } else {
1419  return GeoTargetValueBuilder<kPOINT, GeoQueryOutputFetchHandler>::build(
1420  target_info.sql_type,
1422  is_gpu_fetch ? getDataMgr() : nullptr,
1423  is_gpu_fetch,
1424  device_id_,
1425  getCoordsDataPtr(geo_target_ptr),
1426  getCoordsLength(geo_target_ptr));
1427  }
1428  break;
1429  }
1430  case kLINESTRING: {
1431  if (separate_varlen_storage_valid_ && !target_info.is_agg) {
1432  const auto& varlen_buffer = getSeparateVarlenStorage();
1433  CHECK_LT(static_cast<size_t>(getCoordsDataPtr(geo_target_ptr)),
1434  varlen_buffer.size());
1435 
1436  return GeoTargetValueBuilder<kLINESTRING, GeoQueryOutputFetchHandler>::build(
1437  target_info.sql_type,
1439  nullptr,
1440  false,
1441  device_id_,
1442  reinterpret_cast<int64_t>(
1443  varlen_buffer[getCoordsDataPtr(geo_target_ptr)].data()),
1444  static_cast<int64_t>(varlen_buffer[getCoordsDataPtr(geo_target_ptr)].size()));
1445  } else if (col_lazy_fetch && col_lazy_fetch->is_lazily_fetched) {
1446  const auto& frag_col_buffers = getFragColBuffers();
1447  return GeoTargetValueBuilder<kLINESTRING, GeoLazyFetchHandler>::build(
1448  target_info.sql_type,
1450  frag_col_buffers[col_lazy_fetch->local_col_id],
1451  getCoordsDataPtr(geo_target_ptr));
1452  } else {
1453  return GeoTargetValueBuilder<kLINESTRING, GeoQueryOutputFetchHandler>::build(
1454  target_info.sql_type,
1456  is_gpu_fetch ? getDataMgr() : nullptr,
1457  is_gpu_fetch,
1458  device_id_,
1459  getCoordsDataPtr(geo_target_ptr),
1460  getCoordsLength(geo_target_ptr));
1461  }
1462  break;
1463  }
1464  case kPOLYGON: {
1465  if (separate_varlen_storage_valid_ && !target_info.is_agg) {
1466  const auto& varlen_buffer = getSeparateVarlenStorage();
1467  CHECK_LT(static_cast<size_t>(getCoordsDataPtr(geo_target_ptr) + 1),
1468  varlen_buffer.size());
1469 
1470  return GeoTargetValueBuilder<kPOLYGON, GeoQueryOutputFetchHandler>::build(
1471  target_info.sql_type,
1473  nullptr,
1474  false,
1475  device_id_,
1476  reinterpret_cast<int64_t>(
1477  varlen_buffer[getCoordsDataPtr(geo_target_ptr)].data()),
1478  static_cast<int64_t>(varlen_buffer[getCoordsDataPtr(geo_target_ptr)].size()),
1479  reinterpret_cast<int64_t>(
1480  varlen_buffer[getCoordsDataPtr(geo_target_ptr) + 1].data()),
1481  static_cast<int64_t>(
1482  varlen_buffer[getCoordsDataPtr(geo_target_ptr) + 1].size()));
1483  } else if (col_lazy_fetch && col_lazy_fetch->is_lazily_fetched) {
1484  const auto& frag_col_buffers = getFragColBuffers();
1485 
1486  return GeoTargetValueBuilder<kPOLYGON, GeoLazyFetchHandler>::build(
1487  target_info.sql_type,
1489  frag_col_buffers[col_lazy_fetch->local_col_id],
1490  getCoordsDataPtr(geo_target_ptr),
1491  frag_col_buffers[col_lazy_fetch->local_col_id + 1],
1492  getCoordsDataPtr(geo_target_ptr));
1493  } else {
1494  return GeoTargetValueBuilder<kPOLYGON, GeoQueryOutputFetchHandler>::build(
1495  target_info.sql_type,
1497  is_gpu_fetch ? getDataMgr() : nullptr,
1498  is_gpu_fetch,
1499  device_id_,
1500  getCoordsDataPtr(geo_target_ptr),
1501  getCoordsLength(geo_target_ptr),
1502  getRingSizesPtr(geo_target_ptr),
1503  getRingSizesLength(geo_target_ptr) * 4);
1504  }
1505  break;
1506  }
1507  case kMULTIPOLYGON: {
1508  if (separate_varlen_storage_valid_ && !target_info.is_agg) {
1509  const auto& varlen_buffer = getSeparateVarlenStorage();
1510  CHECK_LT(static_cast<size_t>(getCoordsDataPtr(geo_target_ptr) + 2),
1511  varlen_buffer.size());
1512 
1513  return GeoTargetValueBuilder<kMULTIPOLYGON, GeoQueryOutputFetchHandler>::build(
1514  target_info.sql_type,
1516  nullptr,
1517  false,
1518  device_id_,
1519  reinterpret_cast<int64_t>(
1520  varlen_buffer[getCoordsDataPtr(geo_target_ptr)].data()),
1521  static_cast<int64_t>(varlen_buffer[getCoordsDataPtr(geo_target_ptr)].size()),
1522  reinterpret_cast<int64_t>(
1523  varlen_buffer[getCoordsDataPtr(geo_target_ptr) + 1].data()),
1524  static_cast<int64_t>(
1525  varlen_buffer[getCoordsDataPtr(geo_target_ptr) + 1].size()),
1526  reinterpret_cast<int64_t>(
1527  varlen_buffer[getCoordsDataPtr(geo_target_ptr) + 2].data()),
1528  static_cast<int64_t>(
1529  varlen_buffer[getCoordsDataPtr(geo_target_ptr) + 2].size()));
1530  } else if (col_lazy_fetch && col_lazy_fetch->is_lazily_fetched) {
1531  const auto& frag_col_buffers = getFragColBuffers();
1532 
1533  return GeoTargetValueBuilder<kMULTIPOLYGON, GeoLazyFetchHandler>::build(
1534  target_info.sql_type,
1536  frag_col_buffers[col_lazy_fetch->local_col_id],
1537  getCoordsDataPtr(geo_target_ptr),
1538  frag_col_buffers[col_lazy_fetch->local_col_id + 1],
1539  getCoordsDataPtr(geo_target_ptr),
1540  frag_col_buffers[col_lazy_fetch->local_col_id + 2],
1541  getCoordsDataPtr(geo_target_ptr));
1542  } else {
1543  return GeoTargetValueBuilder<kMULTIPOLYGON, GeoQueryOutputFetchHandler>::build(
1544  target_info.sql_type,
1546  is_gpu_fetch ? getDataMgr() : nullptr,
1547  is_gpu_fetch,
1548  device_id_,
1549  getCoordsDataPtr(geo_target_ptr),
1550  getCoordsLength(geo_target_ptr),
1551  getRingSizesPtr(geo_target_ptr),
1552  getRingSizesLength(geo_target_ptr) * 4,
1553  getPolyRingsPtr(geo_target_ptr),
1554  getPolyRingsLength(geo_target_ptr) * 4);
1555  }
1556  break;
1557  }
1558  default:
1559  throw std::runtime_error("Unknown Geometry type encountered: " +
1560  target_info.sql_type.get_type_name());
1561  }
1562  UNREACHABLE();
1563  return TargetValue(nullptr);
1564 }
#define CHECK_EQ(x, y)
Definition: Logger.h:195
const std::vector< const int8_t * > & getColumnFrag(const size_t storge_idx, const size_t col_logical_idx, int64_t &global_idx) const
GeoReturnType geo_return_type_
Definition: ResultSet.h:813
T advance_to_next_columnar_target_buff(T target_ptr, const QueryMemoryDescriptor &query_mem_desc, const size_t target_slot_idx)
SQLTypeInfo sql_type
Definition: TargetInfo.h:42
HOST DEVICE SQLTypes get_type() const
Definition: sqltypes.h:319
QueryMemoryDescriptor query_mem_desc_
Definition: ResultSet.h:774
#define UNREACHABLE()
Definition: Logger.h:231
std::pair< ssize_t, size_t > getStorageIndex(const size_t entry_idx) const
Definition: ResultSet.cpp:594
int64_t read_int_from_buff(const int8_t *ptr, const int8_t compact_sz)
std::vector< SerializedVarlenBufferStorage > serialized_varlen_buffer_
Definition: ResultSet.h:805
const int8_t getPaddedSlotWidthBytes(const size_t slot_idx) const
const int local_col_id
Definition: ResultSet.h:231
const Executor * getExecutor() const
std::string get_type_name() const
Definition: sqltypes.h:422
bool is_agg
Definition: TargetInfo.h:40
StorageLookupResult findStorage(const size_t entry_idx) const
Definition: ResultSet.cpp:615
const std::vector< ColumnLazyFetchInfo > lazy_fetch_info_
Definition: ResultSet.h:792
size_t getPaddedColWidthForRange(const size_t offset, const size_t range) const
#define CHECK_LT(x, y)
Definition: Logger.h:197
std::vector< std::vector< std::vector< const int8_t * > > > col_buffers_
Definition: ResultSet.h:793
const bool is_lazily_fetched
Definition: ResultSet.h:230
const ExecutorDeviceType device_type_
Definition: ResultSet.h:772
#define CHECK(condition)
Definition: Logger.h:187
bool is_geometry() const
Definition: sqltypes.h:458
bool separate_varlen_storage_valid_
Definition: ResultSet.h:806
boost::variant< ScalarTargetValue, ArrayTargetValue, GeoTargetValue, GeoTargetValuePtr > TargetValue
Definition: TargetValue.h:167
const int device_id_
Definition: ResultSet.h:773
+ Here is the call graph for this function:
+ Here is the caller graph for this function:

◆ makeTargetValue()

TargetValue ResultSet::makeTargetValue ( const int8_t *  ptr,
const int8_t  compact_sz,
const TargetInfo target_info,
const size_t  target_logical_idx,
const bool  translate_strings,
const bool  decimal_to_double,
const size_t  entry_buff_idx 
) const
private

Definition at line 1567 of file ResultSetIteration.cpp.

References TargetInfo::agg_kind, CHECK, CHECK_EQ, CHECK_GE, CHECK_LT, col_buffers_, count_distinct_set_size(), decimal_to_int_type(), executor_, exp_to_scale(), QueryMemoryDescriptor::forceFourByteFloat(), SQLTypeInfoCore< TYPE_FACET_PACK >::get_comp_param(), get_compact_type(), SQLTypeInfoCore< TYPE_FACET_PACK >::get_compression(), SQLTypeInfoCore< TYPE_FACET_PACK >::get_type(), getColumnFrag(), QueryMemoryDescriptor::getCountDistinctDescriptor(), getStorageIndex(), inline_int_null_val(), anonymous_namespace{ResultSetIteration.cpp}::int_resize_cast(), TargetInfo::is_agg, is_distinct_target(), SQLTypeInfoCore< TYPE_FACET_PACK >::is_string(), QueryMemoryDescriptor::isLogicalSizedColumnsAllowed(), kAVG, kENCODING_DICT, kFLOAT, kMAX, kMIN, kSUM, lazy_decode(), lazy_fetch_info_, NULL_DOUBLE, NULL_INT, query_mem_desc_, read_int_from_buff(), row_set_mem_owner_, and TargetInfo::sql_type.

Referenced by getTargetValueFromBufferColwise(), and getTargetValueFromBufferRowwise().

1573  {
1574  auto actual_compact_sz = compact_sz;
1575  if (target_info.sql_type.get_type() == kFLOAT &&
1578  actual_compact_sz = sizeof(float);
1579  } else {
1580  actual_compact_sz = sizeof(double);
1581  }
1582  if (target_info.is_agg &&
1583  (target_info.agg_kind == kAVG || target_info.agg_kind == kSUM ||
1584  target_info.agg_kind == kMIN || target_info.agg_kind == kMAX)) {
1585  // The above listed aggregates use two floats in a single 8-byte slot. Set the
1586  // padded size to 4 bytes to properly read each value.
1587  actual_compact_sz = sizeof(float);
1588  }
1589  }
1590  if (get_compact_type(target_info).is_date_in_days()) {
1591  // Dates encoded in days are converted to 8 byte values on read.
1592  actual_compact_sz = sizeof(int64_t);
1593  }
1594 
1595  // String dictionary keys are read as 32-bit values regardless of encoding
1596  if (target_info.sql_type.is_string() &&
1597  target_info.sql_type.get_compression() == kENCODING_DICT &&
1598  target_info.sql_type.get_comp_param()) {
1599  actual_compact_sz = sizeof(int32_t);
1600  }
1601 
1602  auto ival = read_int_from_buff(ptr, actual_compact_sz);
1603  const auto& chosen_type = get_compact_type(target_info);
1604  if (!lazy_fetch_info_.empty()) {
1605  CHECK_LT(target_logical_idx, lazy_fetch_info_.size());
1606  const auto& col_lazy_fetch = lazy_fetch_info_[target_logical_idx];
1607  if (col_lazy_fetch.is_lazily_fetched) {
1608  CHECK_GE(ival, 0);
1609  const auto storage_idx = getStorageIndex(entry_buff_idx);
1610  CHECK_LT(static_cast<size_t>(storage_idx.first), col_buffers_.size());
1611  auto& frag_col_buffers = getColumnFrag(storage_idx.first, target_logical_idx, ival);
1612  ival = lazy_decode(
1613  col_lazy_fetch, frag_col_buffers[col_lazy_fetch.local_col_id], ival);
1614  if (chosen_type.is_fp()) {
1615  const auto dval = *reinterpret_cast<const double*>(may_alias_ptr(&ival));
1616  if (chosen_type.get_type() == kFLOAT) {
1617  return ScalarTargetValue(static_cast<float>(dval));
1618  } else {
1619  return ScalarTargetValue(dval);
1620  }
1621  }
1622  }
1623  }
1624  if (chosen_type.is_fp()) {
1625  switch (actual_compact_sz) {
1626  case 8: {
1627  const auto dval = *reinterpret_cast<const double*>(ptr);
1628  return chosen_type.get_type() == kFLOAT
1629  ? ScalarTargetValue(static_cast<const float>(dval))
1630  : ScalarTargetValue(dval);
1631  }
1632  case 4: {
1633  CHECK_EQ(kFLOAT, chosen_type.get_type());
1634  return *reinterpret_cast<const float*>(ptr);
1635  }
1636  default:
1637  CHECK(false);
1638  }
1639  }
1640  if (chosen_type.is_integer() | chosen_type.is_boolean() || chosen_type.is_time() ||
1641  chosen_type.is_timeinterval()) {
1642  if (is_distinct_target(target_info)) {
1644  ival, query_mem_desc_.getCountDistinctDescriptor(target_logical_idx)));
1645  }
1646  // TODO(alex): remove int_resize_cast, make read_int_from_buff return the
1647  // right type instead
1648  if (inline_int_null_val(chosen_type) ==
1649  int_resize_cast(ival, chosen_type.get_logical_size())) {
1650  return inline_int_null_val(target_info.sql_type);
1651  }
1652  return ival;
1653  }
1654  if (chosen_type.is_string() && chosen_type.get_compression() == kENCODING_DICT) {
1655  if (translate_strings) {
1656  if (static_cast<int32_t>(ival) ==
1657  NULL_INT) { // TODO(alex): this isn't nice, fix it
1658  return NullableString(nullptr);
1659  }
1660  StringDictionaryProxy* sdp{nullptr};
1661  if (!chosen_type.get_comp_param()) {
1662  sdp = row_set_mem_owner_->getLiteralStringDictProxy();
1663  } else {
1664  sdp = executor_
1665  ? executor_->getStringDictionaryProxy(
1666  chosen_type.get_comp_param(), row_set_mem_owner_, false)
1667  : row_set_mem_owner_->getStringDictProxy(chosen_type.get_comp_param());
1668  }
1669  return NullableString(sdp->getString(ival));
1670  } else {
1671  return static_cast<int64_t>(static_cast<int32_t>(ival));
1672  }
1673  }
1674  if (chosen_type.is_decimal()) {
1675  if (decimal_to_double) {
1676  if (ival ==
1677  inline_int_null_val(SQLTypeInfo(decimal_to_int_type(chosen_type), false))) {
1678  return NULL_DOUBLE;
1679  }
1680  return static_cast<double>(ival) / exp_to_scale(chosen_type.get_scale());
1681  }
1682  return ival;
1683  }
1684  CHECK(false);
1685  return TargetValue(int64_t(0));
1686 }
#define CHECK_EQ(x, y)
Definition: Logger.h:195
#define NULL_DOUBLE
Definition: sqltypes.h:177
double decimal_to_double(const SQLTypeInfo &otype, int64_t oval)
const std::vector< const int8_t * > & getColumnFrag(const size_t storage_idx, const size_t col_logical_idx, int64_t &global_idx) const
SQLTypeInfo sql_type
Definition: TargetInfo.h:42
const Executor * executor_
Definition: ResultSet.h:785
HOST DEVICE SQLTypes get_type() const
Definition: sqltypes.h:319
QueryMemoryDescriptor query_mem_desc_
Definition: ResultSet.h:774
#define CHECK_GE(x, y)
Definition: Logger.h:200
std::pair< ssize_t, size_t > getStorageIndex(const size_t entry_idx) const
Definition: ResultSet.cpp:594
int64_t read_int_from_buff(const int8_t *ptr, const int8_t compact_sz)
int64_t lazy_decode(const ColumnLazyFetchInfo &col_lazy_fetch, const int8_t *byte_stream, const int64_t pos)
HOST DEVICE EncodingType get_compression() const
Definition: sqltypes.h:327
Definition: sqldefs.h:71
const SQLTypeInfo get_compact_type(const TargetInfo &target)
const std::shared_ptr< RowSetMemoryOwner > row_set_mem_owner_
Definition: ResultSet.h:781
bool is_agg
Definition: TargetInfo.h:40
int64_t count_distinct_set_size(const int64_t set_handle, const CountDistinctDescriptor &count_distinct_desc)
Definition: CountDistinct.h:75
Definition: sqldefs.h:71
bool is_distinct_target(const TargetInfo &target_info)
Definition: TargetInfo.h:116
#define NULL_INT
Definition: sqltypes.h:174
const std::vector< ColumnLazyFetchInfo > lazy_fetch_info_
Definition: ResultSet.h:792
SQLTypeInfoCore< ArrayContextTypeSizer, ExecutorTypePackaging, DateTimeFacilities > SQLTypeInfo
Definition: sqltypes.h:819
SQLAgg agg_kind
Definition: TargetInfo.h:41
SQLTypes decimal_to_int_type(const SQLTypeInfo &ti)
Definition: Datum.cpp:416
#define CHECK_LT(x, y)
Definition: Logger.h:197
int64_t int_resize_cast(const int64_t ival, const size_t sz)
std::vector< std::vector< std::vector< const int8_t * > > > col_buffers_
Definition: ResultSet.h:793
boost::variant< std::string, void * > NullableString
Definition: TargetValue.h:155
HOST DEVICE int get_comp_param() const
Definition: sqltypes.h:328
#define CHECK(condition)
Definition: Logger.h:187
uint64_t exp_to_scale(const unsigned exp)
int64_t inline_int_null_val(const SQL_TYPE_INFO &ti)
boost::variant< ScalarTargetValue, ArrayTargetValue, GeoTargetValue, GeoTargetValuePtr > TargetValue
Definition: TargetValue.h:167
const CountDistinctDescriptor & getCountDistinctDescriptor(const size_t idx) const
Definition: sqldefs.h:71
Definition: sqldefs.h:71
bool is_string() const
Definition: sqltypes.h:446
boost::variant< int64_t, double, float, NullableString > ScalarTargetValue
Definition: TargetValue.h:156
+ Here is the call graph for this function:
+ Here is the caller graph for this function:

◆ makeVarlenTargetValue()

TargetValue ResultSet::makeVarlenTargetValue ( const int8_t *  ptr1,
const int8_t  compact_sz1,
const int8_t *  ptr2,
const int8_t  compact_sz2,
const TargetInfo target_info,
const size_t  target_logical_idx,
const bool  translate_strings,
const size_t  entry_buff_idx 
) const
private

Definition at line 1143 of file ResultSetIteration.cpp.

References anonymous_namespace{ResultSetIteration.cpp}::build_array_target_value(), CHECK, CHECK_EQ, CHECK_GE, CHECK_GT, CHECK_LT, ChunkIter_get_nth(), col_buffers_, copy_from_gpu(), device_id_, device_type_, executor_, SQLTypeInfoCore< TYPE_FACET_PACK >::get_compression(), SQLTypeInfoCore< TYPE_FACET_PACK >::get_elem_type(), SQLTypeInfoCore< TYPE_FACET_PACK >::get_type(), getColumnFrag(), QueryMemoryDescriptor::getExecutor(), getStorageIndex(), GPU, TargetInfo::is_agg, SQLTypeInfoCore< TYPE_FACET_PACK >::is_array(), VarlenDatum::is_null, SQLTypeInfoCore< TYPE_FACET_PACK >::is_string(), kARRAY, kENCODING_NONE, lazy_fetch_info_, VarlenDatum::length, std::optional, VarlenDatum::pointer, query_mem_desc_, read_int_from_buff(), row_set_mem_owner_, separate_varlen_storage_valid_, serialized_varlen_buffer_, and TargetInfo::sql_type.

Referenced by getTargetValueFromBufferColwise(), and getTargetValueFromBufferRowwise().

1150  {
1151  auto varlen_ptr = read_int_from_buff(ptr1, compact_sz1);
1152  if (separate_varlen_storage_valid_ && !target_info.is_agg) {
1153  if (varlen_ptr < 0) {
1154  CHECK_EQ(-1, varlen_ptr);
1155  if (target_info.sql_type.get_type() == kARRAY) {
1156  return ArrayTargetValue(boost::optional<std::vector<ScalarTargetValue>>{});
1157  }
1158  return TargetValue(nullptr);
1159  }
1160  const auto storage_idx = getStorageIndex(entry_buff_idx);
1161  if (target_info.sql_type.is_string()) {
1162  CHECK(target_info.sql_type.get_compression() == kENCODING_NONE);
1163  CHECK_LT(static_cast<size_t>(storage_idx.first), serialized_varlen_buffer_.size());
1164  const auto& varlen_buffer_for_storage =
1165  serialized_varlen_buffer_[storage_idx.first];
1166  CHECK_LT(static_cast<size_t>(varlen_ptr), varlen_buffer_for_storage.size());
1167  return varlen_buffer_for_storage[varlen_ptr];
1168  } else if (target_info.sql_type.get_type() == kARRAY) {
1169  CHECK_LT(static_cast<size_t>(storage_idx.first), serialized_varlen_buffer_.size());
1170  const auto& varlen_buffer = serialized_varlen_buffer_[storage_idx.first];
1171  CHECK_LT(static_cast<size_t>(varlen_ptr), varlen_buffer.size());
1172 
1173  return build_array_target_value(
1174  target_info.sql_type,
1175  reinterpret_cast<const int8_t*>(varlen_buffer[varlen_ptr].data()),
1176  varlen_buffer[varlen_ptr].size(),
1177  translate_strings,
1179  executor_);
1180  } else {
1181  CHECK(false);
1182  }
1183  }
1184  if (!lazy_fetch_info_.empty()) {
1185  CHECK_LT(target_logical_idx, lazy_fetch_info_.size());
1186  const auto& col_lazy_fetch = lazy_fetch_info_[target_logical_idx];
1187  if (col_lazy_fetch.is_lazily_fetched) {
1188  const auto storage_idx = getStorageIndex(entry_buff_idx);
1189  CHECK_LT(static_cast<size_t>(storage_idx.first), col_buffers_.size());
1190  auto& frag_col_buffers =
1191  getColumnFrag(storage_idx.first, target_logical_idx, varlen_ptr);
1192  bool is_end{false};
1193  if (target_info.sql_type.is_string()) {
1194  VarlenDatum vd;
1195  ChunkIter_get_nth(reinterpret_cast<ChunkIter*>(const_cast<int8_t*>(
1196  frag_col_buffers[col_lazy_fetch.local_col_id])),
1197  varlen_ptr,
1198  false,
1199  &vd,
1200  &is_end);
1201  CHECK(!is_end);
1202  if (vd.is_null) {
1203  return TargetValue(nullptr);
1204  }
1205  CHECK(vd.pointer);
1206  CHECK_GT(vd.length, 0u);
1207  std::string fetched_str(reinterpret_cast<char*>(vd.pointer), vd.length);
1208  return fetched_str;
1209  } else {
1210  CHECK(target_info.sql_type.is_array());
1211  ArrayDatum ad;
1212  ChunkIter_get_nth(reinterpret_cast<ChunkIter*>(const_cast<int8_t*>(
1213  frag_col_buffers[col_lazy_fetch.local_col_id])),
1214  varlen_ptr,
1215  &ad,
1216  &is_end);
1217  CHECK(!is_end);
1218  if (ad.is_null) {
1219  return ArrayTargetValue(boost::optional<std::vector<ScalarTargetValue>>{});
1220  }
1221  CHECK_GE(ad.length, 0u);
1222  if (ad.length > 0) {
1223  CHECK(ad.pointer);
1224  }
1225  return build_array_target_value(target_info.sql_type,
1226  ad.pointer,
1227  ad.length,
1228  translate_strings,
1230  executor_);
1231  }
1232  }
1233  }
1234  if (!varlen_ptr) {
1235  if (target_info.sql_type.is_array()) {
1236  return ArrayTargetValue(boost::optional<std::vector<ScalarTargetValue>>{});
1237  }
1238  return TargetValue(nullptr);
1239  }
1240  auto length = read_int_from_buff(ptr2, compact_sz2);
1241  if (target_info.sql_type.is_array()) {
1242  const auto& elem_ti = target_info.sql_type.get_elem_type();
1243  length *= elem_ti.get_array_context_logical_size();
1244  }
1245  std::vector<int8_t> cpu_buffer;
1246  if (varlen_ptr && device_type_ == ExecutorDeviceType::GPU) {
1247  cpu_buffer.resize(length);
1248  const auto executor = query_mem_desc_.getExecutor();
1249  CHECK(executor);
1250  auto& data_mgr = executor->catalog_->getDataMgr();
1251  copy_from_gpu(&data_mgr,
1252  &cpu_buffer[0],
1253  static_cast<CUdeviceptr>(varlen_ptr),
1254  length,
1255  device_id_);
1256  varlen_ptr = reinterpret_cast<int64_t>(&cpu_buffer[0]);
1257  }
1258  if (target_info.sql_type.is_array()) {
1259  return build_array_target_value(target_info.sql_type,
1260  reinterpret_cast<const int8_t*>(varlen_ptr),
1261  length,
1262  translate_strings,
1264  executor_);
1265  }
1266  return std::string(reinterpret_cast<char*>(varlen_ptr), length);
1267 }
#define CHECK_EQ(x, y)
Definition: Logger.h:195
const std::vector< const int8_t * > & getColumnFrag(const size_t storage_idx, const size_t col_logical_idx, int64_t &global_idx) const
bool is_null
Definition: sqltypes.h:73
SQLTypeInfo sql_type
Definition: TargetInfo.h:42
const Executor * executor_
Definition: ResultSet.h:785
HOST DEVICE SQLTypes get_type() const
Definition: sqltypes.h:319
QueryMemoryDescriptor query_mem_desc_
Definition: ResultSet.h:774
#define CHECK_GE(x, y)
Definition: Logger.h:200
std::pair< ssize_t, size_t > getStorageIndex(const size_t entry_idx) const
Definition: ResultSet.cpp:594
int64_t read_int_from_buff(const int8_t *ptr, const int8_t compact_sz)
std::vector< SerializedVarlenBufferStorage > serialized_varlen_buffer_
Definition: ResultSet.h:805
#define CHECK_GT(x, y)
Definition: Logger.h:199
HOST DEVICE EncodingType get_compression() const
Definition: sqltypes.h:327
DEVICE void ChunkIter_get_nth(ChunkIter *it, int n, bool uncompress, VarlenDatum *result, bool *is_end)
Definition: ChunkIter.cpp:181
int8_t * pointer
Definition: sqltypes.h:72
const Executor * getExecutor() const
bool is_array() const
Definition: sqltypes.h:454
const std::shared_ptr< RowSetMemoryOwner > row_set_mem_owner_
Definition: ResultSet.h:781
bool is_agg
Definition: TargetInfo.h:40
void copy_from_gpu(Data_Namespace::DataMgr *data_mgr, void *dst, const CUdeviceptr src, const size_t num_bytes, const int device_id)
boost::optional< std::vector< ScalarTargetValue > > ArrayTargetValue
Definition: TargetValue.h:157
SQLTypeInfoCore get_elem_type() const
Definition: sqltypes.h:628
const std::vector< ColumnLazyFetchInfo > lazy_fetch_info_
Definition: ResultSet.h:792
#define CHECK_LT(x, y)
Definition: Logger.h:197
std::vector< std::vector< std::vector< const int8_t * > > > col_buffers_
Definition: ResultSet.h:793
const ExecutorDeviceType device_type_
Definition: ResultSet.h:772
#define CHECK(condition)
Definition: Logger.h:187
bool separate_varlen_storage_valid_
Definition: ResultSet.h:806
boost::variant< ScalarTargetValue, ArrayTargetValue, GeoTargetValue, GeoTargetValuePtr > TargetValue
Definition: TargetValue.h:167
bool is_string() const
Definition: sqltypes.h:446
std::conditional_t< isCudaCC(), DeviceArrayDatum, HostArrayDatum > ArrayDatum
Definition: sqltypes.h:119
TargetValue build_array_target_value(const SQLTypeInfo &array_ti, const int8_t *buff, const size_t buff_sz, const bool translate_strings, std::shared_ptr< RowSetMemoryOwner > row_set_mem_owner, const Executor *executor)
size_t length
Definition: sqltypes.h:71
const int device_id_
Definition: ResultSet.h:773
+ Here is the call graph for this function:
+ Here is the caller graph for this function:

◆ moveToBegin()

void ResultSet::moveToBegin ( ) const

Definition at line 435 of file ResultSet.cpp.

References crt_row_buff_idx_, and fetched_so_far_.

Referenced by rowCount().

435  {
436  crt_row_buff_idx_ = 0;
437  fetched_so_far_ = 0;
438 }
size_t fetched_so_far_
Definition: ResultSet.h:778
size_t crt_row_buff_idx_
Definition: ResultSet.h:777
+ Here is the caller graph for this function:

◆ parallelRowCount()

size_t ResultSet::parallelRowCount ( ) const
private

Definition at line 346 of file ResultSet.cpp.

References cpu_threads(), drop_first_, entryCount(), isRowAtEmpty(), and keep_first_.

Referenced by rowCount().

346  {
347  size_t row_count{0};
348  const size_t worker_count = cpu_threads();
349  std::vector<std::future<size_t>> counter_threads;
350  for (size_t i = 0,
351  start_entry = 0,
352  stride = (entryCount() + worker_count - 1) / worker_count;
353  i < worker_count && start_entry < entryCount();
354  ++i, start_entry += stride) {
355  const auto end_entry = std::min(start_entry + stride, entryCount());
356  counter_threads.push_back(std::async(
357  std::launch::async,
358  [this](const size_t start, const size_t end) {
359  size_t row_count{0};
360  for (size_t i = start; i < end; ++i) {
361  if (!isRowAtEmpty(i)) {
362  ++row_count;
363  }
364  }
365  return row_count;
366  },
367  start_entry,
368  end_entry));
369  }
370  for (auto& child : counter_threads) {
371  child.wait();
372  }
373  for (auto& child : counter_threads) {
374  row_count += child.get();
375  }
376  if (keep_first_ + drop_first_) {
377  const auto limited_row_count = std::min(keep_first_ + drop_first_, row_count);
378  return limited_row_count < drop_first_ ? 0 : limited_row_count - drop_first_;
379  }
380  return row_count;
381 }
size_t entryCount() const
size_t keep_first_
Definition: ResultSet.h:780
size_t drop_first_
Definition: ResultSet.h:779
bool isRowAtEmpty(const size_t index) const
int cpu_threads()
Definition: thread_count.h:23
+ Here is the call graph for this function:
+ Here is the caller graph for this function:

◆ parallelTop()

void ResultSet::parallelTop ( const std::list< Analyzer::OrderEntry > &  order_entries,
const size_t  top_n 
)
private

Definition at line 555 of file ResultSet.cpp.

References cpu_threads(), createComparator(), initPermutationBuffer(), permutation_, and topPermutation().

Referenced by sort().

556  {
557  const size_t step = cpu_threads();
558  std::vector<std::vector<uint32_t>> strided_permutations(step);
559  std::vector<std::future<void>> init_futures;
560  for (size_t start = 0; start < step; ++start) {
561  init_futures.emplace_back(
562  std::async(std::launch::async, [this, start, step, &strided_permutations] {
563  strided_permutations[start] = initPermutationBuffer(start, step);
564  }));
565  }
566  for (auto& init_future : init_futures) {
567  init_future.wait();
568  }
569  for (auto& init_future : init_futures) {
570  init_future.get();
571  }
572  auto compare = createComparator(order_entries, true);
573  std::vector<std::future<void>> top_futures;
574  for (auto& strided_permutation : strided_permutations) {
575  top_futures.emplace_back(
576  std::async(std::launch::async, [&strided_permutation, &compare, top_n] {
577  topPermutation(strided_permutation, top_n, compare);
578  }));
579  }
580  for (auto& top_future : top_futures) {
581  top_future.wait();
582  }
583  for (auto& top_future : top_futures) {
584  top_future.get();
585  }
586  permutation_.reserve(strided_permutations.size() * top_n);
587  for (const auto& strided_permutation : strided_permutations) {
588  permutation_.insert(
589  permutation_.end(), strided_permutation.begin(), strided_permutation.end());
590  }
591  topPermutation(permutation_, top_n, compare);
592 }
std::vector< uint32_t > permutation_
Definition: ResultSet.h:782
std::vector< uint32_t > initPermutationBuffer(const size_t start, const size_t step)
Definition: ResultSet.cpp:533
static void topPermutation(std::vector< uint32_t > &to_sort, const size_t n, const std::function< bool(const uint32_t, const uint32_t)> compare)
Definition: ResultSet.cpp:748
std::function< bool(const uint32_t, const uint32_t)> createComparator(const std::list< Analyzer::OrderEntry > &order_entries, const bool use_heap)
Definition: ResultSet.h:710
int cpu_threads()
Definition: thread_count.h:23
+ Here is the call graph for this function:
+ Here is the caller graph for this function:

◆ radixSortOnCpu()

void ResultSet::radixSortOnCpu ( const std::list< Analyzer::OrderEntry > &  order_entries) const
private

Definition at line 802 of file ResultSet.cpp.

References apply_permutation_cpu(), CHECK, CHECK_EQ, QueryMemoryDescriptor::getColOffInBytes(), QueryMemoryDescriptor::getEntryCount(), QueryMemoryDescriptor::getPaddedSlotWidthBytes(), QueryMemoryDescriptor::getSlotCount(), QueryMemoryDescriptor::hasKeylessHash(), query_mem_desc_, sort_groups_cpu(), and storage_.

Referenced by sort().

803  {
805  std::vector<int64_t> tmp_buff(query_mem_desc_.getEntryCount());
806  std::vector<int32_t> idx_buff(query_mem_desc_.getEntryCount());
807  CHECK_EQ(size_t(1), order_entries.size());
808  auto buffer_ptr = storage_->getUnderlyingBuffer();
809  for (const auto& order_entry : order_entries) {
810  const auto target_idx = order_entry.tle_no - 1;
811  const auto sortkey_val_buff = reinterpret_cast<int64_t*>(
812  buffer_ptr + query_mem_desc_.getColOffInBytes(target_idx));
813  const auto chosen_bytes = query_mem_desc_.getPaddedSlotWidthBytes(target_idx);
814  sort_groups_cpu(sortkey_val_buff,
815  &idx_buff[0],
817  order_entry.is_desc,
818  chosen_bytes);
819  apply_permutation_cpu(reinterpret_cast<int64_t*>(buffer_ptr),
820  &idx_buff[0],
822  &tmp_buff[0],
823  sizeof(int64_t));
824  for (size_t target_idx = 0; target_idx < query_mem_desc_.getSlotCount();
825  ++target_idx) {
826  if (static_cast<int>(target_idx) == order_entry.tle_no - 1) {
827  continue;
828  }
829  const auto chosen_bytes = query_mem_desc_.getPaddedSlotWidthBytes(target_idx);
830  const auto satellite_val_buff = reinterpret_cast<int64_t*>(
831  buffer_ptr + query_mem_desc_.getColOffInBytes(target_idx));
832  apply_permutation_cpu(satellite_val_buff,
833  &idx_buff[0],
835  &tmp_buff[0],
836  chosen_bytes);
837  }
838  }
839 }
#define CHECK_EQ(x, y)
Definition: Logger.h:195
void sort_groups_cpu(int64_t *val_buff, int32_t *idx_buff, const uint64_t entry_count, const bool desc, const uint32_t chosen_bytes)
Definition: InPlaceSort.cpp:27
QueryMemoryDescriptor query_mem_desc_
Definition: ResultSet.h:774
std::unique_ptr< ResultSetStorage > storage_
Definition: ResultSet.h:775
void apply_permutation_cpu(int64_t *val_buff, int32_t *idx_buff, const uint64_t entry_count, int64_t *tmp_buff, const uint32_t chosen_bytes)
Definition: InPlaceSort.cpp:46
const int8_t getPaddedSlotWidthBytes(const size_t slot_idx) const
#define CHECK(condition)
Definition: Logger.h:187
size_t getColOffInBytes(const size_t col_idx) const
+ Here is the call graph for this function:
+ Here is the caller graph for this function:

◆ radixSortOnGpu()

void ResultSet::radixSortOnGpu ( const std::list< Analyzer::OrderEntry > &  order_entries) const
private

Definition at line 768 of file ResultSet.cpp.

References copy_group_by_buffers_from_gpu(), create_dev_group_by_buffers(), executor_, QueryMemoryDescriptor::getBufferSizeBytes(), GPU, inplace_sort_gpu(), KernelPerFragment, query_mem_desc_, and storage_.

Referenced by sort().

769  {
770  auto data_mgr = &executor_->catalog_->getDataMgr();
771  const int device_id{0};
772  CudaAllocator cuda_allocator(data_mgr, device_id);
773  std::vector<int64_t*> group_by_buffers(executor_->blockSize());
774  group_by_buffers[0] = reinterpret_cast<int64_t*>(storage_->getUnderlyingBuffer());
775  auto dev_group_by_buffers =
776  create_dev_group_by_buffers(&cuda_allocator,
777  group_by_buffers,
779  executor_->blockSize(),
780  executor_->gridSize(),
781  device_id,
783  -1,
784  true,
785  true,
786  false,
787  nullptr);
789  order_entries, query_mem_desc_, dev_group_by_buffers, data_mgr, device_id);
791  data_mgr,
792  group_by_buffers,
794  dev_group_by_buffers.second,
796  executor_->blockSize(),
797  executor_->gridSize(),
798  device_id,
799  false);
800 }
GpuGroupByBuffers create_dev_group_by_buffers(DeviceAllocator *cuda_allocator, const std::vector< int64_t *> &group_by_buffers, const QueryMemoryDescriptor &query_mem_desc, const unsigned block_size_x, const unsigned grid_size_x, const int device_id, const ExecutorDispatchMode dispatch_mode, const int64_t num_input_rows, const bool prepend_index_buffer, const bool always_init_group_by_on_host, const bool use_bump_allocator, Allocator *insitu_allocator)
Definition: GpuMemUtils.cpp:61
const Executor * executor_
Definition: ResultSet.h:785
QueryMemoryDescriptor query_mem_desc_
Definition: ResultSet.h:774
std::unique_ptr< ResultSetStorage > storage_
Definition: ResultSet.h:775
void inplace_sort_gpu(const std::list< Analyzer::OrderEntry > &order_entries, const QueryMemoryDescriptor &query_mem_desc, const GpuGroupByBuffers &group_by_buffers, Data_Namespace::DataMgr *data_mgr, const int device_id)
void copy_group_by_buffers_from_gpu(Data_Namespace::DataMgr *data_mgr, const std::vector< int64_t *> &group_by_buffers, const size_t groups_buffer_size, const CUdeviceptr group_by_dev_buffers_mem, const QueryMemoryDescriptor &query_mem_desc, const unsigned block_size_x, const unsigned grid_size_x, const int device_id, const bool prepend_index_buffer)
size_t getBufferSizeBytes(const RelAlgExecutionUnit &ra_exe_unit, const unsigned thread_count, const ExecutorDeviceType device_type) const
+ Here is the call graph for this function:
+ Here is the caller graph for this function:

◆ rowCount()

size_t ResultSet::rowCount ( const bool  force_parallel = false) const

Definition at line 310 of file ResultSet.cpp.

References cached_row_count_, CHECK_GE, entryCount(), getNextRowUnlocked(), just_explain_, moveToBegin(), parallelRowCount(), permutation_, row_iteration_mutex_, and storage_.

310  {
311  if (just_explain_) {
312  return 1;
313  }
314  if (!permutation_.empty()) {
315  return permutation_.size();
316  }
317  if (cached_row_count_ != -1) {
319  return cached_row_count_;
320  }
321  if (!storage_) {
322  return 0;
323  }
324  if (force_parallel || entryCount() > 20000) {
325  return parallelRowCount();
326  }
327  std::lock_guard<std::mutex> lock(row_iteration_mutex_);
328  moveToBegin();
329  size_t row_count{0};
330  while (true) {
331  auto crt_row = getNextRowUnlocked(false, false);
332  if (crt_row.empty()) {
333  break;
334  }
335  ++row_count;
336  }
337  moveToBegin();
338  return row_count;
339 }
std::mutex row_iteration_mutex_
Definition: ResultSet.h:810
size_t entryCount() const
#define CHECK_GE(x, y)
Definition: Logger.h:200
std::unique_ptr< ResultSetStorage > storage_
Definition: ResultSet.h:775
std::vector< TargetValue > getNextRowUnlocked(const bool translate_strings, const bool decimal_to_double) const
const bool just_explain_
Definition: ResultSet.h:808
std::vector< uint32_t > permutation_
Definition: ResultSet.h:782
std::atomic< ssize_t > cached_row_count_
Definition: ResultSet.h:809
size_t parallelRowCount() const
Definition: ResultSet.cpp:346
void moveToBegin() const
Definition: ResultSet.cpp:435
+ Here is the call graph for this function:

◆ rowIterator() [1/2]

ResultSetRowIterator ResultSet::rowIterator ( size_t  from_logical_index,
bool  translate_strings,
bool  decimal_to_double 
) const
inline

Definition at line 330 of file ResultSet.h.

332  {
333  ResultSetRowIterator rowIterator(this, translate_strings, decimal_to_double);
334 
335  // move to first logical position
336  ++rowIterator;
337 
338  for (size_t index = 0; index < from_logical_index; index++) {
339  ++rowIterator;
340  }
341 
342  return rowIterator;
343  }
double decimal_to_double(const SQLTypeInfo &otype, int64_t oval)
ResultSetRowIterator rowIterator(size_t from_logical_index, bool translate_strings, bool decimal_to_double) const
Definition: ResultSet.h:330

◆ rowIterator() [2/2]

ResultSetRowIterator ResultSet::rowIterator ( bool  translate_strings,
bool  decimal_to_double 
) const
inline

Definition at line 345 of file ResultSet.h.

346  {
347  return rowIterator(0, translate_strings, decimal_to_double);
348  }
double decimal_to_double(const SQLTypeInfo &otype, int64_t oval)
ResultSetRowIterator rowIterator(size_t from_logical_index, bool translate_strings, bool decimal_to_double) const
Definition: ResultSet.h:330

◆ serialize()

void ResultSet::serialize ( TSerializedRows &  serialized_rows) const

◆ serializeCountDistinctColumns()

void ResultSet::serializeCountDistinctColumns ( TSerializedRows &  ) const
private

◆ serializeProjection()

void ResultSet::serializeProjection ( TSerializedRows &  serialized_rows) const
private

◆ serializeVarlenAggColumn()

void ResultSet::serializeVarlenAggColumn ( int8_t *  buf,
std::vector< std::string > &  varlen_buffer 
) const
private

◆ setCachedRowCount()

void ResultSet::setCachedRowCount ( const size_t  row_count) const

Definition at line 341 of file ResultSet.cpp.

References cached_row_count_, and CHECK.

341  {
342  CHECK(cached_row_count_ == -1 || cached_row_count_ == static_cast<ssize_t>(row_count));
343  cached_row_count_ = row_count;
344 }
std::atomic< ssize_t > cached_row_count_
Definition: ResultSet.h:809
#define CHECK(condition)
Definition: Logger.h:187

◆ setGeoReturnType()

void ResultSet::setGeoReturnType ( const GeoReturnType  val)
inline

Definition at line 494 of file ResultSet.h.

494 { geo_return_type_ = val; }
GeoReturnType geo_return_type_
Definition: ResultSet.h:813

◆ setQueueTime()

void ResultSet::setQueueTime ( const int64_t  queue_time)

Definition at line 423 of file ResultSet.cpp.

References queue_time_ms_.

423  {
424  queue_time_ms_ = queue_time;
425 }
int64_t queue_time_ms_
Definition: ResultSet.h:783

◆ setSeparateVarlenStorageValid()

void ResultSet::setSeparateVarlenStorageValid ( const bool  val)
inline

Definition at line 516 of file ResultSet.h.

516  {
517  separate_varlen_storage_valid_ = val;
518  }
bool separate_varlen_storage_valid_
Definition: ResultSet.h:806

◆ sort()

void ResultSet::sort ( const std::list< Analyzer::OrderEntry > &  order_entries,
const size_t  top_n 
)

Definition at line 464 of file ResultSet.cpp.

References Executor::baseline_threshold, baselineSort(), cached_row_count_, canUseFastBaselineSort(), CHECK, CHECK_EQ, CPU, createComparator(), doBaselineSort(), entryCount(), g_enable_watchdog, QueryMemoryDescriptor::getEntryCount(), getGpuCount(), GPU, initPermutationBuffer(), LOG, parallelTop(), permutation_, query_mem_desc_, radixSortOnCpu(), radixSortOnGpu(), QueryMemoryDescriptor::sortOnGpu(), sortPermutation(), targets_, topPermutation(), and logger::WARNING.

Referenced by RelAlgExecutor::executeRelAlgQuerySingleStep(), and RelAlgExecutor::executeRelAlgStep().

465  {
467  CHECK(!targets_.empty());
468 #ifdef HAVE_CUDA
469  if (canUseFastBaselineSort(order_entries, top_n)) {
470  baselineSort(order_entries, top_n);
471  return;
472  }
473 #endif // HAVE_CUDA
474  if (query_mem_desc_.sortOnGpu()) {
475  try {
476  radixSortOnGpu(order_entries);
477  } catch (const OutOfMemory&) {
478  LOG(WARNING) << "Out of GPU memory during sort, finish on CPU";
479  radixSortOnCpu(order_entries);
480  } catch (const std::bad_alloc&) {
481  LOG(WARNING) << "Out of GPU memory during sort, finish on CPU";
482  radixSortOnCpu(order_entries);
483  }
484  return;
485  }
486  // This check isn't strictly required, but allows the index buffer to be 32-bit.
487  if (query_mem_desc_.getEntryCount() > std::numeric_limits<uint32_t>::max()) {
488  throw RowSortException("Sorting more than 4B elements not supported");
489  }
490 
491  CHECK(permutation_.empty());
492 
493  const bool use_heap{order_entries.size() == 1 && top_n};
494  if (use_heap && entryCount() > 100000) {
495  if (g_enable_watchdog && (entryCount() > 20000000)) {
496  throw WatchdogException("Sorting the result would be too slow");
497  }
498  parallelTop(order_entries, top_n);
499  return;
500  }
501 
503  throw WatchdogException("Sorting the result would be too slow");
504  }
505 
507 
508  auto compare = createComparator(order_entries, use_heap);
509 
510  if (use_heap) {
511  topPermutation(permutation_, top_n, compare);
512  } else {
513  sortPermutation(compare);
514  }
515 }
void baselineSort(const std::list< Analyzer::OrderEntry > &order_entries, const size_t top_n)
#define CHECK_EQ(x, y)
Definition: Logger.h:195
size_t entryCount() const
void radixSortOnCpu(const std::list< Analyzer::OrderEntry > &order_entries) const
Definition: ResultSet.cpp:802
#define LOG(tag)
Definition: Logger.h:182
static const size_t baseline_threshold
Definition: Execute.h:980
QueryMemoryDescriptor query_mem_desc_
Definition: ResultSet.h:774
void radixSortOnGpu(const std::list< Analyzer::OrderEntry > &order_entries) const
Definition: ResultSet.cpp:768
std::vector< uint32_t > permutation_
Definition: ResultSet.h:782
std::vector< uint32_t > initPermutationBuffer(const size_t start, const size_t step)
Definition: ResultSet.cpp:533
const std::vector< TargetInfo > targets_
Definition: ResultSet.h:771
bool canUseFastBaselineSort(const std::list< Analyzer::OrderEntry > &order_entries, const size_t top_n)
std::atomic< ssize_t > cached_row_count_
Definition: ResultSet.h:809
static void topPermutation(std::vector< uint32_t > &to_sort, const size_t n, const std::function< bool(const uint32_t, const uint32_t)> compare)
Definition: ResultSet.cpp:748
void sortPermutation(const std::function< bool(const uint32_t, const uint32_t)> compare)
Definition: ResultSet.cpp:763
void parallelTop(const std::list< Analyzer::OrderEntry > &order_entries, const size_t top_n)
Definition: ResultSet.cpp:555
#define CHECK(condition)
Definition: Logger.h:187
bool g_enable_watchdog
Definition: Execute.cpp:69
std::function< bool(const uint32_t, const uint32_t)> createComparator(const std::list< Analyzer::OrderEntry > &order_entries, const bool use_heap)
Definition: ResultSet.h:710
+ Here is the call graph for this function:
+ Here is the caller graph for this function:

◆ sortPermutation()

void ResultSet::sortPermutation ( const std::function< bool(const uint32_t, const uint32_t)>  compare)
private

Definition at line 763 of file ResultSet.cpp.

References permutation_.

Referenced by sort().

764  {
765  std::sort(permutation_.begin(), permutation_.end(), compare);
766 }
std::vector< uint32_t > permutation_
Definition: ResultSet.h:782
+ Here is the caller graph for this function:

◆ syncEstimatorBuffer()

void ResultSet::syncEstimatorBuffer ( ) const

Definition at line 410 of file ResultSet.cpp.

References CHECK, CHECK_EQ, checked_calloc(), copy_from_gpu(), data_mgr_, device_id_, device_type_, estimator_, estimator_buffer_, GPU, and host_estimator_buffer_.

410  {
413  CHECK_EQ(size_t(0), estimator_->getBufferSize() % sizeof(int64_t));
415  static_cast<int8_t*>(checked_calloc(estimator_->getBufferSize(), 1));
418  reinterpret_cast<CUdeviceptr>(estimator_buffer_),
419  estimator_->getBufferSize(),
420  device_id_);
421 }
#define CHECK_EQ(x, y)
Definition: Logger.h:195
int8_t * estimator_buffer_
Definition: ResultSet.h:798
void copy_from_gpu(Data_Namespace::DataMgr *data_mgr, void *dst, const CUdeviceptr src, const size_t num_bytes, const int device_id)
void * checked_calloc(const size_t nmemb, const size_t size)
Definition: checked_alloc.h:48
Data_Namespace::DataMgr * data_mgr_
Definition: ResultSet.h:800
const std::shared_ptr< const Analyzer::Estimator > estimator_
Definition: ResultSet.h:797
int8_t * host_estimator_buffer_
Definition: ResultSet.h:799
const ExecutorDeviceType device_type_
Definition: ResultSet.h:772
#define CHECK(condition)
Definition: Logger.h:187
const int device_id_
Definition: ResultSet.h:773
+ Here is the call graph for this function:

◆ topPermutation()

void ResultSet::topPermutation ( std::vector< uint32_t > &  to_sort,
const size_t  n,
const std::function< bool(const uint32_t, const uint32_t)>  compare 
)
staticprivate

Definition at line 748 of file ResultSet.cpp.

Referenced by parallelTop(), and sort().

751  {
752  std::make_heap(to_sort.begin(), to_sort.end(), compare);
753  std::vector<uint32_t> permutation_top;
754  permutation_top.reserve(n);
755  for (size_t i = 0; i < n && !to_sort.empty(); ++i) {
756  permutation_top.push_back(to_sort.front());
757  std::pop_heap(to_sort.begin(), to_sort.end(), compare);
758  to_sort.pop_back();
759  }
760  to_sort.swap(permutation_top);
761 }
+ Here is the caller graph for this function:

◆ unserialize()

static std::unique_ptr<ResultSet> ResultSet::unserialize ( const TSerializedRows &  serialized_rows,
const Executor  
)
static

◆ unserializeCountDistinctColumns()

void ResultSet::unserializeCountDistinctColumns ( const TSerializedRows &  )
private

◆ updateStorageEntryCount()

void ResultSet::updateStorageEntryCount ( const size_t  new_entry_count)
inline

Definition at line 358 of file ResultSet.h.

References File_Namespace::append(), CHECK, anonymous_namespace{TypedDataAccessors.h}::decimal_to_double(), QueryMemoryDescriptor::getQueryDescriptionType(), Projection, ResultSetStorage::query_mem_desc_, ResultSetStorage::ResultSet, and QueryMemoryDescriptor::setEntryCount().

358  {
359  CHECK(query_mem_desc_.getQueryDescriptionType() == QueryDescriptionType::Projection);
360  query_mem_desc_.setEntryCount(new_entry_count);
361  CHECK(storage_);
362  storage_->updateEntryCount(new_entry_count);
363  }
void setEntryCount(const size_t val)
QueryMemoryDescriptor query_mem_desc_
Definition: ResultSet.h:774
std::unique_ptr< ResultSetStorage > storage_
Definition: ResultSet.h:775
#define CHECK(condition)
Definition: Logger.h:187
QueryDescriptionType getQueryDescriptionType() const
+ Here is the call graph for this function:

Friends And Related Function Documentation

◆ ResultSetManager

friend class ResultSetManager
friend

Definition at line 820 of file ResultSet.h.

◆ ResultSetRowIterator

friend class ResultSetRowIterator
friend

Definition at line 821 of file ResultSet.h.

Member Data Documentation

◆ appended_storage_

std::vector<std::unique_ptr<ResultSetStorage> > ResultSet::appended_storage_
private

Definition at line 776 of file ResultSet.h.

Referenced by append(), copyColumnIntoBuffer(), findStorage(), getStorageIndex(), and ~ResultSet().

◆ cached_row_count_

std::atomic<ssize_t> ResultSet::cached_row_count_
mutableprivate

Definition at line 809 of file ResultSet.h.

Referenced by append(), ResultSet(), rowCount(), setCachedRowCount(), and sort().

◆ chunk_iters_

std::vector<std::shared_ptr<std::list<ChunkIter> > > ResultSet::chunk_iters_
private

Definition at line 788 of file ResultSet.h.

Referenced by append().

◆ chunks_

std::list<std::shared_ptr<Chunk_NS::Chunk> > ResultSet::chunks_
private

Definition at line 787 of file ResultSet.h.

Referenced by append().

◆ col_buffers_

std::vector<std::vector<std::vector<const int8_t*> > > ResultSet::col_buffers_
private

◆ column_wise_comparator_

std::unique_ptr<ResultSetComparator<ColumnWiseTargetAccessor> > ResultSet::column_wise_comparator_
private

Definition at line 818 of file ResultSet.h.

◆ consistent_frag_sizes_

std::vector<std::vector<int64_t> > ResultSet::consistent_frag_sizes_
private

Definition at line 795 of file ResultSet.h.

Referenced by append(), getColumnFrag(), and ResultSet().

◆ crt_row_buff_idx_

size_t ResultSet::crt_row_buff_idx_
mutableprivate

◆ data_mgr_

Data_Namespace::DataMgr* ResultSet::data_mgr_
private

Definition at line 800 of file ResultSet.h.

Referenced by ResultSet(), and syncEstimatorBuffer().

◆ device_id_

const int ResultSet::device_id_
private

◆ device_type_

◆ drop_first_

size_t ResultSet::drop_first_
private

Definition at line 779 of file ResultSet.h.

Referenced by advanceCursorToNextEntry(), isTruncated(), parallelRowCount(), and ResultSet().

◆ estimator_

const std::shared_ptr<const Analyzer::Estimator> ResultSet::estimator_
private

Definition at line 797 of file ResultSet.h.

Referenced by definitelyHasNoRows(), ResultSet(), and syncEstimatorBuffer().

◆ estimator_buffer_

int8_t* ResultSet::estimator_buffer_
private

Definition at line 798 of file ResultSet.h.

Referenced by getDeviceEstimatorBuffer(), ResultSet(), syncEstimatorBuffer(), and ~ResultSet().

◆ executor_

◆ explanation_

std::string ResultSet::explanation_
private

Definition at line 807 of file ResultSet.h.

◆ fetched_so_far_

size_t ResultSet::fetched_so_far_
mutableprivate

Definition at line 778 of file ResultSet.h.

Referenced by moveToBegin(), and ResultSet().

◆ frag_offsets_

std::vector<std::vector<std::vector<int64_t> > > ResultSet::frag_offsets_
private

Definition at line 794 of file ResultSet.h.

Referenced by append(), getColumnFrag(), and ResultSet().

◆ geo_return_type_

GeoReturnType ResultSet::geo_return_type_
mutableprivate

Definition at line 813 of file ResultSet.h.

Referenced by makeGeoTargetValue(), and ResultSet().

◆ host_estimator_buffer_

int8_t* ResultSet::host_estimator_buffer_
mutableprivate

Definition at line 799 of file ResultSet.h.

Referenced by getHostEstimatorBuffer(), ResultSet(), syncEstimatorBuffer(), and ~ResultSet().

◆ just_explain_

const bool ResultSet::just_explain_
private

Definition at line 808 of file ResultSet.h.

Referenced by colCount(), definitelyHasNoRows(), getColType(), isExplain(), ResultSet(), and rowCount().

◆ keep_first_

size_t ResultSet::keep_first_
private

◆ lazy_fetch_info_

const std::vector<ColumnLazyFetchInfo> ResultSet::lazy_fetch_info_
private

◆ literal_buffers_

std::vector<std::vector<int8_t> > ResultSet::literal_buffers_
private

Definition at line 791 of file ResultSet.h.

Referenced by append().

◆ permutation_

std::vector<uint32_t> ResultSet::permutation_
private

◆ query_mem_desc_

◆ queue_time_ms_

int64_t ResultSet::queue_time_ms_
private

◆ render_time_ms_

int64_t ResultSet::render_time_ms_
private

Definition at line 784 of file ResultSet.h.

Referenced by getRenderTime(), and ResultSet().

◆ row_iteration_mutex_

std::mutex ResultSet::row_iteration_mutex_
mutableprivate

Definition at line 810 of file ResultSet.h.

Referenced by rowCount().

◆ row_set_mem_owner_

const std::shared_ptr<RowSetMemoryOwner> ResultSet::row_set_mem_owner_
private

◆ row_wise_comparator_

std::unique_ptr<ResultSetComparator<RowWiseTargetAccessor> > ResultSet::row_wise_comparator_
private

Definition at line 817 of file ResultSet.h.

◆ separate_varlen_storage_valid_

bool ResultSet::separate_varlen_storage_valid_
private

◆ serialized_varlen_buffer_

std::vector<SerializedVarlenBufferStorage> ResultSet::serialized_varlen_buffer_
private

Definition at line 805 of file ResultSet.h.

Referenced by append(), makeGeoTargetValue(), and makeVarlenTargetValue().

◆ storage_

◆ targets_

const std::vector<TargetInfo> ResultSet::targets_
private

The documentation for this class was generated from the following files: