OmniSciDB  06b3bd477c
ResultSet Class Reference

#include <ResultSet.h>

Classes

struct  ColumnWiseTargetAccessor
 
struct  QueryExecutionTimings
 
struct  ResultSetComparator
 
struct  RowWiseTargetAccessor
 
struct  StorageLookupResult
 
struct  TargetOffsets
 
struct  VarlenTargetPtrPair
 

Public Types

enum  GeoReturnType { GeoReturnType::GeoTargetValue, GeoReturnType::WktString, GeoReturnType::GeoTargetValuePtr, GeoReturnType::GeoTargetValueGpuPtr }
 

Public Member Functions

 ResultSet (const std::vector< TargetInfo > &targets, const ExecutorDeviceType device_type, const QueryMemoryDescriptor &query_mem_desc, const std::shared_ptr< RowSetMemoryOwner > row_set_mem_owner, const Executor *executor)
 
 ResultSet (const std::vector< TargetInfo > &targets, const std::vector< ColumnLazyFetchInfo > &lazy_fetch_info, const std::vector< std::vector< const int8_t * >> &col_buffers, const std::vector< std::vector< int64_t >> &frag_offsets, const std::vector< int64_t > &consistent_frag_sizes, const ExecutorDeviceType device_type, const int device_id, const QueryMemoryDescriptor &query_mem_desc, const std::shared_ptr< RowSetMemoryOwner > row_set_mem_owner, const Executor *executor)
 
 ResultSet (const std::shared_ptr< const Analyzer::Estimator >, const ExecutorDeviceType device_type, const int device_id, Data_Namespace::DataMgr *data_mgr)
 
 ResultSet (const std::string &explanation)
 
 ResultSet (int64_t queue_time_ms, int64_t render_time_ms, const std::shared_ptr< RowSetMemoryOwner > row_set_mem_owner)
 
 ~ResultSet ()
 
ResultSetRowIterator rowIterator (size_t from_logical_index, bool translate_strings, bool decimal_to_double) const
 
ResultSetRowIterator rowIterator (bool translate_strings, bool decimal_to_double) const
 
ExecutorDeviceType getDeviceType () const
 
const ResultSetStorage * allocateStorage () const
 
const ResultSetStorage * allocateStorage (int8_t *, const std::vector< int64_t > &) const
 
const ResultSetStorage * allocateStorage (const std::vector< int64_t > &) const
 
void updateStorageEntryCount (const size_t new_entry_count)
 
std::vector< TargetValue > getNextRow (const bool translate_strings, const bool decimal_to_double) const
 
size_t getCurrentRowBufferIndex () const
 
std::vector< TargetValue > getRowAt (const size_t index) const
 
TargetValue getRowAt (const size_t row_idx, const size_t col_idx, const bool translate_strings, const bool decimal_to_double=true) const
 
OneIntegerColumnRow getOneColRow (const size_t index) const
 
std::vector< TargetValue > getRowAtNoTranslations (const size_t index, const std::vector< bool > &targets_to_skip={}) const
 
bool isRowAtEmpty (const size_t index) const
 
void sort (const std::list< Analyzer::OrderEntry > &order_entries, const size_t top_n)
 
void keepFirstN (const size_t n)
 
void dropFirstN (const size_t n)
 
void append (ResultSet &that)
 
const ResultSetStorage * getStorage () const
 
size_t colCount () const
 
SQLTypeInfo getColType (const size_t col_idx) const
 
size_t rowCount (const bool force_parallel=false) const
 
void setCachedRowCount (const size_t row_count) const
 
size_t entryCount () const
 
size_t getBufferSizeBytes (const ExecutorDeviceType device_type) const
 
bool definitelyHasNoRows () const
 
const QueryMemoryDescriptor & getQueryMemDesc () const
 
const std::vector< TargetInfo > & getTargetInfos () const
 
const std::vector< int64_t > & getTargetInitVals () const
 
int8_t * getDeviceEstimatorBuffer () const
 
int8_t * getHostEstimatorBuffer () const
 
void syncEstimatorBuffer () const
 
size_t getNDVEstimator () const
 
void setQueueTime (const int64_t queue_time)
 
void setKernelQueueTime (const int64_t kernel_queue_time)
 
void addCompilationQueueTime (const int64_t compilation_queue_time)
 
int64_t getQueueTime () const
 
int64_t getRenderTime () const
 
void moveToBegin () const
 
bool isTruncated () const
 
bool isExplain () const
 
bool isGeoColOnGpu (const size_t col_idx) const
 
int getDeviceId () const
 
void fillOneEntry (const std::vector< int64_t > &entry)
 
void initializeStorage () const
 
void holdChunks (const std::list< std::shared_ptr< Chunk_NS::Chunk >> &chunks)
 
void holdChunkIterators (const std::shared_ptr< std::list< ChunkIter >> chunk_iters)
 
void holdLiterals (std::vector< int8_t > &literal_buff)
 
std::shared_ptr< RowSetMemoryOwner > getRowSetMemOwner () const
 
const std::vector< uint32_t > & getPermutationBuffer () const
 
const bool isPermutationBufferEmpty () const
 
void serialize (TSerializedRows &serialized_rows) const
 
size_t getLimit () const
 
GeoReturnType getGeoReturnType () const
 
void setGeoReturnType (const GeoReturnType val)
 
void copyColumnIntoBuffer (const size_t column_idx, int8_t *output_buffer, const size_t output_buffer_size) const
 
bool isDirectColumnarConversionPossible () const
 
bool didOutputColumnar () const
 
bool isZeroCopyColumnarConversionPossible (size_t column_idx) const
 
const int8_t * getColumnarBuffer (size_t column_idx) const
 
QueryDescriptionType getQueryDescriptionType () const
 
const int8_t getPaddedSlotWidthBytes (const size_t slot_idx) const
 
std::tuple< std::vector< bool >, size_t > getSingleSlotTargetBitmap () const
 
std::tuple< std::vector< bool >, size_t > getSupportedSingleSlotTargetBitmap () const
 
std::vector< size_t > getSlotIndicesForTargetIndices () const
 
const std::vector< ColumnLazyFetchInfo > & getLazyFetchInfo () const
 
void setSeparateVarlenStorageValid (const bool val)
 
std::shared_ptr< const std::vector< std::string > > getStringDictionaryPayloadCopy (const int dict_id) const
 
template<typename ENTRY_TYPE , QueryDescriptionType QUERY_TYPE, bool COLUMNAR_FORMAT>
ENTRY_TYPE getEntryAt (const size_t row_idx, const size_t target_idx, const size_t slot_idx) const
 
template<typename ENTRY_TYPE , QueryDescriptionType QUERY_TYPE, bool COLUMNAR_FORMAT>
ENTRY_TYPE getEntryAt (const size_t row_idx, const size_t target_idx, const size_t slot_idx) const
 
template<typename ENTRY_TYPE >
ENTRY_TYPE getColumnarPerfectHashEntryAt (const size_t row_idx, const size_t target_idx, const size_t slot_idx) const
 
template<typename ENTRY_TYPE >
ENTRY_TYPE getRowWisePerfectHashEntryAt (const size_t row_idx, const size_t target_idx, const size_t slot_idx) const
 
template<typename ENTRY_TYPE >
ENTRY_TYPE getRowWiseBaselineEntryAt (const size_t row_idx, const size_t target_idx, const size_t slot_idx) const
 
template<typename ENTRY_TYPE >
ENTRY_TYPE getColumnarBaselineEntryAt (const size_t row_idx, const size_t target_idx, const size_t slot_idx) const
 

Static Public Member Functions

static QueryMemoryDescriptor fixupQueryMemoryDescriptor (const QueryMemoryDescriptor &)
 
static std::unique_ptr< ResultSetunserialize (const TSerializedRows &serialized_rows, const Executor *)
 

Private Types

using BufferSet = std::set< int64_t >
 
using SerializedVarlenBufferStorage = std::vector< std::string >
 

Private Member Functions

void advanceCursorToNextEntry (ResultSetRowIterator &iter) const
 
std::vector< TargetValue > getNextRowImpl (const bool translate_strings, const bool decimal_to_double) const
 
std::vector< TargetValue > getNextRowUnlocked (const bool translate_strings, const bool decimal_to_double) const
 
std::vector< TargetValue > getRowAt (const size_t index, const bool translate_strings, const bool decimal_to_double, const bool fixup_count_distinct_pointers, const std::vector< bool > &targets_to_skip={}) const
 
template<typename ENTRY_TYPE >
ENTRY_TYPE getColumnarPerfectHashEntryAt (const size_t row_idx, const size_t target_idx, const size_t slot_idx) const
 
template<typename ENTRY_TYPE >
ENTRY_TYPE getRowWisePerfectHashEntryAt (const size_t row_idx, const size_t target_idx, const size_t slot_idx) const
 
template<typename ENTRY_TYPE >
ENTRY_TYPE getRowWiseBaselineEntryAt (const size_t row_idx, const size_t target_idx, const size_t slot_idx) const
 
template<typename ENTRY_TYPE >
ENTRY_TYPE getColumnarBaselineEntryAt (const size_t row_idx, const size_t target_idx, const size_t slot_idx) const
 
size_t binSearchRowCount () const
 
size_t parallelRowCount () const
 
size_t advanceCursorToNextEntry () const
 
void radixSortOnGpu (const std::list< Analyzer::OrderEntry > &order_entries) const
 
void radixSortOnCpu (const std::list< Analyzer::OrderEntry > &order_entries) const
 
TargetValue getTargetValueFromBufferRowwise (int8_t *rowwise_target_ptr, int8_t *keys_ptr, const size_t entry_buff_idx, const TargetInfo &target_info, const size_t target_logical_idx, const size_t slot_idx, const bool translate_strings, const bool decimal_to_double, const bool fixup_count_distinct_pointers) const
 
TargetValue getTargetValueFromBufferColwise (const int8_t *col_ptr, const int8_t *keys_ptr, const QueryMemoryDescriptor &query_mem_desc, const size_t local_entry_idx, const size_t global_entry_idx, const TargetInfo &target_info, const size_t target_logical_idx, const size_t slot_idx, const bool translate_strings, const bool decimal_to_double) const
 
TargetValue makeTargetValue (const int8_t *ptr, const int8_t compact_sz, const TargetInfo &target_info, const size_t target_logical_idx, const bool translate_strings, const bool decimal_to_double, const size_t entry_buff_idx) const
 
TargetValue makeVarlenTargetValue (const int8_t *ptr1, const int8_t compact_sz1, const int8_t *ptr2, const int8_t compact_sz2, const TargetInfo &target_info, const size_t target_logical_idx, const bool translate_strings, const size_t entry_buff_idx) const
 
TargetValue makeGeoTargetValue (const int8_t *geo_target_ptr, const size_t slot_idx, const TargetInfo &target_info, const size_t target_logical_idx, const size_t entry_buff_idx) const
 
InternalTargetValue getColumnInternal (const int8_t *buff, const size_t entry_idx, const size_t target_logical_idx, const StorageLookupResult &storage_lookup_result) const
 
InternalTargetValue getVarlenOrderEntry (const int64_t str_ptr, const size_t str_len) const
 
int64_t lazyReadInt (const int64_t ival, const size_t target_logical_idx, const StorageLookupResult &storage_lookup_result) const
 
std::pair< size_t, size_t > getStorageIndex (const size_t entry_idx) const
 
const std::vector< const int8_t * > & getColumnFrag (const size_t storge_idx, const size_t col_logical_idx, int64_t &global_idx) const
 
StorageLookupResult findStorage (const size_t entry_idx) const
 
std::function< bool(const uint32_t, const uint32_t)> createComparator (const std::list< Analyzer::OrderEntry > &order_entries, const bool use_heap)
 
void sortPermutation (const std::function< bool(const uint32_t, const uint32_t)> compare)
 
std::vector< uint32_t > initPermutationBuffer (const size_t start, const size_t step)
 
void parallelTop (const std::list< Analyzer::OrderEntry > &order_entries, const size_t top_n)
 
void baselineSort (const std::list< Analyzer::OrderEntry > &order_entries, const size_t top_n)
 
void doBaselineSort (const ExecutorDeviceType device_type, const std::list< Analyzer::OrderEntry > &order_entries, const size_t top_n)
 
bool canUseFastBaselineSort (const std::list< Analyzer::OrderEntry > &order_entries, const size_t top_n)
 
Data_Namespace::DataMgr * getDataManager () const
 
int getGpuCount () const
 
void serializeProjection (TSerializedRows &serialized_rows) const
 
void serializeVarlenAggColumn (int8_t *buf, std::vector< std::string > &varlen_bufer) const
 
void serializeCountDistinctColumns (TSerializedRows &) const
 
void unserializeCountDistinctColumns (const TSerializedRows &)
 
void fixupCountDistinctPointers ()
 
void create_active_buffer_set (BufferSet &count_distinct_active_buffer_set) const
 
int64_t getDistinctBufferRefFromBufferRowwise (int8_t *rowwise_target_ptr, const TargetInfo &target_info) const
 

Static Private Member Functions

static bool isNull (const SQLTypeInfo &ti, const InternalTargetValue &val, const bool float_argument_input)
 
static void topPermutation (std::vector< uint32_t > &to_sort, const size_t n, const std::function< bool(const uint32_t, const uint32_t)> compare)
 

Private Attributes

const std::vector< TargetInfo > targets_
 
const ExecutorDeviceType device_type_
 
const int device_id_
 
QueryMemoryDescriptor query_mem_desc_
 
std::unique_ptr< ResultSetStorage > storage_
 
AppendedStorage appended_storage_
 
size_t crt_row_buff_idx_
 
size_t fetched_so_far_
 
size_t drop_first_
 
size_t keep_first_
 
std::shared_ptr< RowSetMemoryOwner > row_set_mem_owner_
 
std::vector< uint32_t > permutation_
 
QueryExecutionTimings timings_
 
const Executor * executor_
 
std::list< std::shared_ptr< Chunk_NS::Chunk > > chunks_
 
std::vector< std::shared_ptr< std::list< ChunkIter > > > chunk_iters_
 
std::vector< std::vector< int8_t > > literal_buffers_
 
const std::vector< ColumnLazyFetchInfo > lazy_fetch_info_
 
std::vector< std::vector< std::vector< const int8_t * > > > col_buffers_
 
std::vector< std::vector< std::vector< int64_t > > > frag_offsets_
 
std::vector< std::vector< int64_t > > consistent_frag_sizes_
 
const std::shared_ptr< const Analyzer::Estimator > estimator_
 
Data_Namespace::AbstractBuffer * device_estimator_buffer_ {nullptr}
 
int8_t * host_estimator_buffer_ {nullptr}
 
Data_Namespace::DataMgr * data_mgr_
 
std::vector< SerializedVarlenBufferStorage > serialized_varlen_buffer_
 
bool separate_varlen_storage_valid_
 
std::string explanation_
 
const bool just_explain_
 
std::atomic< ssize_t > cached_row_count_
 
std::mutex row_iteration_mutex_
 
GeoReturnType geo_return_type_
 
std::unique_ptr< ResultSetComparator< RowWiseTargetAccessor > > row_wise_comparator_
 
std::unique_ptr< ResultSetComparator< ColumnWiseTargetAccessor > > column_wise_comparator_
 

Friends

class ResultSetManager
 
class ResultSetRowIterator
 
class ColumnarResults
 

Detailed Description

Definition at line 304 of file ResultSet.h.
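
A minimal usage sketch (added here for illustration, not part of the original documentation), assuming rows is a ResultSet produced by query execution; it drains the result row by row through the public API declared above:

  void dump_result_set(const ResultSet& rows) {
    const size_t num_cols = rows.colCount();
    while (true) {
      // translate_strings = true, decimal_to_double = true
      const auto crt_row = rows.getNextRow(true, true);
      if (crt_row.empty()) {
        break;  // iteration exhausted
      }
      CHECK_EQ(num_cols, crt_row.size());
      // each element of crt_row is a TargetValue for the corresponding target
    }
  }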

Member Typedef Documentation

using ResultSet::BufferSet = std::set<int64_t>
private

Definition at line 830 of file ResultSet.h.

using ResultSet::SerializedVarlenBufferStorage = std::vector<std::string>
private

Definition at line 868 of file ResultSet.h.

Member Enumeration Documentation

enum ResultSet::GeoReturnType
strong

Geo return type options when accessing geo columns from a result set.

Enumerator
GeoTargetValue 

Copies the geo data into a struct of vectors - coords are uncompressed

WktString 

Returns the geo data as a WKT string

GeoTargetValuePtr 

Returns only the pointers of the underlying buffers for the geo data.

GeoTargetValueGpuPtr 

If geo data is currently on a device, keep the data on the device and return the device ptrs

Definition at line 499 of file ResultSet.h.

enum class GeoReturnType {
  GeoTargetValue,
  WktString,
  GeoTargetValuePtr,
  GeoTargetValueGpuPtr
};
boost::optional< boost::variant< GeoPointTargetValue, GeoLineStringTargetValue, GeoPolyTargetValue, GeoMultiPolyTargetValue >> GeoTargetValue
Definition: TargetValue.h:161
boost::variant< GeoPointTargetValuePtr, GeoLineStringTargetValuePtr, GeoPolyTargetValuePtr, GeoMultiPolyTargetValuePtr > GeoTargetValuePtr
Definition: TargetValue.h:165
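
A hypothetical call site (not from the sources), assuming a ResultSet named rows: request WKT output for geo targets before iterating.

  rows.setGeoReturnType(ResultSet::GeoReturnType::WktString);
  CHECK(rows.getGeoReturnType() == ResultSet::GeoReturnType::WktString);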

Constructor & Destructor Documentation

ResultSet::ResultSet ( const std::vector< TargetInfo > &  targets,
const ExecutorDeviceType  device_type,
const QueryMemoryDescriptor &  query_mem_desc,
const std::shared_ptr< RowSetMemoryOwner >  row_set_mem_owner,
const Executor *  executor 
)

Definition at line 104 of file ResultSet.cpp.

109  : targets_(targets)
110  , device_type_(device_type)
111  , device_id_(-1)
112  , query_mem_desc_(query_mem_desc)
113  , crt_row_buff_idx_(0)
114  , fetched_so_far_(0)
115  , drop_first_(0)
116  , keep_first_(0)
117  , row_set_mem_owner_(row_set_mem_owner)
118  , executor_(executor)
119  , data_mgr_(nullptr)
121  , just_explain_(false)
122  , cached_row_count_(-1)
GeoReturnType geo_return_type_
Definition: ResultSet.h:878
const Executor * executor_
Definition: ResultSet.h:850
QueryMemoryDescriptor query_mem_desc_
Definition: ResultSet.h:839
size_t keep_first_
Definition: ResultSet.h:845
const bool just_explain_
Definition: ResultSet.h:873
const std::vector< TargetInfo > targets_
Definition: ResultSet.h:836
std::shared_ptr< RowSetMemoryOwner > row_set_mem_owner_
Definition: ResultSet.h:846
size_t drop_first_
Definition: ResultSet.h:844
std::atomic< ssize_t > cached_row_count_
Definition: ResultSet.h:874
Data_Namespace::DataMgr * data_mgr_
Definition: ResultSet.h:865
const ExecutorDeviceType device_type_
Definition: ResultSet.h:837
size_t fetched_so_far_
Definition: ResultSet.h:843
size_t crt_row_buff_idx_
Definition: ResultSet.h:842
bool separate_varlen_storage_valid_
Definition: ResultSet.h:871
const int device_id_
Definition: ResultSet.h:838
ResultSet::ResultSet ( const std::vector< TargetInfo > &  targets,
const std::vector< ColumnLazyFetchInfo > &  lazy_fetch_info,
const std::vector< std::vector< const int8_t * >> &  col_buffers,
const std::vector< std::vector< int64_t >> &  frag_offsets,
const std::vector< int64_t > &  consistent_frag_sizes,
const ExecutorDeviceType  device_type,
const int  device_id,
const QueryMemoryDescriptor &  query_mem_desc,
const std::shared_ptr< RowSetMemoryOwner >  row_set_mem_owner,
const Executor *  executor 
)

Definition at line 125 of file ResultSet.cpp.

135  : targets_(targets)
136  , device_type_(device_type)
137  , device_id_(device_id)
138  , query_mem_desc_(query_mem_desc)
139  , crt_row_buff_idx_(0)
140  , fetched_so_far_(0)
141  , drop_first_(0)
142  , keep_first_(0)
143  , row_set_mem_owner_(row_set_mem_owner)
144  , executor_(executor)
145  , lazy_fetch_info_(lazy_fetch_info)
146  , col_buffers_{col_buffers}
147  , frag_offsets_{frag_offsets}
148  , consistent_frag_sizes_{consistent_frag_sizes}
149  , data_mgr_(nullptr)
151  , just_explain_(false)
152  , cached_row_count_(-1)
GeoReturnType geo_return_type_
Definition: ResultSet.h:878
const Executor * executor_
Definition: ResultSet.h:850
QueryMemoryDescriptor query_mem_desc_
Definition: ResultSet.h:839
size_t keep_first_
Definition: ResultSet.h:845
const bool just_explain_
Definition: ResultSet.h:873
const std::vector< TargetInfo > targets_
Definition: ResultSet.h:836
std::shared_ptr< RowSetMemoryOwner > row_set_mem_owner_
Definition: ResultSet.h:846
size_t drop_first_
Definition: ResultSet.h:844
std::atomic< ssize_t > cached_row_count_
Definition: ResultSet.h:874
const std::vector< ColumnLazyFetchInfo > lazy_fetch_info_
Definition: ResultSet.h:857
Data_Namespace::DataMgr * data_mgr_
Definition: ResultSet.h:865
std::vector< std::vector< std::vector< const int8_t * > > > col_buffers_
Definition: ResultSet.h:858
std::vector< std::vector< int64_t > > consistent_frag_sizes_
Definition: ResultSet.h:860
const ExecutorDeviceType device_type_
Definition: ResultSet.h:837
size_t fetched_so_far_
Definition: ResultSet.h:843
size_t crt_row_buff_idx_
Definition: ResultSet.h:842
std::vector< std::vector< std::vector< int64_t > > > frag_offsets_
Definition: ResultSet.h:859
bool separate_varlen_storage_valid_
Definition: ResultSet.h:871
const int device_id_
Definition: ResultSet.h:838
ResultSet::ResultSet ( const std::shared_ptr< const Analyzer::Estimator > ,
const ExecutorDeviceType  device_type,
const int  device_id,
Data_Namespace::DataMgr *  data_mgr 
)
ResultSet::ResultSet ( const std::string &  explanation)

Definition at line 181 of file ResultSet.cpp.

References CPU.

183  , device_id_(-1)
184  , fetched_so_far_(0)
186  , explanation_(explanation)
187  , just_explain_(true)
188  , cached_row_count_(-1)
GeoReturnType geo_return_type_
Definition: ResultSet.h:878
const bool just_explain_
Definition: ResultSet.h:873
std::atomic< ssize_t > cached_row_count_
Definition: ResultSet.h:874
std::string explanation_
Definition: ResultSet.h:872
const ExecutorDeviceType device_type_
Definition: ResultSet.h:837
size_t fetched_so_far_
Definition: ResultSet.h:843
bool separate_varlen_storage_valid_
Definition: ResultSet.h:871
const int device_id_
Definition: ResultSet.h:838
ResultSet::ResultSet ( int64_t  queue_time_ms,
int64_t  render_time_ms,
const std::shared_ptr< RowSetMemoryOwner >  row_set_mem_owner 
)

Definition at line 191 of file ResultSet.cpp.

References CPU.

195  , device_id_(-1)
196  , fetched_so_far_(0)
197  , row_set_mem_owner_(row_set_mem_owner)
198  , timings_(QueryExecutionTimings{queue_time_ms, render_time_ms, 0, 0})
200  , just_explain_(true)
201  , cached_row_count_(-1)
GeoReturnType geo_return_type_
Definition: ResultSet.h:878
const bool just_explain_
Definition: ResultSet.h:873
std::shared_ptr< RowSetMemoryOwner > row_set_mem_owner_
Definition: ResultSet.h:846
QueryExecutionTimings timings_
Definition: ResultSet.h:849
std::atomic< ssize_t > cached_row_count_
Definition: ResultSet.h:874
const ExecutorDeviceType device_type_
Definition: ResultSet.h:837
size_t fetched_so_far_
Definition: ResultSet.h:843
bool separate_varlen_storage_valid_
Definition: ResultSet.h:871
const int device_id_
Definition: ResultSet.h:838
ResultSet::~ResultSet ( )

Definition at line 204 of file ResultSet.cpp.

References CHECK(), and CPU.

204  {
205  if (storage_) {
206  if (!storage_->buff_is_provided_) {
207  CHECK(storage_->getUnderlyingBuffer());
208  free(storage_->getUnderlyingBuffer());
209  }
210  }
211  for (auto& storage : appended_storage_) {
212  if (storage && !storage->buff_is_provided_) {
213  free(storage->getUnderlyingBuffer());
214  }
215  }
219  }
221  CHECK(data_mgr_);
223  }
224 }
AppendedStorage appended_storage_
Definition: ResultSet.h:841
std::unique_ptr< ResultSetStorage > storage_
Definition: ResultSet.h:840
CHECK(cgen_state)
Data_Namespace::DataMgr * data_mgr_
Definition: ResultSet.h:865
int8_t * host_estimator_buffer_
Definition: ResultSet.h:864
const ExecutorDeviceType device_type_
Definition: ResultSet.h:837
void free(AbstractBuffer *buffer)
Definition: DataMgr.cpp:461
Data_Namespace::AbstractBuffer * device_estimator_buffer_
Definition: ResultSet.h:863

Member Function Documentation

void ResultSet::addCompilationQueueTime ( const int64_t  compilation_queue_time)

Definition at line 476 of file ResultSet.cpp.

476  {
477  timings_.compilation_queue_time += compilation_queue_time;
478 }
QueryExecutionTimings timings_
Definition: ResultSet.h:849
void ResultSet::advanceCursorToNextEntry ( ResultSetRowIterator &  iter) const
private
size_t ResultSet::advanceCursorToNextEntry ( ) const
private
const ResultSetStorage* ResultSet::allocateStorage ( ) const
const ResultSetStorage* ResultSet::allocateStorage ( int8_t *  ,
const std::vector< int64_t > &   
) const
const ResultSetStorage* ResultSet::allocateStorage ( const std::vector< int64_t > &  ) const
void ResultSet::append ( ResultSet &  that)

Definition at line 270 of file ResultSet.cpp.

References CHECK(), and CHECK_EQ.

270  {
272  if (!that.storage_) {
273  return;
274  }
275  appended_storage_.push_back(std::move(that.storage_));
278  appended_storage_.back()->query_mem_desc_.getEntryCount());
279  chunks_.insert(chunks_.end(), that.chunks_.begin(), that.chunks_.end());
280  col_buffers_.insert(
281  col_buffers_.end(), that.col_buffers_.begin(), that.col_buffers_.end());
282  frag_offsets_.insert(
283  frag_offsets_.end(), that.frag_offsets_.begin(), that.frag_offsets_.end());
285  that.consistent_frag_sizes_.begin(),
286  that.consistent_frag_sizes_.end());
287  chunk_iters_.insert(
288  chunk_iters_.end(), that.chunk_iters_.begin(), that.chunk_iters_.end());
290  CHECK(that.separate_varlen_storage_valid_);
292  that.serialized_varlen_buffer_.begin(),
293  that.serialized_varlen_buffer_.end());
294  }
295  for (auto& buff : that.literal_buffers_) {
296  literal_buffers_.push_back(std::move(buff));
297  }
298 }
#define CHECK_EQ(x, y)
Definition: Logger.h:205
void setEntryCount(const size_t val)
AppendedStorage appended_storage_
Definition: ResultSet.h:841
QueryMemoryDescriptor query_mem_desc_
Definition: ResultSet.h:839
std::vector< std::shared_ptr< std::list< ChunkIter > > > chunk_iters_
Definition: ResultSet.h:853
std::vector< SerializedVarlenBufferStorage > serialized_varlen_buffer_
Definition: ResultSet.h:870
CHECK(cgen_state)
std::list< std::shared_ptr< Chunk_NS::Chunk > > chunks_
Definition: ResultSet.h:852
std::vector< std::vector< int8_t > > literal_buffers_
Definition: ResultSet.h:856
std::atomic< ssize_t > cached_row_count_
Definition: ResultSet.h:874
std::vector< std::vector< std::vector< const int8_t * > > > col_buffers_
Definition: ResultSet.h:858
std::vector< std::vector< int64_t > > consistent_frag_sizes_
Definition: ResultSet.h:860
std::vector< std::vector< std::vector< int64_t > > > frag_offsets_
Definition: ResultSet.h:859
bool separate_varlen_storage_valid_
Definition: ResultSet.h:871

void ResultSet::baselineSort ( const std::list< Analyzer::OrderEntry > &  order_entries,
const size_t  top_n 
)
private
size_t ResultSet::binSearchRowCount ( ) const
private

Definition at line 377 of file ResultSet.cpp.

References anonymous_namespace{ResultSet.cpp}::get_truncated_row_count().

377  {
378  if (!storage_) {
379  return 0;
380  }
381 
382  size_t row_count = storage_->binSearchRowCount();
383  for (auto& s : appended_storage_) {
384  row_count += s->binSearchRowCount();
385  }
386 
387  return get_truncated_row_count(row_count, getLimit(), drop_first_);
388 }
AppendedStorage appended_storage_
Definition: ResultSet.h:841
std::unique_ptr< ResultSetStorage > storage_
Definition: ResultSet.h:840
size_t getLimit() const
Definition: ResultSet.cpp:984
size_t get_truncated_row_count(size_t total_row_count, size_t limit, size_t offset)
Definition: ResultSet.cpp:319
size_t drop_first_
Definition: ResultSet.h:844

bool ResultSet::canUseFastBaselineSort ( const std::list< Analyzer::OrderEntry > &  order_entries,
const size_t  top_n 
)
private
size_t ResultSet::colCount ( ) const

Definition at line 304 of file ResultSet.cpp.

304  {
305  return just_explain_ ? 1 : targets_.size();
306 }
const bool just_explain_
Definition: ResultSet.h:873
const std::vector< TargetInfo > targets_
Definition: ResultSet.h:836
void ResultSet::copyColumnIntoBuffer ( const size_t  column_idx,
int8_t *  output_buffer,
const size_t  output_buffer_size 
) const

For the specified column, this function goes through all available storages and copies their contents into a contiguous output_buffer.

Definition at line 1172 of file ResultSetIteration.cpp.

References appended_storage_, CHECK(), CHECK_LT, QueryMemoryDescriptor::getPaddedSlotWidthBytes(), QueryMemoryDescriptor::getSlotCount(), isDirectColumnarConversionPossible(), query_mem_desc_, and storage_.

1174  {
1176  CHECK_LT(column_idx, query_mem_desc_.getSlotCount());
1177  CHECK(output_buffer_size > 0);
1178  CHECK(output_buffer);
1179  const auto column_width_size = query_mem_desc_.getPaddedSlotWidthBytes(column_idx);
1180  size_t out_buff_offset = 0;
1181 
1182  // the main storage:
1183  const size_t crt_storage_row_count = storage_->query_mem_desc_.getEntryCount();
1184  const size_t crt_buffer_size = crt_storage_row_count * column_width_size;
1185  const size_t column_offset = storage_->query_mem_desc_.getColOffInBytes(column_idx);
1186  const int8_t* storage_buffer = storage_->getUnderlyingBuffer() + column_offset;
1187  CHECK(crt_buffer_size <= output_buffer_size);
1188  std::memcpy(output_buffer, storage_buffer, crt_buffer_size);
1189 
1190  out_buff_offset += crt_buffer_size;
1191 
1192  // the appended storages:
1193  for (size_t i = 0; i < appended_storage_.size(); i++) {
1194  const size_t crt_storage_row_count =
1195  appended_storage_[i]->query_mem_desc_.getEntryCount();
1196  if (crt_storage_row_count == 0) {
1197  // skip an empty appended storage
1198  continue;
1199  }
1200  CHECK_LT(out_buff_offset, output_buffer_size);
1201  const size_t crt_buffer_size = crt_storage_row_count * column_width_size;
1202  const size_t column_offset =
1203  appended_storage_[i]->query_mem_desc_.getColOffInBytes(column_idx);
1204  const int8_t* storage_buffer =
1205  appended_storage_[i]->getUnderlyingBuffer() + column_offset;
1206  CHECK(out_buff_offset + crt_buffer_size <= output_buffer_size);
1207  std::memcpy(output_buffer + out_buff_offset, storage_buffer, crt_buffer_size);
1208 
1209  out_buff_offset += crt_buffer_size;
1210  }
1211 }
AppendedStorage appended_storage_
Definition: ResultSet.h:841
QueryMemoryDescriptor query_mem_desc_
Definition: ResultSet.h:839
std::unique_ptr< ResultSetStorage > storage_
Definition: ResultSet.h:840
CHECK(cgen_state)
const int8_t getPaddedSlotWidthBytes(const size_t slot_idx) const
#define CHECK_LT(x, y)
Definition: Logger.h:207
bool isDirectColumnarConversionPossible() const
Definition: ResultSet.cpp:1011
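
A hedged usage sketch (not from the sources), assuming a ResultSet named rows backed by a single (non-appended) storage; the buffer sizing mirrors the checks inside copyColumnIntoBuffer, i.e. entry count times the padded slot width.

  if (rows.isDirectColumnarConversionPossible()) {
    const size_t col_idx = 0;  // assumed single-slot target
    const size_t buf_size =
        rows.entryCount() * rows.getPaddedSlotWidthBytes(col_idx);
    std::vector<int8_t> buffer(buf_size);
    rows.copyColumnIntoBuffer(col_idx, buffer.data(), buffer.size());
  }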

void ResultSet::create_active_buffer_set ( BufferSet &  count_distinct_active_buffer_set) const
private
std::function<bool(const uint32_t, const uint32_t)> ResultSet::createComparator ( const std::list< Analyzer::OrderEntry > &  order_entries,
const bool  use_heap 
)
inlineprivate

Definition at line 774 of file ResultSet.h.

References column_wise_comparator_, DEBUG_TIMER, QueryMemoryDescriptor::didOutputColumnar(), query_mem_desc_, and row_wise_comparator_.

776  {
777  auto timer = DEBUG_TIMER(__func__);
780  std::make_unique<ResultSetComparator<ColumnWiseTargetAccessor>>(
781  order_entries, use_heap, this);
782  return [this](const uint32_t lhs, const uint32_t rhs) -> bool {
783  return (*this->column_wise_comparator_)(lhs, rhs);
784  };
785  } else {
786  row_wise_comparator_ = std::make_unique<ResultSetComparator<RowWiseTargetAccessor>>(
787  order_entries, use_heap, this);
788  return [this](const uint32_t lhs, const uint32_t rhs) -> bool {
789  return (*this->row_wise_comparator_)(lhs, rhs);
790  };
791  }
792  }
std::unique_ptr< ResultSetComparator< ColumnWiseTargetAccessor > > column_wise_comparator_
Definition: ResultSet.h:883
QueryMemoryDescriptor query_mem_desc_
Definition: ResultSet.h:839
std::unique_ptr< ResultSetComparator< RowWiseTargetAccessor > > row_wise_comparator_
Definition: ResultSet.h:882
#define DEBUG_TIMER(name)
Definition: Logger.h:313

bool ResultSet::definitelyHasNoRows ( ) const

Definition at line 425 of file ResultSet.cpp.

425  {
426  return !storage_ && !estimator_ && !just_explain_;
427 }
std::unique_ptr< ResultSetStorage > storage_
Definition: ResultSet.h:840
const bool just_explain_
Definition: ResultSet.h:873
const std::shared_ptr< const Analyzer::Estimator > estimator_
Definition: ResultSet.h:862
bool ResultSet::didOutputColumnar ( ) const
inline

Definition at line 517 of file ResultSet.h.

References QueryMemoryDescriptor::didOutputColumnar(), and query_mem_desc_.

517 { return this->query_mem_desc_.didOutputColumnar(); }
QueryMemoryDescriptor query_mem_desc_
Definition: ResultSet.h:839

void ResultSet::doBaselineSort ( const ExecutorDeviceType  device_type,
const std::list< Analyzer::OrderEntry > &  order_entries,
const size_t  top_n 
)
private
void ResultSet::dropFirstN ( const size_t  n)

Definition at line 99 of file ResultSet.cpp.

References CHECK_EQ.

99  {
101  drop_first_ = n;
102 }
#define CHECK_EQ(x, y)
Definition: Logger.h:205
size_t drop_first_
Definition: ResultSet.h:844
std::atomic< ssize_t > cached_row_count_
Definition: ResultSet.h:874
size_t ResultSet::entryCount ( ) const

Definition at line 753 of file ResultSetIteration.cpp.

References QueryMemoryDescriptor::getEntryCount(), permutation_, and query_mem_desc_.

753  {
754  return permutation_.empty() ? query_mem_desc_.getEntryCount() : permutation_.size();
755 }
QueryMemoryDescriptor query_mem_desc_
Definition: ResultSet.h:839
std::vector< uint32_t > permutation_
Definition: ResultSet.h:847

void ResultSet::fillOneEntry ( const std::vector< int64_t > &  entry)
inline

Definition at line 461 of file ResultSet.h.

References CHECK(), and storage_.

461  {
462  CHECK(storage_);
463  if (storage_->query_mem_desc_.didOutputColumnar()) {
464  storage_->fillOneEntryColWise(entry);
465  } else {
466  storage_->fillOneEntryRowWise(entry);
467  }
468  }
std::unique_ptr< ResultSetStorage > storage_
Definition: ResultSet.h:840
CHECK(cgen_state)

ResultSet::StorageLookupResult ResultSet::findStorage ( const size_t  entry_idx) const
private

Definition at line 677 of file ResultSet.cpp.

Referenced by makeGeoTargetValue().

677  {
678  auto [stg_idx, fixedup_entry_idx] = getStorageIndex(entry_idx);
679  return {stg_idx ? appended_storage_[stg_idx - 1].get() : storage_.get(),
680  fixedup_entry_idx,
681  stg_idx};
682 }
std::pair< size_t, size_t > getStorageIndex(const size_t entry_idx) const
Definition: ResultSet.cpp:652
AppendedStorage appended_storage_
Definition: ResultSet.h:841
std::unique_ptr< ResultSetStorage > storage_
Definition: ResultSet.h:840

void ResultSet::fixupCountDistinctPointers ( )
private
QueryMemoryDescriptor ResultSet::fixupQueryMemoryDescriptor ( const QueryMemoryDescriptor &  query_mem_desc)
static

Definition at line 506 of file ResultSet.cpp.

References QueryMemoryDescriptor::didOutputColumnar(), and query_mem_desc.

Referenced by GpuSharedMemCodeBuilder::codegenInitialization(), GpuSharedMemCodeBuilder::codegenReduction(), QueryExecutionContext::groupBufferToDeinterleavedResults(), QueryMemoryInitializer::initGroups(), QueryMemoryInitializer::QueryMemoryInitializer(), and Executor::reduceMultiDeviceResults().

507  {
508  auto query_mem_desc_copy = query_mem_desc;
509  query_mem_desc_copy.resetGroupColWidths(
510  std::vector<int8_t>(query_mem_desc_copy.getGroupbyColCount(), 8));
511  if (query_mem_desc.didOutputColumnar()) {
512  return query_mem_desc_copy;
513  }
514  query_mem_desc_copy.alignPaddedSlots();
515  return query_mem_desc_copy;
516 }

size_t ResultSet::getBufferSizeBytes ( const ExecutorDeviceType  device_type) const

Definition at line 757 of file ResultSetIteration.cpp.

References CHECK(), and storage_.

757  {
758  CHECK(storage_);
759  return storage_->query_mem_desc_.getBufferSizeBytes(device_type);
760 }
std::unique_ptr< ResultSetStorage > storage_
Definition: ResultSet.h:840
CHECK(cgen_state)

SQLTypeInfo ResultSet::getColType ( const size_t  col_idx) const

Definition at line 308 of file ResultSet.cpp.

References CHECK_LT, kAVG, kDOUBLE, and kTEXT.

308  {
309  if (just_explain_) {
310  return SQLTypeInfo(kTEXT, false);
311  }
312  CHECK_LT(col_idx, targets_.size());
313  return targets_[col_idx].agg_kind == kAVG ? SQLTypeInfo(kDOUBLE, false)
314  : targets_[col_idx].sql_type;
315 }
const bool just_explain_
Definition: ResultSet.h:873
const std::vector< TargetInfo > targets_
Definition: ResultSet.h:836
#define CHECK_LT(x, y)
Definition: Logger.h:207
Definition: sqltypes.h:53
Definition: sqldefs.h:72
template<typename ENTRY_TYPE >
ENTRY_TYPE ResultSet::getColumnarBaselineEntryAt ( const size_t  row_idx,
const size_t  target_idx,
const size_t  slot_idx 
) const
private
template<typename ENTRY_TYPE >
ENTRY_TYPE ResultSet::getColumnarBaselineEntryAt ( const size_t  row_idx,
const size_t  target_idx,
const size_t  slot_idx 
) const

Directly accesses the result set's storage buffer for a particular data type (columnar output, baseline hash group by)

NOTE: Currently, only used in direct columnarization

Definition at line 1344 of file ResultSetIteration.cpp.

References CHECK_NE, and storage_.

1346  {
1347  CHECK_NE(storage_->query_mem_desc_.targetGroupbyIndicesSize(), size_t(0));
1348  const auto key_width = storage_->query_mem_desc_.getEffectiveKeyWidth();
1349  const auto column_offset =
1350  (storage_->query_mem_desc_.getTargetGroupbyIndex(target_idx) < 0)
1351  ? storage_->query_mem_desc_.getColOffInBytes(slot_idx)
1352  : storage_->query_mem_desc_.getTargetGroupbyIndex(target_idx) * key_width *
1353  storage_->query_mem_desc_.getEntryCount();
1354  const auto column_buffer = storage_->getUnderlyingBuffer() + column_offset;
1355  return reinterpret_cast<const ENTRY_TYPE*>(column_buffer)[row_idx];
1356 }
std::unique_ptr< ResultSetStorage > storage_
Definition: ResultSet.h:840
#define CHECK_NE(x, y)
Definition: Logger.h:206
const int8_t * ResultSet::getColumnarBuffer ( size_t  column_idx) const

Definition at line 1036 of file ResultSet.cpp.

References CHECK().

1036  {
1038  return storage_->getUnderlyingBuffer() + query_mem_desc_.getColOffInBytes(column_idx);
1039 }
QueryMemoryDescriptor query_mem_desc_
Definition: ResultSet.h:839
std::unique_ptr< ResultSetStorage > storage_
Definition: ResultSet.h:840
bool isZeroCopyColumnarConversionPossible(size_t column_idx) const
Definition: ResultSet.cpp:1029
CHECK(cgen_state)
size_t getColOffInBytes(const size_t col_idx) const

template<typename ENTRY_TYPE >
ENTRY_TYPE ResultSet::getColumnarPerfectHashEntryAt ( const size_t  row_idx,
const size_t  target_idx,
const size_t  slot_idx 
) const
private
template<typename ENTRY_TYPE >
ENTRY_TYPE ResultSet::getColumnarPerfectHashEntryAt ( const size_t  row_idx,
const size_t  target_idx,
const size_t  slot_idx 
) const

Directly accesses the result set's storage buffer for a particular data type (columnar output, perfect hash group by)

NOTE: Currently, only used in direct columnarization

Definition at line 1290 of file ResultSetIteration.cpp.

References storage_.

1292  {
1293  const size_t column_offset = storage_->query_mem_desc_.getColOffInBytes(slot_idx);
1294  const int8_t* storage_buffer = storage_->getUnderlyingBuffer() + column_offset;
1295  return reinterpret_cast<const ENTRY_TYPE*>(storage_buffer)[row_idx];
1296 }
std::unique_ptr< ResultSetStorage > storage_
Definition: ResultSet.h:840
const std::vector< const int8_t * > & ResultSet::getColumnFrag ( const size_t  storge_idx,
const size_t  col_logical_idx,
int64_t &  global_idx 
) const
private

Definition at line 1143 of file ResultSetIteration.cpp.

References CHECK_EQ, CHECK_GE, CHECK_LE, CHECK_LT, col_buffers_, consistent_frag_sizes_, frag_offsets_, and anonymous_namespace{ResultSetIteration.cpp}::get_frag_id_and_local_idx().

Referenced by lazyReadInt(), makeGeoTargetValue(), makeTargetValue(), and makeVarlenTargetValue().

1145  {
1146  CHECK_LT(static_cast<size_t>(storage_idx), col_buffers_.size());
1147  if (col_buffers_[storage_idx].size() > 1) {
1148  int64_t frag_id = 0;
1149  int64_t local_idx = global_idx;
1150  if (consistent_frag_sizes_[storage_idx][col_logical_idx] != -1) {
1151  frag_id = global_idx / consistent_frag_sizes_[storage_idx][col_logical_idx];
1152  local_idx = global_idx % consistent_frag_sizes_[storage_idx][col_logical_idx];
1153  } else {
1154  std::tie(frag_id, local_idx) = get_frag_id_and_local_idx(
1155  frag_offsets_[storage_idx], col_logical_idx, global_idx);
1156  CHECK_LE(local_idx, global_idx);
1157  }
1158  CHECK_GE(frag_id, int64_t(0));
1159  CHECK_LT(static_cast<size_t>(frag_id), col_buffers_[storage_idx].size());
1160  global_idx = local_idx;
1161  return col_buffers_[storage_idx][frag_id];
1162  } else {
1163  CHECK_EQ(size_t(1), col_buffers_[storage_idx].size());
1164  return col_buffers_[storage_idx][0];
1165  }
1166 }
#define CHECK_EQ(x, y)
Definition: Logger.h:205
#define CHECK_GE(x, y)
Definition: Logger.h:210
#define CHECK_LT(x, y)
Definition: Logger.h:207
#define CHECK_LE(x, y)
Definition: Logger.h:208
std::vector< std::vector< std::vector< const int8_t * > > > col_buffers_
Definition: ResultSet.h:858
std::vector< std::vector< int64_t > > consistent_frag_sizes_
Definition: ResultSet.h:860
std::vector< std::vector< std::vector< int64_t > > > frag_offsets_
Definition: ResultSet.h:859
std::pair< int64_t, int64_t > get_frag_id_and_local_idx(const std::vector< std::vector< T >> &frag_offsets, const size_t tab_or_col_idx, const int64_t global_idx)
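
Illustrative example (not from the sources): with a consistent fragment size of 32 for the column and global_idx = 70, the lookup resolves to frag_id = 70 / 32 = 2 and local_idx = 70 % 32 = 6, so the buffer of the third fragment is returned and global_idx is rewritten to 6.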

InternalTargetValue ResultSet::getColumnInternal ( const int8_t *  buff,
const size_t  entry_idx,
const size_t  target_logical_idx,
const StorageLookupResult &  storage_lookup_result 
) const
private
size_t ResultSet::getCurrentRowBufferIndex ( ) const

Definition at line 262 of file ResultSet.cpp.

262  {
263  if (crt_row_buff_idx_ == 0) {
264  throw std::runtime_error("current row buffer iteration index is undefined");
265  }
266  return crt_row_buff_idx_ - 1;
267 }
size_t crt_row_buff_idx_
Definition: ResultSet.h:842
Data_Namespace::DataMgr* ResultSet::getDataManager ( ) const
private
int8_t * ResultSet::getDeviceEstimatorBuffer ( ) const

Definition at line 443 of file ResultSet.cpp.

References CHECK(), and GPU.

443  {
447 }
virtual int8_t * getMemoryPtr()=0
CHECK(cgen_state)
const ExecutorDeviceType device_type_
Definition: ResultSet.h:837
Data_Namespace::AbstractBuffer * device_estimator_buffer_
Definition: ResultSet.h:863

int ResultSet::getDeviceId ( ) const

Definition at line 502 of file ResultSet.cpp.

502  {
503  return device_id_;
504 }
const int device_id_
Definition: ResultSet.h:838
ExecutorDeviceType ResultSet::getDeviceType ( ) const

Definition at line 226 of file ResultSet.cpp.

226  {
227  return device_type_;
228 }
const ExecutorDeviceType device_type_
Definition: ResultSet.h:837
int64_t ResultSet::getDistinctBufferRefFromBufferRowwise ( int8_t *  rowwise_target_ptr,
const TargetInfo &  target_info 
) const
private
template<typename ENTRY_TYPE , QueryDescriptionType QUERY_TYPE, bool COLUMNAR_FORMAT>
ENTRY_TYPE ResultSet::getEntryAt ( const size_t  row_idx,
const size_t  target_idx,
const size_t  slot_idx 
) const
template<typename ENTRY_TYPE , QueryDescriptionType QUERY_TYPE, bool COLUMNAR_FORMAT>
ENTRY_TYPE ResultSet::getEntryAt ( const size_t  row_idx,
const size_t  target_idx,
const size_t  slot_idx 
) const

Definition at line 1214 of file ResultSetIteration.cpp.

References GroupByBaselineHash, GroupByPerfectHash, and UNREACHABLE.

1216  {
1217  if constexpr (QUERY_TYPE == QueryDescriptionType::GroupByPerfectHash) { // NOLINT
1218  if constexpr (COLUMNAR_FORMAT) { // NOLINT
1219  return getColumnarPerfectHashEntryAt<ENTRY_TYPE>(row_idx, target_idx, slot_idx);
1220  } else {
1221  return getRowWisePerfectHashEntryAt<ENTRY_TYPE>(row_idx, target_idx, slot_idx);
1222  }
1223  } else if constexpr (QUERY_TYPE == QueryDescriptionType::GroupByBaselineHash) {
1224  if constexpr (COLUMNAR_FORMAT) { // NOLINT
1225  return getColumnarBaselineEntryAt<ENTRY_TYPE>(row_idx, target_idx, slot_idx);
1226  } else {
1227  return getRowWiseBaselineEntryAt<ENTRY_TYPE>(row_idx, target_idx, slot_idx);
1228  }
1229  } else {
1230  UNREACHABLE() << "Invalid query type is used";
1231  return 0;
1232  }
1233 }
#define UNREACHABLE()
Definition: Logger.h:241
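
A hypothetical call site (not from the sources) showing the compile-time dispatch, with rows, row_idx, target_idx and slot_idx assumed in scope: read a 64-bit slot from a columnar, perfect-hash result set.

  const int64_t val =
      rows.getEntryAt<int64_t, QueryDescriptionType::GroupByPerfectHash, true>(
          row_idx, target_idx, slot_idx);
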
GeoReturnType ResultSet::getGeoReturnType ( ) const
inline

Definition at line 508 of file ResultSet.h.

References geo_return_type_.

508 { return geo_return_type_; }
GeoReturnType geo_return_type_
Definition: ResultSet.h:878
int ResultSet::getGpuCount ( ) const
private
int8_t * ResultSet::getHostEstimatorBuffer ( ) const

Definition at line 449 of file ResultSet.cpp.

449  {
450  return host_estimator_buffer_;
451 }
int8_t * host_estimator_buffer_
Definition: ResultSet.h:864
const std::vector<ColumnLazyFetchInfo>& ResultSet::getLazyFetchInfo ( ) const
inline

Definition at line 537 of file ResultSet.h.

References lazy_fetch_info_.

537  {
538  return lazy_fetch_info_;
539  }
const std::vector< ColumnLazyFetchInfo > lazy_fetch_info_
Definition: ResultSet.h:857
size_t ResultSet::getLimit ( ) const

Definition at line 984 of file ResultSet.cpp.

984  {
985  return keep_first_;
986 }
size_t keep_first_
Definition: ResultSet.h:845
size_t ResultSet::getNDVEstimator ( ) const

Definition at line 22 of file CardinalityEstimator.cpp.

References bitmap_set_size(), CHECK(), and CHECK_LE.

22  {
23  CHECK(dynamic_cast<const Analyzer::NDVEstimator*>(estimator_.get()));
25  auto bits_set = bitmap_set_size(host_estimator_buffer_, estimator_->getBufferSize());
26  const auto total_bits = estimator_->getBufferSize() * 8;
27  CHECK_LE(bits_set, total_bits);
28  const auto unset_bits = total_bits - bits_set;
29  const auto ratio = static_cast<double>(unset_bits) / total_bits;
30  if (ratio == 0.) {
31  throw std::runtime_error("Failed to get a high quality cardinality estimation");
32  }
33  return -static_cast<double>(total_bits) * log(ratio);
34 }
CHECK(cgen_state)
const std::shared_ptr< const Analyzer::Estimator > estimator_
Definition: ResultSet.h:862
#define CHECK_LE(x, y)
Definition: Logger.h:208
int8_t * host_estimator_buffer_
Definition: ResultSet.h:864
size_t bitmap_set_size(const int8_t *bitmap, const size_t bitmap_byte_sz)
Definition: CountDistinct.h:37
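
Restating the code above as a formula: with m total bitmap bits and V of them still unset, the returned estimate is the linear-counting value -m * ln(V / m). For example, m = 8192 and V = 4096 gives roughly 8192 * 0.693 ≈ 5678 distinct values; a fully saturated bitmap (V = 0) throws instead of returning an estimate.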

std::vector< TargetValue > ResultSet::getNextRow ( const bool  translate_strings,
const bool  decimal_to_double 
) const

Definition at line 294 of file ResultSetIteration.cpp.

295  {
296  std::lock_guard<std::mutex> lock(row_iteration_mutex_);
297  if (!storage_ && !just_explain_) {
298  return {};
299  }
300  return getNextRowUnlocked(translate_strings, decimal_to_double);
301 }
std::mutex row_iteration_mutex_
Definition: ResultSet.h:875
double decimal_to_double(const SQLTypeInfo &otype, int64_t oval)
std::unique_ptr< ResultSetStorage > storage_
Definition: ResultSet.h:840
const bool just_explain_
Definition: ResultSet.h:873
std::vector< TargetValue > getNextRowUnlocked(const bool translate_strings, const bool decimal_to_double) const
std::vector< TargetValue > ResultSet::getNextRowImpl ( const bool  translate_strings,
const bool  decimal_to_double 
) const
private

Definition at line 316 of file ResultSetIteration.cpp.

References CHECK(), and CHECK_EQ.

317  {
318  size_t entry_buff_idx = 0;
319  do {
321  return {};
322  }
323 
324  entry_buff_idx = advanceCursorToNextEntry();
325 
326  if (crt_row_buff_idx_ >= entryCount()) {
328  return {};
329  }
331  ++fetched_so_far_;
332 
333  } while (drop_first_ && fetched_so_far_ <= drop_first_);
334 
335  auto row = getRowAt(entry_buff_idx, translate_strings, decimal_to_double, false);
336  CHECK(!row.empty());
337 
338  return row;
339 }
#define CHECK_EQ(x, y)
Definition: Logger.h:205
double decimal_to_double(const SQLTypeInfo &otype, int64_t oval)
size_t keep_first_
Definition: ResultSet.h:845
size_t drop_first_
Definition: ResultSet.h:844
CHECK(cgen_state)
std::vector< TargetValue > getRowAt(const size_t index) const
size_t entryCount() const
size_t fetched_so_far_
Definition: ResultSet.h:843
size_t crt_row_buff_idx_
Definition: ResultSet.h:842
size_t advanceCursorToNextEntry() const

std::vector< TargetValue > ResultSet::getNextRowUnlocked ( const bool  translate_strings,
const bool  decimal_to_double 
) const
private

Definition at line 303 of file ResultSetIteration.cpp.

305  {
306  if (just_explain_) {
307  if (fetched_so_far_) {
308  return {};
309  }
310  fetched_so_far_ = 1;
311  return {explanation_};
312  }
313  return getNextRowImpl(translate_strings, decimal_to_double);
314 }
double decimal_to_double(const SQLTypeInfo &otype, int64_t oval)
const bool just_explain_
Definition: ResultSet.h:873
std::string explanation_
Definition: ResultSet.h:872
std::vector< TargetValue > getNextRowImpl(const bool translate_strings, const bool decimal_to_double) const
size_t fetched_so_far_
Definition: ResultSet.h:843
OneIntegerColumnRow ResultSet::getOneColRow ( const size_t  index) const

Definition at line 232 of file ResultSetIteration.cpp.

References align_to_int64(), CHECK(), get_key_bytes_rowwise(), and row_ptr_rowwise().

232  {
233  const auto storage_lookup_result = findStorage(global_entry_idx);
234  const auto storage = storage_lookup_result.storage_ptr;
235  const auto local_entry_idx = storage_lookup_result.fixedup_entry_idx;
236  if (storage->isEmptyEntry(local_entry_idx)) {
237  return {0, false};
238  }
239  const auto buff = storage->buff_;
240  CHECK(buff);
242  const auto keys_ptr = row_ptr_rowwise(buff, query_mem_desc_, local_entry_idx);
243  const auto key_bytes_with_padding =
245  const auto rowwise_target_ptr = keys_ptr + key_bytes_with_padding;
246  const auto tv = getTargetValueFromBufferRowwise(rowwise_target_ptr,
247  keys_ptr,
248  global_entry_idx,
249  targets_.front(),
250  0,
251  0,
252  false,
253  false,
254  false);
255  const auto scalar_tv = boost::get<ScalarTargetValue>(&tv);
256  CHECK(scalar_tv);
257  const auto ival_ptr = boost::get<int64_t>(scalar_tv);
258  CHECK(ival_ptr);
259  return {*ival_ptr, true};
260 }
QueryMemoryDescriptor query_mem_desc_
Definition: ResultSet.h:839
TargetValue getTargetValueFromBufferRowwise(int8_t *rowwise_target_ptr, int8_t *keys_ptr, const size_t entry_buff_idx, const TargetInfo &target_info, const size_t target_logical_idx, const size_t slot_idx, const bool translate_strings, const bool decimal_to_double, const bool fixup_count_distinct_pointers) const
const std::vector< TargetInfo > targets_
Definition: ResultSet.h:836
CHECK(cgen_state)
StorageLookupResult findStorage(const size_t entry_idx) const
Definition: ResultSet.cpp:677
T row_ptr_rowwise(T buff, const QueryMemoryDescriptor &query_mem_desc, const size_t entry_idx)
size_t get_key_bytes_rowwise(const QueryMemoryDescriptor &query_mem_desc)
FORCE_INLINE HOST DEVICE T align_to_int64(T addr)

const int8_t ResultSet::getPaddedSlotWidthBytes ( const size_t  slot_idx) const
inline

Definition at line 526 of file ResultSet.h.

References QueryMemoryDescriptor::getPaddedSlotWidthBytes(), and query_mem_desc_.

526  {
527  return query_mem_desc_.getPaddedSlotWidthBytes(slot_idx);
528  }
QueryMemoryDescriptor query_mem_desc_
Definition: ResultSet.h:839
const int8_t getPaddedSlotWidthBytes(const size_t slot_idx) const

const std::vector< uint32_t > & ResultSet::getPermutationBuffer ( ) const

Definition at line 608 of file ResultSet.cpp.

608  {
609  return permutation_;
610 }
std::vector< uint32_t > permutation_
Definition: ResultSet.h:847
QueryDescriptionType ResultSet::getQueryDescriptionType ( ) const
inline

Definition at line 522 of file ResultSet.h.

References QueryMemoryDescriptor::getQueryDescriptionType(), and query_mem_desc_.

522  {
524  }
QueryMemoryDescriptor query_mem_desc_
Definition: ResultSet.h:839
QueryDescriptionType getQueryDescriptionType() const

const QueryMemoryDescriptor & ResultSet::getQueryMemDesc ( ) const

Definition at line 429 of file ResultSet.cpp.

References CHECK().

429  {
430  CHECK(storage_);
431  return storage_->query_mem_desc_;
432 }
std::unique_ptr< ResultSetStorage > storage_
Definition: ResultSet.h:840
CHECK(cgen_state)

int64_t ResultSet::getQueueTime ( ) const

Definition at line 480 of file ResultSet.cpp.

int64_t ResultSet::getRenderTime ( ) const

Definition at line 485 of file ResultSet.cpp.

485  {
486  return timings_.render_time;
487 }
QueryExecutionTimings timings_
Definition: ResultSet.h:849
std::vector<TargetValue> ResultSet::getRowAt ( const size_t  index) const
TargetValue ResultSet::getRowAt ( const size_t  row_idx,
const size_t  col_idx,
const bool  translate_strings,
const bool  decimal_to_double = true 
) const
std::vector<TargetValue> ResultSet::getRowAt ( const size_t  index,
const bool  translate_strings,
const bool  decimal_to_double,
const bool  fixup_count_distinct_pointers,
const std::vector< bool > &  targets_to_skip = {} 
) const
private
std::vector< TargetValue > ResultSet::getRowAtNoTranslations ( const size_t  index,
const std::vector< bool > &  targets_to_skip = {} 
) const

Definition at line 271 of file ResultSetIteration.cpp.

273  {
274  if (logical_index >= entryCount()) {
275  return {};
276  }
277  const auto entry_idx =
278  permutation_.empty() ? logical_index : permutation_[logical_index];
279  return getRowAt(entry_idx, false, false, false, targets_to_skip);
280 }
std::vector< uint32_t > permutation_
Definition: ResultSet.h:847
std::vector< TargetValue > getRowAt(const size_t index) const
size_t entryCount() const
std::shared_ptr<RowSetMemoryOwner> ResultSet::getRowSetMemOwner ( ) const
inline

Definition at line 482 of file ResultSet.h.

References row_set_mem_owner_.

482  {
483  return row_set_mem_owner_;
484  }
std::shared_ptr< RowSetMemoryOwner > row_set_mem_owner_
Definition: ResultSet.h:846
template<typename ENTRY_TYPE >
ENTRY_TYPE ResultSet::getRowWiseBaselineEntryAt ( const size_t  row_idx,
const size_t  target_idx,
const size_t  slot_idx 
) const
private
template<typename ENTRY_TYPE >
ENTRY_TYPE ResultSet::getRowWiseBaselineEntryAt ( const size_t  row_idx,
const size_t  target_idx,
const size_t  slot_idx 
) const

Directly accesses the result set's storage buffer for a particular data type (row-wise output, baseline hash group by)

NOTE: Currently, only used in direct columnarization

Definition at line 1322 of file ResultSetIteration.cpp.

References CHECK_NE, row_ptr_rowwise(), and storage_.

1324  {
1325  CHECK_NE(storage_->query_mem_desc_.targetGroupbyIndicesSize(), size_t(0));
1326  const auto key_width = storage_->query_mem_desc_.getEffectiveKeyWidth();
1327  auto keys_ptr = row_ptr_rowwise(
1328  storage_->getUnderlyingBuffer(), storage_->query_mem_desc_, row_idx);
1329  const auto column_offset =
1330  (storage_->query_mem_desc_.getTargetGroupbyIndex(target_idx) < 0)
1331  ? storage_->query_mem_desc_.getColOffInBytes(slot_idx)
1332  : storage_->query_mem_desc_.getTargetGroupbyIndex(target_idx) * key_width;
1333  const auto storage_buffer = keys_ptr + column_offset;
1334  return *reinterpret_cast<const ENTRY_TYPE*>(storage_buffer);
1335 }
std::unique_ptr< ResultSetStorage > storage_
Definition: ResultSet.h:840
#define CHECK_NE(x, y)
Definition: Logger.h:206
T row_ptr_rowwise(T buff, const QueryMemoryDescriptor &query_mem_desc, const size_t entry_idx)

template<typename ENTRY_TYPE >
ENTRY_TYPE ResultSet::getRowWisePerfectHashEntryAt ( const size_t  row_idx,
const size_t  target_idx,
const size_t  slot_idx 
) const
private
template<typename ENTRY_TYPE >
ENTRY_TYPE ResultSet::getRowWisePerfectHashEntryAt ( const size_t  row_idx,
const size_t  target_idx,
const size_t  slot_idx 
) const

Directly accesses the result set's storage buffer for a particular data type (row-wise output, perfect hash group by)

NOTE: Currently, only used in direct columnarization

Definition at line 1305 of file ResultSetIteration.cpp.

References storage_.

1307  {
1308  const size_t row_offset = storage_->query_mem_desc_.getRowSize() * row_idx;
1309  const size_t column_offset = storage_->query_mem_desc_.getColOffInBytes(slot_idx);
1310  const int8_t* storage_buffer =
1311  storage_->getUnderlyingBuffer() + row_offset + column_offset;
1312  return *reinterpret_cast<const ENTRY_TYPE*>(storage_buffer);
1313 }
std::unique_ptr< ResultSetStorage > storage_
Definition: ResultSet.h:840
std::tuple< std::vector< bool >, size_t > ResultSet::getSingleSlotTargetBitmap ( ) const

Definition at line 1042 of file ResultSet.cpp.

References anonymous_namespace{RelAlgExecutor.cpp}::is_agg(), and kAVG.

1042  {
1043  std::vector<bool> target_bitmap(targets_.size(), true);
1044  size_t num_single_slot_targets = 0;
1045  for (size_t target_idx = 0; target_idx < targets_.size(); target_idx++) {
1046  const auto& sql_type = targets_[target_idx].sql_type;
1047  if (targets_[target_idx].is_agg && targets_[target_idx].agg_kind == kAVG) {
1048  target_bitmap[target_idx] = false;
1049  } else if (sql_type.is_varlen()) {
1050  target_bitmap[target_idx] = false;
1051  } else {
1052  num_single_slot_targets++;
1053  }
1054  }
1055  return std::make_tuple(std::move(target_bitmap), num_single_slot_targets);
1056 }
bool is_agg(const Analyzer::Expr *expr)
const std::vector< TargetInfo > targets_
Definition: ResultSet.h:836
Definition: sqldefs.h:72

std::vector< size_t > ResultSet::getSlotIndicesForTargetIndices ( ) const

Definition at line 1085 of file ResultSet.cpp.

References advance_slot().

1085  {
1086  std::vector<size_t> slot_indices(targets_.size(), 0);
1087  size_t slot_index = 0;
1088  for (size_t target_idx = 0; target_idx < targets_.size(); target_idx++) {
1089  slot_indices[target_idx] = slot_index;
1090  slot_index = advance_slot(slot_index, targets_[target_idx], false);
1091  }
1092  return slot_indices;
1093 }
const std::vector< TargetInfo > targets_
Definition: ResultSet.h:836
size_t advance_slot(const size_t j, const TargetInfo &target_info, const bool separate_varlen_storage)

+ Here is the call graph for this function:

const ResultSetStorage * ResultSet::getStorage ( ) const

Definition at line 300 of file ResultSet.cpp.

300  {
301  return storage_.get();
302 }
std::unique_ptr< ResultSetStorage > storage_
Definition: ResultSet.h:840
std::pair< size_t, size_t > ResultSet::getStorageIndex ( const size_t  entry_idx) const
private

Returns a (storageIdx, entryIdx) pair, where storageIdx 0 refers to storage_, storageIdx >= 1 refers to appended_storage_[storageIdx - 1], and entryIdx is the local index into that storage object.

Definition at line 652 of file ResultSet.cpp.

References CHECK_NE, and UNREACHABLE.

Referenced by makeGeoTargetValue(), makeTargetValue(), and makeVarlenTargetValue().

652  {
653  size_t fixedup_entry_idx = entry_idx;
654  auto entry_count = storage_->query_mem_desc_.getEntryCount();
655  const bool is_rowwise_layout = !storage_->query_mem_desc_.didOutputColumnar();
656  if (fixedup_entry_idx < entry_count) {
657  return {0, fixedup_entry_idx};
658  }
659  fixedup_entry_idx -= entry_count;
660  for (size_t i = 0; i < appended_storage_.size(); ++i) {
661  const auto& desc = appended_storage_[i]->query_mem_desc_;
662  CHECK_NE(is_rowwise_layout, desc.didOutputColumnar());
663  entry_count = desc.getEntryCount();
664  if (fixedup_entry_idx < entry_count) {
665  return {i + 1, fixedup_entry_idx};
666  }
667  fixedup_entry_idx -= entry_count;
668  }
669  UNREACHABLE() << "entry_idx = " << entry_idx << ", query_mem_desc_.getEntryCount() = "
670  << query_mem_desc_.getEntryCount();
671  return {};
672 }
AppendedStorage appended_storage_
Definition: ResultSet.h:841
QueryMemoryDescriptor query_mem_desc_
Definition: ResultSet.h:839
#define UNREACHABLE()
Definition: Logger.h:241
std::unique_ptr< ResultSetStorage > storage_
Definition: ResultSet.h:840
#define CHECK_NE(x, y)
Definition: Logger.h:206

+ Here is the caller graph for this function:
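The lookup can be pictured with a small self-contained sketch in which the entry counts of storage_ and the appended storages are flattened into one vector (an assumption of the sketch, not the actual member layout):

#include <cstddef>
#include <utility>
#include <vector>

// Sketch: map a global entry index to (storage index, local entry index).
// Index 0 corresponds to storage_, index i >= 1 to appended_storage_[i - 1].
std::pair<size_t, size_t> storage_index(size_t entry_idx,
                                        const std::vector<size_t>& entry_counts) {
  size_t fixedup = entry_idx;
  for (size_t i = 0; i < entry_counts.size(); ++i) {
    if (fixedup < entry_counts[i]) {
      return {i, fixedup};
    }
    fixedup -= entry_counts[i];
  }
  return {entry_counts.size(), 0};  // out of range; the member function treats this as unreachable
}

For example, with entry counts {100, 50}, global entry 120 falls past the 100 entries of storage_ and maps to {1, 20}, i.e. local entry 20 of appended_storage_[0].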

std::shared_ptr< const std::vector< std::string > > ResultSet::getStringDictionaryPayloadCopy ( const int  dict_id) const

Definition at line 988 of file ResultSet.cpp.

References CHECK().

989  {
990  CHECK(executor_);
991  const auto sdp =
992  executor_->getStringDictionaryProxy(dict_id, row_set_mem_owner_, false);
993  return sdp->getDictionary()->copyStrings();
994 }
const Executor * executor_
Definition: ResultSet.h:850
std::shared_ptr< RowSetMemoryOwner > row_set_mem_owner_
Definition: ResultSet.h:846
CHECK(cgen_state)

+ Here is the call graph for this function:

std::tuple< std::vector< bool >, size_t > ResultSet::getSupportedSingleSlotTargetBitmap ( ) const

Returns a bitmap (together with its population count) identifying all single-slot targets that are currently supported for direct columnarization.

The long-term goal is to remove the need for this selection; for now, any target that does not qualify for direct columnarization (e.g., count distinct, approximate count distinct) is handled through the result set's traditional row-wise iteration.

Definition at line 1066 of file ResultSet.cpp.

References CHECK(), CHECK_GE, is_distinct_target(), kFLOAT, and kSAMPLE.

1067  {
1068  CHECK(isDirectColumnarConversionPossible());
1069  auto [single_slot_targets, num_single_slot_targets] = getSingleSlotTargetBitmap();
1070 
1071  for (size_t target_idx = 0; target_idx < single_slot_targets.size(); target_idx++) {
1072  const auto& target = targets_[target_idx];
1073  if (single_slot_targets[target_idx] &&
1074  (is_distinct_target(target) ||
1075  (target.is_agg && target.agg_kind == kSAMPLE && target.sql_type == kFLOAT))) {
1076  single_slot_targets[target_idx] = false;
1077  num_single_slot_targets--;
1078  }
1079  }
1080  CHECK_GE(num_single_slot_targets, size_t(0));
1081  return std::make_tuple(std::move(single_slot_targets), num_single_slot_targets);
1082 }
#define CHECK_GE(x, y)
Definition: Logger.h:210
const std::vector< TargetInfo > targets_
Definition: ResultSet.h:836
CHECK(cgen_state)
std::tuple< std::vector< bool >, size_t > getSingleSlotTargetBitmap() const
Definition: ResultSet.cpp:1042
bool is_distinct_target(const TargetInfo &target_info)
Definition: TargetInfo.h:129
bool isDirectColumnarConversionPossible() const
Definition: ResultSet.cpp:1011

+ Here is the call graph for this function:
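A hedged sketch of a caller (assuming ResultSet.h is included and rows is a populated ResultSet; the branch bodies are placeholders, not the actual columnarization code):

#include <cstddef>
#include "ResultSet.h"

void columnarize_targets(const ResultSet& rows) {
  auto [direct_ok, num_direct] = rows.getSupportedSingleSlotTargetBitmap();
  if (num_direct == direct_ok.size()) {
    // every target is a supported single-slot target; all can take the fast path
  }
  for (size_t target_idx = 0; target_idx < direct_ok.size(); ++target_idx) {
    if (direct_ok[target_idx]) {
      // fast path: copy this target straight out of its single slot
    } else {
      // slow path: materialize this target through ResultSet's row iteration
      // (e.g. count distinct, approximate count distinct)
    }
  }
}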

const std::vector< TargetInfo > & ResultSet::getTargetInfos ( ) const

Definition at line 434 of file ResultSet.cpp.

434  {
435  return targets_;
436 }
const std::vector< TargetInfo > targets_
Definition: ResultSet.h:836
const std::vector< int64_t > & ResultSet::getTargetInitVals ( ) const

Definition at line 438 of file ResultSet.cpp.

References CHECK().

438  {
439  CHECK(storage_);
440  return storage_->target_init_vals_;
441 }
std::unique_ptr< ResultSetStorage > storage_
Definition: ResultSet.h:840
CHECK(cgen_state)

+ Here is the call graph for this function:

TargetValue ResultSet::getTargetValueFromBufferColwise ( const int8_t *  col_ptr,
const int8_t *  keys_ptr,
const QueryMemoryDescriptor query_mem_desc,
const size_t  local_entry_idx,
const size_t  global_entry_idx,
const TargetInfo target_info,
const size_t  target_logical_idx,
const size_t  slot_idx,
const bool  translate_strings,
const bool  decimal_to_double 
) const
private

Definition at line 1917 of file ResultSetIteration.cpp.

References advance_to_next_columnar_target_buff(), TargetInfo::agg_kind, CHECK(), CHECK_GE, anonymous_namespace{ResultSetIteration.cpp}::columnar_elem_ptr(), QueryMemoryDescriptor::didOutputColumnar(), QueryMemoryDescriptor::getEffectiveKeyWidth(), QueryMemoryDescriptor::getEntryCount(), QueryMemoryDescriptor::getPaddedSlotWidthBytes(), QueryMemoryDescriptor::getTargetGroupbyIndex(), TargetInfo::is_agg, SQLTypeInfo::is_geometry(), is_real_str_or_array(), kAVG, anonymous_namespace{ResultSetIteration.cpp}::make_avg_target_value(), makeGeoTargetValue(), makeTargetValue(), makeVarlenTargetValue(), query_mem_desc_, TargetInfo::sql_type, and QueryMemoryDescriptor::targetGroupbyIndicesSize().

1927  {
1929  const auto col1_ptr = col_ptr;
1930  const auto compact_sz1 = query_mem_desc.getPaddedSlotWidthBytes(slot_idx);
1931  const auto next_col_ptr =
1932  advance_to_next_columnar_target_buff(col1_ptr, query_mem_desc, slot_idx);
1933  const auto col2_ptr = ((target_info.is_agg && target_info.agg_kind == kAVG) ||
1934  is_real_str_or_array(target_info))
1935  ? next_col_ptr
1936  : nullptr;
1937  const auto compact_sz2 = ((target_info.is_agg && target_info.agg_kind == kAVG) ||
1938  is_real_str_or_array(target_info))
1939  ? query_mem_desc.getPaddedSlotWidthBytes(slot_idx + 1)
1940  : 0;
1941 
1942  // TODO(Saman): add required logics for count distinct
1943  // geospatial target values:
1944  if (target_info.sql_type.is_geometry()) {
1945  return makeGeoTargetValue(
1946  col1_ptr, slot_idx, target_info, target_logical_idx, global_entry_idx);
1947  }
1948 
1949  const auto ptr1 = columnar_elem_ptr(local_entry_idx, col1_ptr, compact_sz1);
1950  if (target_info.agg_kind == kAVG || is_real_str_or_array(target_info)) {
1951  CHECK(col2_ptr);
1952  CHECK(compact_sz2);
1953  const auto ptr2 = columnar_elem_ptr(local_entry_idx, col2_ptr, compact_sz2);
1954  return target_info.agg_kind == kAVG
1955  ? make_avg_target_value(ptr1, compact_sz1, ptr2, compact_sz2, target_info)
1956  : makeVarlenTargetValue(ptr1,
1957  compact_sz1,
1958  ptr2,
1959  compact_sz2,
1960  target_info,
1961  target_logical_idx,
1962  translate_strings,
1963  global_entry_idx);
1964  }
1965  if (query_mem_desc_.targetGroupbyIndicesSize() == 0 ||
1966  query_mem_desc_.getTargetGroupbyIndex(target_logical_idx) < 0) {
1967  return makeTargetValue(ptr1,
1968  compact_sz1,
1969  target_info,
1970  target_logical_idx,
1971  translate_strings,
1972  decimal_to_double,
1973  global_entry_idx);
1974  }
1975  const auto key_width = query_mem_desc_.getEffectiveKeyWidth();
1976  const auto key_idx = query_mem_desc_.getTargetGroupbyIndex(target_logical_idx);
1977  CHECK_GE(key_idx, 0);
1978  auto key_col_ptr = keys_ptr + key_idx * query_mem_desc_.getEntryCount() * key_width;
1979  return makeTargetValue(columnar_elem_ptr(local_entry_idx, key_col_ptr, key_width),
1980  key_width,
1981  target_info,
1982  target_logical_idx,
1983  translate_strings,
1984  decimal_to_double,
1985  global_entry_idx);
1986 }
double decimal_to_double(const SQLTypeInfo &otype, int64_t oval)
T advance_to_next_columnar_target_buff(T target_ptr, const QueryMemoryDescriptor &query_mem_desc, const size_t target_slot_idx)
SQLTypeInfo sql_type
Definition: TargetInfo.h:42
QueryMemoryDescriptor query_mem_desc_
Definition: ResultSet.h:839
#define CHECK_GE(x, y)
Definition: Logger.h:210
size_t getEffectiveKeyWidth() const
TargetValue makeGeoTargetValue(const int8_t *geo_target_ptr, const size_t slot_idx, const TargetInfo &target_info, const size_t target_logical_idx, const size_t entry_buff_idx) const
TargetValue make_avg_target_value(const int8_t *ptr1, const int8_t compact_sz1, const int8_t *ptr2, const int8_t compact_sz2, const TargetInfo &target_info)
bool is_agg
Definition: TargetInfo.h:40
CHECK(cgen_state)
size_t targetGroupbyIndicesSize() const
const int8_t getPaddedSlotWidthBytes(const size_t slot_idx) const
SQLAgg agg_kind
Definition: TargetInfo.h:41
ssize_t getTargetGroupbyIndex(const size_t target_idx) const
bool is_real_str_or_array(const TargetInfo &target_info)
TargetValue makeTargetValue(const int8_t *ptr, const int8_t compact_sz, const TargetInfo &target_info, const size_t target_logical_idx, const bool translate_strings, const bool decimal_to_double, const size_t entry_buff_idx) const
bool is_geometry() const
Definition: sqltypes.h:427
Definition: sqldefs.h:72
const int8_t * columnar_elem_ptr(const size_t entry_idx, const int8_t *col1_ptr, const int8_t compact_sz1)
TargetValue makeVarlenTargetValue(const int8_t *ptr1, const int8_t compact_sz1, const int8_t *ptr2, const int8_t compact_sz2, const TargetInfo &target_info, const size_t target_logical_idx, const bool translate_strings, const size_t entry_buff_idx) const

+ Here is the call graph for this function:

TargetValue ResultSet::getTargetValueFromBufferRowwise ( int8_t *  rowwise_target_ptr,
int8_t *  keys_ptr,
const size_t  entry_buff_idx,
const TargetInfo target_info,
const size_t  target_logical_idx,
const size_t  slot_idx,
const bool  translate_strings,
const bool  decimal_to_double,
const bool  fixup_count_distinct_pointers 
) const
private

Definition at line 1990 of file ResultSetIteration.cpp.

References TargetInfo::agg_kind, CHECK(), QueryMemoryDescriptor::count_distinct_descriptors_, SQLTypeInfo::get_compression(), QueryMemoryDescriptor::getEffectiveKeyWidth(), QueryMemoryDescriptor::getLogicalSlotWidthBytes(), QueryMemoryDescriptor::getPaddedSlotWidthBytes(), QueryMemoryDescriptor::getTargetGroupbyIndex(), QueryMemoryDescriptor::hasKeylessHash(), TargetInfo::is_agg, SQLTypeInfo::is_array(), is_distinct_target(), SQLTypeInfo::is_geometry(), is_real_str_or_array(), SQLTypeInfo::is_string(), QueryMemoryDescriptor::isSingleColumnGroupByWithPerfectHash(), kAVG, kENCODING_NONE, anonymous_namespace{ResultSetIteration.cpp}::make_avg_target_value(), makeGeoTargetValue(), makeTargetValue(), makeVarlenTargetValue(), query_mem_desc_, row_set_mem_owner_, separate_varlen_storage_valid_, TargetInfo::sql_type, storage_, QueryMemoryDescriptor::targetGroupbyIndicesSize(), and UNLIKELY.

1999  {
2000  if (UNLIKELY(fixup_count_distinct_pointers)) {
2001  if (is_distinct_target(target_info)) {
2002  auto count_distinct_ptr_ptr = reinterpret_cast<int64_t*>(rowwise_target_ptr);
2003  const auto remote_ptr = *count_distinct_ptr_ptr;
2004  if (remote_ptr) {
2005  const auto ptr = storage_->mappedPtr(remote_ptr);
2006  if (ptr) {
2007  *count_distinct_ptr_ptr = ptr;
2008  } else {
2009  // need to create a zero filled buffer for this remote_ptr
2010  const auto& count_distinct_desc =
2011  query_mem_desc_.count_distinct_descriptors_[target_logical_idx];
2012  const auto bitmap_byte_sz = count_distinct_desc.sub_bitmap_count == 1
2013  ? count_distinct_desc.bitmapSizeBytes()
2014  : count_distinct_desc.bitmapPaddedSizeBytes();
2015  auto count_distinct_buffer =
2016  row_set_mem_owner_->allocateCountDistinctBuffer(bitmap_byte_sz);
2017  *count_distinct_ptr_ptr = reinterpret_cast<int64_t>(count_distinct_buffer);
2018  }
2019  }
2020  }
2021  return int64_t(0);
2022  }
2023  if (target_info.sql_type.is_geometry()) {
2024  return makeGeoTargetValue(
2025  rowwise_target_ptr, slot_idx, target_info, target_logical_idx, entry_buff_idx);
2026  }
2027 
2028  auto ptr1 = rowwise_target_ptr;
2029  int8_t compact_sz1 = query_mem_desc_.getPaddedSlotWidthBytes(slot_idx);
2030  if (query_mem_desc_.isSingleColumnGroupByWithPerfectHash() &&
2031  !query_mem_desc_.hasKeylessHash() && !target_info.is_agg) {
2032  // Single column perfect hash group by can utilize one slot for both the key and the
2033  // target value if both values fit in 8 bytes. Use the target value actual size for
2034  // this case. If they don't, the target value should be 8 bytes, so we can still use
2035  // the actual size rather than the compact size.
2036  compact_sz1 = query_mem_desc_.getLogicalSlotWidthBytes(slot_idx);
2037  }
2038 
2039  // logic for deciding width of column
2040  if (target_info.agg_kind == kAVG || is_real_str_or_array(target_info)) {
2041  const auto ptr2 =
2042  rowwise_target_ptr + query_mem_desc_.getPaddedSlotWidthBytes(slot_idx);
2043  int8_t compact_sz2 = 0;
2044  // Skip reading the second slot if we have a none encoded string and are using
2045  // the none encoded strings buffer attached to ResultSetStorage
2046  if (!(separate_varlen_storage_valid_ &&
2047  (target_info.sql_type.is_array() ||
2048  (target_info.sql_type.is_string() &&
2049  target_info.sql_type.get_compression() == kENCODING_NONE)))) {
2050  compact_sz2 = query_mem_desc_.getPaddedSlotWidthBytes(slot_idx + 1);
2051  }
2052  if (separate_varlen_storage_valid_ && target_info.is_agg) {
2053  compact_sz2 = 8; // TODO(adb): is there a better way to do this?
2054  }
2055  CHECK(ptr2);
2056  return target_info.agg_kind == kAVG
2057  ? make_avg_target_value(ptr1, compact_sz1, ptr2, compact_sz2, target_info)
2058  : makeVarlenTargetValue(ptr1,
2059  compact_sz1,
2060  ptr2,
2061  compact_sz2,
2062  target_info,
2063  target_logical_idx,
2064  translate_strings,
2065  entry_buff_idx);
2066  }
2067  if (query_mem_desc_.targetGroupbyIndicesSize() == 0 ||
2068  query_mem_desc_.getTargetGroupbyIndex(target_logical_idx) < 0) {
2069  return makeTargetValue(ptr1,
2070  compact_sz1,
2071  target_info,
2072  target_logical_idx,
2073  translate_strings,
2074  decimal_to_double,
2075  entry_buff_idx);
2076  }
2077  const auto key_width = query_mem_desc_.getEffectiveKeyWidth();
2078  ptr1 = keys_ptr + query_mem_desc_.getTargetGroupbyIndex(target_logical_idx) * key_width;
2079  return makeTargetValue(ptr1,
2080  key_width,
2081  target_info,
2082  target_logical_idx,
2083  translate_strings,
2084  decimal_to_double,
2085  entry_buff_idx);
2086 }
double decimal_to_double(const SQLTypeInfo &otype, int64_t oval)
SQLTypeInfo sql_type
Definition: TargetInfo.h:42
QueryMemoryDescriptor query_mem_desc_
Definition: ResultSet.h:839
std::unique_ptr< ResultSetStorage > storage_
Definition: ResultSet.h:840
size_t getEffectiveKeyWidth() const
TargetValue makeGeoTargetValue(const int8_t *geo_target_ptr, const size_t slot_idx, const TargetInfo &target_info, const size_t target_logical_idx, const size_t entry_buff_idx) const
TargetValue make_avg_target_value(const int8_t *ptr1, const int8_t compact_sz1, const int8_t *ptr2, const int8_t compact_sz2, const TargetInfo &target_info)
std::shared_ptr< RowSetMemoryOwner > row_set_mem_owner_
Definition: ResultSet.h:846
bool is_agg
Definition: TargetInfo.h:40
CHECK(cgen_state)
size_t targetGroupbyIndicesSize() const
CountDistinctDescriptors count_distinct_descriptors_
bool is_distinct_target(const TargetInfo &target_info)
Definition: TargetInfo.h:129
const int8_t getPaddedSlotWidthBytes(const size_t slot_idx) const
SQLAgg agg_kind
Definition: TargetInfo.h:41
ssize_t getTargetGroupbyIndex(const size_t target_idx) const
#define UNLIKELY(x)
Definition: likely.h:20
bool is_real_str_or_array(const TargetInfo &target_info)
bool isSingleColumnGroupByWithPerfectHash() const
HOST DEVICE EncodingType get_compression() const
Definition: sqltypes.h:266
TargetValue makeTargetValue(const int8_t *ptr, const int8_t compact_sz, const TargetInfo &target_info, const size_t target_logical_idx, const bool translate_strings, const bool decimal_to_double, const size_t entry_buff_idx) const
bool is_geometry() const
Definition: sqltypes.h:427
bool separate_varlen_storage_valid_
Definition: ResultSet.h:871
bool is_string() const
Definition: sqltypes.h:415
const int8_t getLogicalSlotWidthBytes(const size_t slot_idx) const
Definition: sqldefs.h:72
bool is_array() const
Definition: sqltypes.h:423
TargetValue makeVarlenTargetValue(const int8_t *ptr1, const int8_t compact_sz1, const int8_t *ptr2, const int8_t compact_sz2, const TargetInfo &target_info, const size_t target_logical_idx, const bool translate_strings, const size_t entry_buff_idx) const

+ Here is the call graph for this function:

InternalTargetValue ResultSet::getVarlenOrderEntry ( const int64_t  str_ptr,
const size_t  str_len 
) const
private

Definition at line 627 of file ResultSetIteration.cpp.

References CHECK(), copy_from_gpu(), CPU, device_id_, device_type_, QueryMemoryDescriptor::getExecutor(), GPU, query_mem_desc_, and row_set_mem_owner_.

628  {
629  char* host_str_ptr{nullptr};
630  std::vector<int8_t> cpu_buffer;
631  if (device_type_ == ExecutorDeviceType::GPU) {
632  cpu_buffer.resize(str_len);
633  const auto executor = query_mem_desc_.getExecutor();
634  CHECK(executor);
635  auto& data_mgr = executor->catalog_->getDataMgr();
636  copy_from_gpu(&data_mgr,
637  &cpu_buffer[0],
638  static_cast<CUdeviceptr>(str_ptr),
639  str_len,
640  device_id_);
641  host_str_ptr = reinterpret_cast<char*>(&cpu_buffer[0]);
642  } else {
643  CHECK(device_type_ == ExecutorDeviceType::CPU);
644  host_str_ptr = reinterpret_cast<char*>(str_ptr);
645  }
646  std::string str(host_str_ptr, str_len);
647  return InternalTargetValue(row_set_mem_owner_->addString(str));
648 }
QueryMemoryDescriptor query_mem_desc_
Definition: ResultSet.h:839
std::shared_ptr< RowSetMemoryOwner > row_set_mem_owner_
Definition: ResultSet.h:846
CHECK(cgen_state)
void copy_from_gpu(Data_Namespace::DataMgr *data_mgr, void *dst, const CUdeviceptr src, const size_t num_bytes, const int device_id)
const ExecutorDeviceType device_type_
Definition: ResultSet.h:837
const Executor * getExecutor() const
const int device_id_
Definition: ResultSet.h:838

+ Here is the call graph for this function:

void ResultSet::holdChunkIterators ( const std::shared_ptr< std::list< ChunkIter >>  chunk_iters)
inline

Definition at line 475 of file ResultSet.h.

References chunk_iters_.

475  {
476  chunk_iters_.push_back(chunk_iters);
477  }
std::vector< std::shared_ptr< std::list< ChunkIter > > > chunk_iters_
Definition: ResultSet.h:853
void ResultSet::holdChunks ( const std::list< std::shared_ptr< Chunk_NS::Chunk >> &  chunks)
inline

Definition at line 472 of file ResultSet.h.

References chunks_.

472  {
473  chunks_ = chunks;
474  }
std::list< std::shared_ptr< Chunk_NS::Chunk > > chunks_
Definition: ResultSet.h:852
void ResultSet::holdLiterals ( std::vector< int8_t > &  literal_buff)
inline

Definition at line 478 of file ResultSet.h.

References literal_buffers_.

478  {
479  literal_buffers_.push_back(std::move(literal_buff));
480  }
std::vector< std::vector< int8_t > > literal_buffers_
Definition: ResultSet.h:856
void ResultSet::initializeStorage ( ) const

Definition at line 992 of file ResultSetReduction.cpp.

992  {
993  if (query_mem_desc_.didOutputColumnar()) {
994  storage_->initializeColWise();
995  } else {
996  storage_->initializeRowWise();
997  }
998 }
QueryMemoryDescriptor query_mem_desc_
Definition: ResultSet.h:839
std::unique_ptr< ResultSetStorage > storage_
Definition: ResultSet.h:840
std::vector< uint32_t > ResultSet::initPermutationBuffer ( const size_t  start,
const size_t  step 
)
private

Definition at line 589 of file ResultSet.cpp.

References CHECK(), CHECK_NE, and DEBUG_TIMER.

590  {
591  auto timer = DEBUG_TIMER(__func__);
592  CHECK_NE(size_t(0), step);
593  std::vector<uint32_t> permutation;
594  const auto total_entries = query_mem_desc_.getEntryCount();
595  permutation.reserve(total_entries / step);
596  for (size_t i = start; i < total_entries; i += step) {
597  const auto storage_lookup_result = findStorage(i);
598  const auto lhs_storage = storage_lookup_result.storage_ptr;
599  const auto off = storage_lookup_result.fixedup_entry_idx;
600  CHECK(lhs_storage);
601  if (!lhs_storage->isEmptyEntry(off)) {
602  permutation.emplace_back(i);
603  }
604  }
605  return permutation;
606 }
QueryMemoryDescriptor query_mem_desc_
Definition: ResultSet.h:839
CHECK(cgen_state)
#define CHECK_NE(x, y)
Definition: Logger.h:206
StorageLookupResult findStorage(const size_t entry_idx) const
Definition: ResultSet.cpp:677
#define DEBUG_TIMER(name)
Definition: Logger.h:313

+ Here is the call graph for this function:

bool ResultSet::isDirectColumnarConversionPossible ( ) const

Determines if it is possible to directly form a ColumnarResults class from this result set, bypassing the default columnarization.

NOTE: If a permutation vector exists (i.e., for some ORDER BY queries), this path becomes equivalent to row-wise columnarization.

Definition at line 1011 of file ResultSet.cpp.

References g_enable_direct_columnarization, GroupByBaselineHash, GroupByPerfectHash, and Projection.

Referenced by copyColumnIntoBuffer().

+ Here is the caller graph for this function:
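A sketch of the guard a caller would place around the conversion, assuming ResultSet.h is included and rows is a populated ResultSet; the branch bodies paraphrase the intent and are not taken from the ColumnarResults implementation:

#include <cstddef>
#include "ResultSet.h"

void convert_column(const ResultSet& rows, const size_t col_idx) {
  if (rows.isDirectColumnarConversionPossible()) {
    if (rows.isZeroCopyColumnarConversionPossible(col_idx)) {
      // expose the result set's own buffer for this column without copying
    } else {
      // copy the column out slot by slot (direct columnarization)
    }
  } else {
    // fall back to the default columnarization through row-wise iteration
  }
}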

bool ResultSet::isExplain ( ) const

Definition at line 498 of file ResultSet.cpp.

498  {
499  return just_explain_;
500 }
const bool just_explain_
Definition: ResultSet.h:873
bool ResultSet::isGeoColOnGpu ( const size_t  col_idx) const

Definition at line 1485 of file ResultSetIteration.cpp.

References CHECK_LT, device_type_, GPU, IS_GEO, lazy_fetch_info_, separate_varlen_storage_valid_, targets_, and to_string().

1485  {
1486  // This should match the logic in makeGeoTargetValue which ultimately calls
1487  // fetch_data_from_gpu when the geo column is on the device.
1488  // TODO(croot): somehow find a way to refactor this and makeGeoTargetValue to use a
1489  // utility function that handles this logic in one place
1490  CHECK_LT(col_idx, targets_.size());
1491  if (!IS_GEO(targets_[col_idx].sql_type.get_type())) {
1492  throw std::runtime_error("Column target at index " + std::to_string(col_idx) +
1493  " is not a geo column. It is of type " +
1494  targets_[col_idx].sql_type.get_type_name() + ".");
1495  }
1496 
1497  const auto& target_info = targets_[col_idx];
1498  if (separate_varlen_storage_valid_ && !target_info.is_agg) {
1499  return false;
1500  }
1501 
1502  if (!lazy_fetch_info_.empty()) {
1503  CHECK_LT(col_idx, lazy_fetch_info_.size());
1504  if (lazy_fetch_info_[col_idx].is_lazily_fetched) {
1505  return false;
1506  }
1507  }
1508 
1509  return device_type_ == ExecutorDeviceType::GPU;
1510 }
std::string to_string(char const *&&v)
const std::vector< TargetInfo > targets_
Definition: ResultSet.h:836
const std::vector< ColumnLazyFetchInfo > lazy_fetch_info_
Definition: ResultSet.h:857
#define CHECK_LT(x, y)
Definition: Logger.h:207
const ExecutorDeviceType device_type_
Definition: ResultSet.h:837
bool separate_varlen_storage_valid_
Definition: ResultSet.h:871
#define IS_GEO(T)
Definition: sqltypes.h:173

+ Here is the call graph for this function:

bool ResultSet::isNull ( const SQLTypeInfo ti,
const InternalTargetValue val,
const bool  float_argument_input 
)
staticprivate

Definition at line 2225 of file ResultSetIteration.cpp.

References CHECK(), SQLTypeInfo::get_elem_type(), InternalTargetValue::i1, InternalTargetValue::i2, SQLTypeInfo::is_column(), InternalTargetValue::isInt(), InternalTargetValue::isNull(), InternalTargetValue::isPair(), InternalTargetValue::isStr(), NULL_DOUBLE, null_val_bit_pattern(), and pair_to_double().

2227  {
2228  const auto& ti_ = (ti.is_column() ? ti.get_elem_type() : ti);
2229  if (ti_.get_notnull()) {
2230  return false;
2231  }
2232  if (val.isInt()) {
2233  return val.i1 == null_val_bit_pattern(ti_, float_argument_input);
2234  }
2235  if (val.isPair()) {
2236  return !val.i2 ||
2237  pair_to_double({val.i1, val.i2}, ti_, float_argument_input) == NULL_DOUBLE;
2238  }
2239  if (val.isStr()) {
2240  return !val.i1;
2241  }
2242  CHECK(val.isNull());
2243  return true;
2244 }
#define NULL_DOUBLE
Definition: sqltypes.h:185
bool isPair() const
Definition: TargetValue.h:67
bool isStr() const
Definition: TargetValue.h:71
double pair_to_double(const std::pair< int64_t, int64_t > &fp_pair, const SQLTypeInfo &ti, const bool float_argument_input)
int64_t null_val_bit_pattern(const SQLTypeInfo &ti, const bool float_argument_input)
CHECK(cgen_state)
bool isNull() const
Definition: TargetValue.h:69
bool is_column() const
Definition: sqltypes.h:428
bool isInt() const
Definition: TargetValue.h:65
SQLTypeInfo get_elem_type() const
Definition: sqltypes.h:622

+ Here is the call graph for this function:

const bool ResultSet::isPermutationBufferEmpty ( ) const
inline

Definition at line 487 of file ResultSet.h.

References permutation_.

487 { return permutation_.empty(); };
std::vector< uint32_t > permutation_
Definition: ResultSet.h:847
bool ResultSet::isRowAtEmpty ( const size_t  index) const

Definition at line 282 of file ResultSetIteration.cpp.

282  {
283  if (logical_index >= entryCount()) {
284  return true;
285  }
286  const auto entry_idx =
287  permutation_.empty() ? logical_index : permutation_[logical_index];
288  const auto storage_lookup_result = findStorage(entry_idx);
289  const auto storage = storage_lookup_result.storage_ptr;
290  const auto local_entry_idx = storage_lookup_result.fixedup_entry_idx;
291  return storage->isEmptyEntry(local_entry_idx);
292 }
std::vector< uint32_t > permutation_
Definition: ResultSet.h:847
StorageLookupResult findStorage(const size_t entry_idx) const
Definition: ResultSet.cpp:677
size_t entryCount() const
bool ResultSet::isTruncated ( ) const

Definition at line 494 of file ResultSet.cpp.

494  {
495  return keep_first_ + drop_first_;
496 }
size_t keep_first_
Definition: ResultSet.h:845
size_t drop_first_
Definition: ResultSet.h:844
bool ResultSet::isZeroCopyColumnarConversionPossible ( size_t  column_idx) const

Definition at line 1029 of file ResultSet.cpp.

References Projection.

1029  {
1030  return query_mem_desc_.getQueryDescriptionType() ==
1031  QueryDescriptionType::Projection &&
1032  appended_storage_.empty() && storage_ &&
1033  (lazy_fetch_info_.empty() || !lazy_fetch_info_[column_idx].is_lazily_fetched);
1034 }
AppendedStorage appended_storage_
Definition: ResultSet.h:841
QueryMemoryDescriptor query_mem_desc_
Definition: ResultSet.h:839
std::unique_ptr< ResultSetStorage > storage_
Definition: ResultSet.h:840
const std::vector< ColumnLazyFetchInfo > lazy_fetch_info_
Definition: ResultSet.h:857
QueryDescriptionType getQueryDescriptionType() const
void ResultSet::keepFirstN ( const size_t  n)

Definition at line 94 of file ResultSet.cpp.

References CHECK_EQ.

94  {
95  CHECK_EQ(-1, cached_row_count_);
96  keep_first_ = n;
97 }
#define CHECK_EQ(x, y)
Definition: Logger.h:205
size_t keep_first_
Definition: ResultSet.h:845
std::atomic< ssize_t > cached_row_count_
Definition: ResultSet.h:874
int64_t ResultSet::lazyReadInt ( const int64_t  ival,
const size_t  target_logical_idx,
const StorageLookupResult storage_lookup_result 
) const
private

Definition at line 650 of file ResultSetIteration.cpp.

References CHECK(), CHECK_LT, ChunkIter_get_nth(), col_buffers_, ResultSet::StorageLookupResult::fixedup_entry_idx, getColumnFrag(), VarlenDatum::is_null, kENCODING_NONE, lazy_decode(), lazy_fetch_info_, VarlenDatum::length, VarlenDatum::pointer, row_set_mem_owner_, ResultSet::StorageLookupResult::storage_idx, and targets_.

652  {
653  if (!lazy_fetch_info_.empty()) {
654  CHECK_LT(target_logical_idx, lazy_fetch_info_.size());
655  const auto& col_lazy_fetch = lazy_fetch_info_[target_logical_idx];
656  if (col_lazy_fetch.is_lazily_fetched) {
657  CHECK_LT(static_cast<size_t>(storage_lookup_result.storage_idx),
658  col_buffers_.size());
659  int64_t ival_copy = ival;
660  auto& frag_col_buffers =
661  getColumnFrag(static_cast<size_t>(storage_lookup_result.storage_idx),
662  target_logical_idx,
663  ival_copy);
664  auto& frag_col_buffer = frag_col_buffers[col_lazy_fetch.local_col_id];
665  CHECK_LT(target_logical_idx, targets_.size());
666  const TargetInfo& target_info = targets_[target_logical_idx];
667  CHECK(!target_info.is_agg);
668  if (target_info.sql_type.is_string() &&
669  target_info.sql_type.get_compression() == kENCODING_NONE) {
670  VarlenDatum vd;
671  bool is_end{false};
672  ChunkIter_get_nth(
673  reinterpret_cast<ChunkIter*>(const_cast<int8_t*>(frag_col_buffer)),
674  storage_lookup_result.fixedup_entry_idx,
675  false,
676  &vd,
677  &is_end);
678  CHECK(!is_end);
679  if (vd.is_null) {
680  return 0;
681  }
682  std::string fetched_str(reinterpret_cast<char*>(vd.pointer), vd.length);
683  return reinterpret_cast<int64_t>(row_set_mem_owner_->addString(fetched_str));
684  }
685  return lazy_decode(col_lazy_fetch, frag_col_buffer, ival_copy);
686  }
687  }
688  return ival;
689 }
int64_t lazy_decode(const ColumnLazyFetchInfo &col_lazy_fetch, const int8_t *byte_stream, const int64_t pos)
bool is_null
Definition: sqltypes.h:75
DEVICE void ChunkIter_get_nth(ChunkIter *it, int n, bool uncompress, VarlenDatum *result, bool *is_end)
Definition: ChunkIter.cpp:181
int8_t * pointer
Definition: sqltypes.h:74
const std::vector< TargetInfo > targets_
Definition: ResultSet.h:836
std::shared_ptr< RowSetMemoryOwner > row_set_mem_owner_
Definition: ResultSet.h:846
CHECK(cgen_state)
const std::vector< ColumnLazyFetchInfo > lazy_fetch_info_
Definition: ResultSet.h:857
#define CHECK_LT(x, y)
Definition: Logger.h:207
std::vector< std::vector< std::vector< const int8_t * > > > col_buffers_
Definition: ResultSet.h:858
const std::vector< const int8_t * > & getColumnFrag(const size_t storge_idx, const size_t col_logical_idx, int64_t &global_idx) const
size_t length
Definition: sqltypes.h:73

+ Here is the call graph for this function:

TargetValue ResultSet::makeGeoTargetValue ( const int8_t *  geo_target_ptr,
const size_t  slot_idx,
const TargetInfo target_info,
const size_t  target_logical_idx,
const size_t  entry_buff_idx 
) const
private

Definition at line 1516 of file ResultSetIteration.cpp.

References advance_to_next_columnar_target_buff(), CHECK(), CHECK_EQ, CHECK_LT, col_buffers_, device_id_, device_type_, QueryMemoryDescriptor::didOutputColumnar(), findStorage(), geo_return_type_, SQLTypeInfo::get_type(), SQLTypeInfo::get_type_name(), getColumnFrag(), QueryMemoryDescriptor::getExecutor(), QueryMemoryDescriptor::getPaddedColWidthForRange(), QueryMemoryDescriptor::getPaddedSlotWidthBytes(), getStorageIndex(), GPU, TargetInfo::is_agg, SQLTypeInfo::is_geometry(), ColumnLazyFetchInfo::is_lazily_fetched, kLINESTRING, kMULTIPOLYGON, kPOINT, kPOLYGON, lazy_fetch_info_, ColumnLazyFetchInfo::local_col_id, query_mem_desc_, read_int_from_buff(), separate_varlen_storage_valid_, serialized_varlen_buffer_, TargetInfo::sql_type, and UNREACHABLE.

Referenced by getTargetValueFromBufferColwise(), and getTargetValueFromBufferRowwise().

1520  {
1521  CHECK(target_info.sql_type.is_geometry());
1522 
1523  auto getNextTargetBufferRowWise = [&](const size_t slot_idx, const size_t range) {
1524  return geo_target_ptr + query_mem_desc_.getPaddedColWidthForRange(slot_idx, range);
1525  };
1526 
1527  auto getNextTargetBufferColWise = [&](const size_t slot_idx, const size_t range) {
1528  const auto storage_info = findStorage(entry_buff_idx);
1529  auto crt_geo_col_ptr = geo_target_ptr;
1530  for (size_t i = slot_idx; i < slot_idx + range; i++) {
1531  crt_geo_col_ptr = advance_to_next_columnar_target_buff(
1532  crt_geo_col_ptr, storage_info.storage_ptr->query_mem_desc_, i);
1533  }
1534  // adjusting the column pointer to represent a pointer to the geo target value
1535  return crt_geo_col_ptr +
1536  storage_info.fixedup_entry_idx *
1537  storage_info.storage_ptr->query_mem_desc_.getPaddedSlotWidthBytes(
1538  slot_idx + range);
1539  };
1540 
1541  auto getNextTargetBuffer = [&](const size_t slot_idx, const size_t range) {
1542  return query_mem_desc_.didOutputColumnar()
1543  ? getNextTargetBufferColWise(slot_idx, range)
1544  : getNextTargetBufferRowWise(slot_idx, range);
1545  };
1546 
1547  auto getCoordsDataPtr = [&](const int8_t* geo_target_ptr) {
1548  return read_int_from_buff(getNextTargetBuffer(slot_idx, 0),
1549  query_mem_desc_.getPaddedSlotWidthBytes(slot_idx));
1550  };
1551 
1552  auto getCoordsLength = [&](const int8_t* geo_target_ptr) {
1553  return read_int_from_buff(getNextTargetBuffer(slot_idx, 1),
1554  query_mem_desc_.getPaddedSlotWidthBytes(slot_idx + 1));
1555  };
1556 
1557  auto getRingSizesPtr = [&](const int8_t* geo_target_ptr) {
1558  return read_int_from_buff(getNextTargetBuffer(slot_idx, 2),
1559  query_mem_desc_.getPaddedSlotWidthBytes(slot_idx + 2));
1560  };
1561 
1562  auto getRingSizesLength = [&](const int8_t* geo_target_ptr) {
1563  return read_int_from_buff(getNextTargetBuffer(slot_idx, 3),
1564  query_mem_desc_.getPaddedSlotWidthBytes(slot_idx + 3));
1565  };
1566 
1567  auto getPolyRingsPtr = [&](const int8_t* geo_target_ptr) {
1568  return read_int_from_buff(getNextTargetBuffer(slot_idx, 4),
1569  query_mem_desc_.getPaddedSlotWidthBytes(slot_idx + 4));
1570  };
1571 
1572  auto getPolyRingsLength = [&](const int8_t* geo_target_ptr) {
1573  return read_int_from_buff(getNextTargetBuffer(slot_idx, 5),
1574  query_mem_desc_.getPaddedSlotWidthBytes(slot_idx + 5));
1575  };
1576 
1577  auto getFragColBuffers = [&]() -> decltype(auto) {
1578  const auto storage_idx = getStorageIndex(entry_buff_idx);
1579  CHECK_LT(storage_idx.first, col_buffers_.size());
1580  auto global_idx = getCoordsDataPtr(geo_target_ptr);
1581  return getColumnFrag(storage_idx.first, target_logical_idx, global_idx);
1582  };
1583 
1584  const bool is_gpu_fetch = device_type_ == ExecutorDeviceType::GPU;
1585 
1586  auto getDataMgr = [&]() {
1587  auto executor = query_mem_desc_.getExecutor();
1588  CHECK(executor);
1589  auto& data_mgr = executor->catalog_->getDataMgr();
1590  return &data_mgr;
1591  };
1592 
1593  auto getSeparateVarlenStorage = [&]() -> decltype(auto) {
1594  const auto storage_idx = getStorageIndex(entry_buff_idx);
1595  CHECK_LT(storage_idx.first, serialized_varlen_buffer_.size());
1596  const auto& varlen_buffer = serialized_varlen_buffer_[storage_idx.first];
1597  return varlen_buffer;
1598  };
1599 
1600  if (separate_varlen_storage_valid_ && getCoordsDataPtr(geo_target_ptr) < 0) {
1601  CHECK_EQ(-1, getCoordsDataPtr(geo_target_ptr));
1602  return TargetValue(nullptr);
1603  }
1604 
1605  const ColumnLazyFetchInfo* col_lazy_fetch = nullptr;
1606  if (!lazy_fetch_info_.empty()) {
1607  CHECK_LT(target_logical_idx, lazy_fetch_info_.size());
1608  col_lazy_fetch = &lazy_fetch_info_[target_logical_idx];
1609  }
1610 
1611  switch (target_info.sql_type.get_type()) {
1612  case kPOINT: {
1613  if (separate_varlen_storage_valid_ && !target_info.is_agg) {
1614  const auto& varlen_buffer = getSeparateVarlenStorage();
1615  CHECK_LT(static_cast<size_t>(getCoordsDataPtr(geo_target_ptr)),
1616  varlen_buffer.size());
1617 
1618  return GeoTargetValueBuilder<kPOINT, GeoQueryOutputFetchHandler>::build(
1619  target_info.sql_type,
1620  geo_return_type_,
1621  nullptr,
1622  false,
1623  device_id_,
1624  reinterpret_cast<int64_t>(
1625  varlen_buffer[getCoordsDataPtr(geo_target_ptr)].data()),
1626  static_cast<int64_t>(varlen_buffer[getCoordsDataPtr(geo_target_ptr)].size()));
1627  } else if (col_lazy_fetch && col_lazy_fetch->is_lazily_fetched) {
1628  const auto& frag_col_buffers = getFragColBuffers();
1629  return GeoTargetValueBuilder<kPOINT, GeoLazyFetchHandler>::build(
1630  target_info.sql_type,
1631  geo_return_type_,
1632  frag_col_buffers[col_lazy_fetch->local_col_id],
1633  getCoordsDataPtr(geo_target_ptr));
1634  } else {
1635  return GeoTargetValueBuilder<kPOINT, GeoQueryOutputFetchHandler>::build(
1636  target_info.sql_type,
1637  geo_return_type_,
1638  is_gpu_fetch ? getDataMgr() : nullptr,
1639  is_gpu_fetch,
1640  device_id_,
1641  getCoordsDataPtr(geo_target_ptr),
1642  getCoordsLength(geo_target_ptr));
1643  }
1644  break;
1645  }
1646  case kLINESTRING: {
1647  if (separate_varlen_storage_valid_ && !target_info.is_agg) {
1648  const auto& varlen_buffer = getSeparateVarlenStorage();
1649  CHECK_LT(static_cast<size_t>(getCoordsDataPtr(geo_target_ptr)),
1650  varlen_buffer.size());
1651 
1652  return GeoTargetValueBuilder<kLINESTRING, GeoQueryOutputFetchHandler>::build(
1653  target_info.sql_type,
1654  geo_return_type_,
1655  nullptr,
1656  false,
1657  device_id_,
1658  reinterpret_cast<int64_t>(
1659  varlen_buffer[getCoordsDataPtr(geo_target_ptr)].data()),
1660  static_cast<int64_t>(varlen_buffer[getCoordsDataPtr(geo_target_ptr)].size()));
1661  } else if (col_lazy_fetch && col_lazy_fetch->is_lazily_fetched) {
1662  const auto& frag_col_buffers = getFragColBuffers();
1663  return GeoTargetValueBuilder<kLINESTRING, GeoLazyFetchHandler>::build(
1664  target_info.sql_type,
1665  geo_return_type_,
1666  frag_col_buffers[col_lazy_fetch->local_col_id],
1667  getCoordsDataPtr(geo_target_ptr));
1668  } else {
1669  return GeoTargetValueBuilder<kLINESTRING, GeoQueryOutputFetchHandler>::build(
1670  target_info.sql_type,
1671  geo_return_type_,
1672  is_gpu_fetch ? getDataMgr() : nullptr,
1673  is_gpu_fetch,
1674  device_id_,
1675  getCoordsDataPtr(geo_target_ptr),
1676  getCoordsLength(geo_target_ptr));
1677  }
1678  break;
1679  }
1680  case kPOLYGON: {
1681  if (separate_varlen_storage_valid_ && !target_info.is_agg) {
1682  const auto& varlen_buffer = getSeparateVarlenStorage();
1683  CHECK_LT(static_cast<size_t>(getCoordsDataPtr(geo_target_ptr) + 1),
1684  varlen_buffer.size());
1685 
1686  return GeoTargetValueBuilder<kPOLYGON, GeoQueryOutputFetchHandler>::build(
1687  target_info.sql_type,
1688  geo_return_type_,
1689  nullptr,
1690  false,
1691  device_id_,
1692  reinterpret_cast<int64_t>(
1693  varlen_buffer[getCoordsDataPtr(geo_target_ptr)].data()),
1694  static_cast<int64_t>(varlen_buffer[getCoordsDataPtr(geo_target_ptr)].size()),
1695  reinterpret_cast<int64_t>(
1696  varlen_buffer[getCoordsDataPtr(geo_target_ptr) + 1].data()),
1697  static_cast<int64_t>(
1698  varlen_buffer[getCoordsDataPtr(geo_target_ptr) + 1].size()));
1699  } else if (col_lazy_fetch && col_lazy_fetch->is_lazily_fetched) {
1700  const auto& frag_col_buffers = getFragColBuffers();
1701 
1702  return GeoTargetValueBuilder<kPOLYGON, GeoLazyFetchHandler>::build(
1703  target_info.sql_type,
1704  geo_return_type_,
1705  frag_col_buffers[col_lazy_fetch->local_col_id],
1706  getCoordsDataPtr(geo_target_ptr),
1707  frag_col_buffers[col_lazy_fetch->local_col_id + 1],
1708  getCoordsDataPtr(geo_target_ptr));
1709  } else {
1710  return GeoTargetValueBuilder<kPOLYGON, GeoQueryOutputFetchHandler>::build(
1711  target_info.sql_type,
1712  geo_return_type_,
1713  is_gpu_fetch ? getDataMgr() : nullptr,
1714  is_gpu_fetch,
1715  device_id_,
1716  getCoordsDataPtr(geo_target_ptr),
1717  getCoordsLength(geo_target_ptr),
1718  getRingSizesPtr(geo_target_ptr),
1719  getRingSizesLength(geo_target_ptr) * 4);
1720  }
1721  break;
1722  }
1723  case kMULTIPOLYGON: {
1724  if (separate_varlen_storage_valid_ && !target_info.is_agg) {
1725  const auto& varlen_buffer = getSeparateVarlenStorage();
1726  CHECK_LT(static_cast<size_t>(getCoordsDataPtr(geo_target_ptr) + 2),
1727  varlen_buffer.size());
1728 
1729  return GeoTargetValueBuilder<kMULTIPOLYGON, GeoQueryOutputFetchHandler>::build(
1730  target_info.sql_type,
1731  geo_return_type_,
1732  nullptr,
1733  false,
1734  device_id_,
1735  reinterpret_cast<int64_t>(
1736  varlen_buffer[getCoordsDataPtr(geo_target_ptr)].data()),
1737  static_cast<int64_t>(varlen_buffer[getCoordsDataPtr(geo_target_ptr)].size()),
1738  reinterpret_cast<int64_t>(
1739  varlen_buffer[getCoordsDataPtr(geo_target_ptr) + 1].data()),
1740  static_cast<int64_t>(
1741  varlen_buffer[getCoordsDataPtr(geo_target_ptr) + 1].size()),
1742  reinterpret_cast<int64_t>(
1743  varlen_buffer[getCoordsDataPtr(geo_target_ptr) + 2].data()),
1744  static_cast<int64_t>(
1745  varlen_buffer[getCoordsDataPtr(geo_target_ptr) + 2].size()));
1746  } else if (col_lazy_fetch && col_lazy_fetch->is_lazily_fetched) {
1747  const auto& frag_col_buffers = getFragColBuffers();
1748 
1749  return GeoTargetValueBuilder<kMULTIPOLYGON, GeoLazyFetchHandler>::build(
1750  target_info.sql_type,
1751  geo_return_type_,
1752  frag_col_buffers[col_lazy_fetch->local_col_id],
1753  getCoordsDataPtr(geo_target_ptr),
1754  frag_col_buffers[col_lazy_fetch->local_col_id + 1],
1755  getCoordsDataPtr(geo_target_ptr),
1756  frag_col_buffers[col_lazy_fetch->local_col_id + 2],
1757  getCoordsDataPtr(geo_target_ptr));
1758  } else {
1759  return GeoTargetValueBuilder<kMULTIPOLYGON, GeoQueryOutputFetchHandler>::build(
1760  target_info.sql_type,
1761  geo_return_type_,
1762  is_gpu_fetch ? getDataMgr() : nullptr,
1763  is_gpu_fetch,
1764  device_id_,
1765  getCoordsDataPtr(geo_target_ptr),
1766  getCoordsLength(geo_target_ptr),
1767  getRingSizesPtr(geo_target_ptr),
1768  getRingSizesLength(geo_target_ptr) * 4,
1769  getPolyRingsPtr(geo_target_ptr),
1770  getPolyRingsLength(geo_target_ptr) * 4);
1771  }
1772  break;
1773  }
1774  default:
1775  throw std::runtime_error("Unknown Geometry type encountered: " +
1776  target_info.sql_type.get_type_name());
1777  }
1778  UNREACHABLE();
1779  return TargetValue(nullptr);
1780 }
#define CHECK_EQ(x, y)
Definition: Logger.h:205
std::pair< size_t, size_t > getStorageIndex(const size_t entry_idx) const
Definition: ResultSet.cpp:652
GeoReturnType geo_return_type_
Definition: ResultSet.h:878
T advance_to_next_columnar_target_buff(T target_ptr, const QueryMemoryDescriptor &query_mem_desc, const size_t target_slot_idx)
SQLTypeInfo sql_type
Definition: TargetInfo.h:42
QueryMemoryDescriptor query_mem_desc_
Definition: ResultSet.h:839
#define UNREACHABLE()
Definition: Logger.h:241
int64_t read_int_from_buff(const int8_t *ptr, const int8_t compact_sz)
std::vector< SerializedVarlenBufferStorage > serialized_varlen_buffer_
Definition: ResultSet.h:870
HOST DEVICE SQLTypes get_type() const
Definition: sqltypes.h:258
const int local_col_id
Definition: ResultSet.h:235
bool is_agg
Definition: TargetInfo.h:40
CHECK(cgen_state)
size_t getPaddedColWidthForRange(const size_t offset, const size_t range) const
StorageLookupResult findStorage(const size_t entry_idx) const
Definition: ResultSet.cpp:677
const int8_t getPaddedSlotWidthBytes(const size_t slot_idx) const
const std::vector< ColumnLazyFetchInfo > lazy_fetch_info_
Definition: ResultSet.h:857
#define CHECK_LT(x, y)
Definition: Logger.h:207
std::vector< std::vector< std::vector< const int8_t * > > > col_buffers_
Definition: ResultSet.h:858
std::string get_type_name() const
Definition: sqltypes.h:361
const bool is_lazily_fetched
Definition: ResultSet.h:234
const ExecutorDeviceType device_type_
Definition: ResultSet.h:837
bool is_geometry() const
Definition: sqltypes.h:427
bool separate_varlen_storage_valid_
Definition: ResultSet.h:871
boost::variant< ScalarTargetValue, ArrayTargetValue, GeoTargetValue, GeoTargetValuePtr > TargetValue
Definition: TargetValue.h:167
const std::vector< const int8_t * > & getColumnFrag(const size_t storge_idx, const size_t col_logical_idx, int64_t &global_idx) const
const Executor * getExecutor() const
const int device_id_
Definition: ResultSet.h:838

+ Here is the call graph for this function:

+ Here is the caller graph for this function:

TargetValue ResultSet::makeTargetValue ( const int8_t *  ptr,
const int8_t  compact_sz,
const TargetInfo target_info,
const size_t  target_logical_idx,
const bool  translate_strings,
const bool  decimal_to_double,
const size_t  entry_buff_idx 
) const
private

Definition at line 1783 of file ResultSetIteration.cpp.

References TargetInfo::agg_kind, CHECK(), CHECK_EQ, CHECK_GE, CHECK_LT, col_buffers_, count_distinct_set_size(), decimal_to_int_type(), executor_, exp_to_scale(), QueryMemoryDescriptor::forceFourByteFloat(), get_compact_type(), SQLTypeInfo::get_elem_type(), getColumnFrag(), QueryMemoryDescriptor::getCountDistinctDescriptor(), getStorageIndex(), inline_int_null_val(), anonymous_namespace{ResultSetIteration.cpp}::int_resize_cast(), TargetInfo::is_agg, SQLTypeInfo::is_column(), SQLTypeInfo::is_date_in_days(), is_distinct_target(), QueryMemoryDescriptor::isLogicalSizedColumnsAllowed(), kAVG, kBIGINT, kENCODING_DICT, kFLOAT, kMAX, kMIN, kSINGLE_VALUE, kSUM, lazy_decode(), lazy_fetch_info_, NULL_DOUBLE, NULL_INT, query_mem_desc_, read_int_from_buff(), row_set_mem_owner_, and TargetInfo::sql_type.

Referenced by getTargetValueFromBufferColwise(), and getTargetValueFromBufferRowwise().

1789  {
1790  auto actual_compact_sz = compact_sz;
1791  const auto& type_info =
1792  (target_info.sql_type.is_column() ? target_info.sql_type.get_elem_type()
1793  : target_info.sql_type);
1794  if (type_info.get_type() == kFLOAT && !query_mem_desc_.forceFourByteFloat()) {
1795  if (query_mem_desc_.isLogicalSizedColumnsAllowed()) {
1796  actual_compact_sz = sizeof(float);
1797  } else {
1798  actual_compact_sz = sizeof(double);
1799  }
1800  if (target_info.is_agg &&
1801  (target_info.agg_kind == kAVG || target_info.agg_kind == kSUM ||
1802  target_info.agg_kind == kMIN || target_info.agg_kind == kMAX ||
1803  target_info.agg_kind == kSINGLE_VALUE)) {
1804  // The above listed aggregates use two floats in a single 8-byte slot. Set the
1805  // padded size to 4 bytes to properly read each value.
1806  actual_compact_sz = sizeof(float);
1807  }
1808  }
1809  if (get_compact_type(target_info).is_date_in_days()) {
1810  // Dates encoded in days are converted to 8 byte values on read.
1811  actual_compact_sz = sizeof(int64_t);
1812  }
1813 
1814  // String dictionary keys are read as 32-bit values regardless of encoding
1815  if (type_info.is_string() && type_info.get_compression() == kENCODING_DICT &&
1816  type_info.get_comp_param()) {
1817  actual_compact_sz = sizeof(int32_t);
1818  }
1819 
1820  auto ival = read_int_from_buff(ptr, actual_compact_sz);
1821  const auto& chosen_type = get_compact_type(target_info);
1822  if (!lazy_fetch_info_.empty()) {
1823  CHECK_LT(target_logical_idx, lazy_fetch_info_.size());
1824  const auto& col_lazy_fetch = lazy_fetch_info_[target_logical_idx];
1825  if (col_lazy_fetch.is_lazily_fetched) {
1826  CHECK_GE(ival, 0);
1827  const auto storage_idx = getStorageIndex(entry_buff_idx);
1828  CHECK_LT(storage_idx.first, col_buffers_.size());
1829  auto& frag_col_buffers = getColumnFrag(storage_idx.first, target_logical_idx, ival);
1830  CHECK_LT(size_t(col_lazy_fetch.local_col_id), frag_col_buffers.size());
1831  ival = lazy_decode(
1832  col_lazy_fetch, frag_col_buffers[col_lazy_fetch.local_col_id], ival);
1833  if (chosen_type.is_fp()) {
1834  const auto dval = *reinterpret_cast<const double*>(may_alias_ptr(&ival));
1835  if (chosen_type.get_type() == kFLOAT) {
1836  return ScalarTargetValue(static_cast<float>(dval));
1837  } else {
1838  return ScalarTargetValue(dval);
1839  }
1840  }
1841  }
1842  }
1843  if (chosen_type.is_fp()) {
1844  switch (actual_compact_sz) {
1845  case 8: {
1846  const auto dval = *reinterpret_cast<const double*>(ptr);
1847  return chosen_type.get_type() == kFLOAT
1848  ? ScalarTargetValue(static_cast<const float>(dval))
1849  : ScalarTargetValue(dval);
1850  }
1851  case 4: {
1852  CHECK_EQ(kFLOAT, chosen_type.get_type());
1853  return *reinterpret_cast<const float*>(ptr);
1854  }
1855  default:
1856  CHECK(false);
1857  }
1858  }
1859  if (chosen_type.is_integer() | chosen_type.is_boolean() || chosen_type.is_time() ||
1860  chosen_type.is_timeinterval()) {
1861  if (is_distinct_target(target_info)) {
1862  return TargetValue(count_distinct_set_size(
1863  ival, query_mem_desc_.getCountDistinctDescriptor(target_logical_idx)));
1864  }
1865  // TODO(alex): remove int_resize_cast, make read_int_from_buff return the
1866  // right type instead
1867  if (inline_int_null_val(chosen_type) ==
1868  int_resize_cast(ival, chosen_type.get_logical_size())) {
1869  return inline_int_null_val(type_info);
1870  }
1871  return ival;
1872  }
1873  if (chosen_type.is_string() && chosen_type.get_compression() == kENCODING_DICT) {
1874  if (translate_strings) {
1875  if (static_cast<int32_t>(ival) ==
1876  NULL_INT) { // TODO(alex): this isn't nice, fix it
1877  return NullableString(nullptr);
1878  }
1879  StringDictionaryProxy* sdp{nullptr};
1880  if (!chosen_type.get_comp_param()) {
1881  sdp = row_set_mem_owner_->getLiteralStringDictProxy();
1882  } else {
1883  sdp = executor_
1884  ? executor_->getStringDictionaryProxy(
1885  chosen_type.get_comp_param(), row_set_mem_owner_, false)
1886  : row_set_mem_owner_->getStringDictProxy(chosen_type.get_comp_param());
1887  }
1888  return NullableString(sdp->getString(ival));
1889  } else {
1890  return static_cast<int64_t>(static_cast<int32_t>(ival));
1891  }
1892  }
1893  if (chosen_type.is_decimal()) {
1894  if (decimal_to_double) {
1895  if (target_info.is_agg &&
1896  (target_info.agg_kind == kAVG || target_info.agg_kind == kSUM ||
1897  target_info.agg_kind == kMIN || target_info.agg_kind == kMAX) &&
1898  ival == inline_int_null_val(SQLTypeInfo(kBIGINT, false))) {
1899  return NULL_DOUBLE;
1900  }
1901  if (ival ==
1902  inline_int_null_val(SQLTypeInfo(decimal_to_int_type(chosen_type), false))) {
1903  return NULL_DOUBLE;
1904  }
1905  return static_cast<double>(ival) / exp_to_scale(chosen_type.get_scale());
1906  }
1907  return ival;
1908  }
1909  CHECK(false);
1910  return TargetValue(int64_t(0));
1911 }
#define CHECK_EQ(x, y)
Definition: Logger.h:205
int64_t lazy_decode(const ColumnLazyFetchInfo &col_lazy_fetch, const int8_t *byte_stream, const int64_t pos)
std::pair< size_t, size_t > getStorageIndex(const size_t entry_idx) const
Definition: ResultSet.cpp:652
#define NULL_DOUBLE
Definition: sqltypes.h:185
double decimal_to_double(const SQLTypeInfo &otype, int64_t oval)
bool isLogicalSizedColumnsAllowed() const
SQLTypeInfo sql_type
Definition: TargetInfo.h:42
const Executor * executor_
Definition: ResultSet.h:850
QueryMemoryDescriptor query_mem_desc_
Definition: ResultSet.h:839
#define CHECK_GE(x, y)
Definition: Logger.h:210
int64_t read_int_from_buff(const int8_t *ptr, const int8_t compact_sz)
Definition: sqldefs.h:73
const SQLTypeInfo get_compact_type(const TargetInfo &target)
std::shared_ptr< RowSetMemoryOwner > row_set_mem_owner_
Definition: ResultSet.h:846
bool is_agg
Definition: TargetInfo.h:40
CHECK(cgen_state)
int64_t count_distinct_set_size(const int64_t set_handle, const CountDistinctDescriptor &count_distinct_desc)
Definition: CountDistinct.h:75
Definition: sqldefs.h:75
bool is_distinct_target(const TargetInfo &target_info)
Definition: TargetInfo.h:129
#define NULL_INT
Definition: sqltypes.h:182
const std::vector< ColumnLazyFetchInfo > lazy_fetch_info_
Definition: ResultSet.h:857
SQLAgg agg_kind
Definition: TargetInfo.h:41
SQLTypes decimal_to_int_type(const SQLTypeInfo &ti)
Definition: Datum.cpp:311
bool is_column() const
Definition: sqltypes.h:428
const CountDistinctDescriptor & getCountDistinctDescriptor(const size_t idx) const
#define CHECK_LT(x, y)
Definition: Logger.h:207
bool is_date_in_days() const
Definition: sqltypes.h:630
int64_t int_resize_cast(const int64_t ival, const size_t sz)
std::vector< std::vector< std::vector< const int8_t * > > > col_buffers_
Definition: ResultSet.h:858
boost::variant< std::string, void * > NullableString
Definition: TargetValue.h:155
uint64_t exp_to_scale(const unsigned exp)
int64_t inline_int_null_val(const SQL_TYPE_INFO &ti)
boost::variant< ScalarTargetValue, ArrayTargetValue, GeoTargetValue, GeoTargetValuePtr > TargetValue
Definition: TargetValue.h:167
Definition: sqldefs.h:74
SQLTypeInfo get_elem_type() const
Definition: sqltypes.h:622
Definition: sqldefs.h:72
const std::vector< const int8_t * > & getColumnFrag(const size_t storge_idx, const size_t col_logical_idx, int64_t &global_idx) const
boost::variant< int64_t, double, float, NullableString > ScalarTargetValue
Definition: TargetValue.h:156

+ Here is the call graph for this function:

+ Here is the caller graph for this function:

TargetValue ResultSet::makeVarlenTargetValue ( const int8_t *  ptr1,
const int8_t  compact_sz1,
const int8_t *  ptr2,
const int8_t  compact_sz2,
const TargetInfo target_info,
const size_t  target_logical_idx,
const bool  translate_strings,
const size_t  entry_buff_idx 
) const
private

Definition at line 1359 of file ResultSetIteration.cpp.

References anonymous_namespace{ResultSetIteration.cpp}::build_array_target_value(), CHECK(), CHECK_EQ, CHECK_GE, CHECK_GT, CHECK_LT, ChunkIter_get_nth(), col_buffers_, copy_from_gpu(), device_id_, device_type_, executor_, SQLTypeInfo::get_array_context_logical_size(), SQLTypeInfo::get_compression(), SQLTypeInfo::get_elem_type(), SQLTypeInfo::get_type(), getColumnFrag(), QueryMemoryDescriptor::getExecutor(), getStorageIndex(), GPU, TargetInfo::is_agg, SQLTypeInfo::is_array(), VarlenDatum::is_null, SQLTypeInfo::is_string(), kARRAY, kENCODING_NONE, lazy_fetch_info_, VarlenDatum::length, run_benchmark_import::optional, VarlenDatum::pointer, query_mem_desc_, read_int_from_buff(), row_set_mem_owner_, separate_varlen_storage_valid_, serialized_varlen_buffer_, and TargetInfo::sql_type.

Referenced by getTargetValueFromBufferColwise(), and getTargetValueFromBufferRowwise().

1366  {
1367  auto varlen_ptr = read_int_from_buff(ptr1, compact_sz1);
1368  if (separate_varlen_storage_valid_ && !target_info.is_agg) {
1369  if (varlen_ptr < 0) {
1370  CHECK_EQ(-1, varlen_ptr);
1371  if (target_info.sql_type.get_type() == kARRAY) {
1372  return ArrayTargetValue(boost::optional<std::vector<ScalarTargetValue>>{});
1373  }
1374  return TargetValue(nullptr);
1375  }
1376  const auto storage_idx = getStorageIndex(entry_buff_idx);
1377  if (target_info.sql_type.is_string()) {
1378  CHECK(target_info.sql_type.get_compression() == kENCODING_NONE);
1379  CHECK_LT(storage_idx.first, serialized_varlen_buffer_.size());
1380  const auto& varlen_buffer_for_storage =
1381  serialized_varlen_buffer_[storage_idx.first];
1382  CHECK_LT(static_cast<size_t>(varlen_ptr), varlen_buffer_for_storage.size());
1383  return varlen_buffer_for_storage[varlen_ptr];
1384  } else if (target_info.sql_type.get_type() == kARRAY) {
1385  CHECK_LT(storage_idx.first, serialized_varlen_buffer_.size());
1386  const auto& varlen_buffer = serialized_varlen_buffer_[storage_idx.first];
1387  CHECK_LT(static_cast<size_t>(varlen_ptr), varlen_buffer.size());
1388 
1389  return build_array_target_value(
1390  target_info.sql_type,
1391  reinterpret_cast<const int8_t*>(varlen_buffer[varlen_ptr].data()),
1392  varlen_buffer[varlen_ptr].size(),
1393  translate_strings,
1394  row_set_mem_owner_,
1395  executor_);
1396  } else {
1397  CHECK(false);
1398  }
1399  }
1400  if (!lazy_fetch_info_.empty()) {
1401  CHECK_LT(target_logical_idx, lazy_fetch_info_.size());
1402  const auto& col_lazy_fetch = lazy_fetch_info_[target_logical_idx];
1403  if (col_lazy_fetch.is_lazily_fetched) {
1404  const auto storage_idx = getStorageIndex(entry_buff_idx);
1405  CHECK_LT(storage_idx.first, col_buffers_.size());
1406  auto& frag_col_buffers =
1407  getColumnFrag(storage_idx.first, target_logical_idx, varlen_ptr);
1408  bool is_end{false};
1409  if (target_info.sql_type.is_string()) {
1410  VarlenDatum vd;
1411  ChunkIter_get_nth(reinterpret_cast<ChunkIter*>(const_cast<int8_t*>(
1412  frag_col_buffers[col_lazy_fetch.local_col_id])),
1413  varlen_ptr,
1414  false,
1415  &vd,
1416  &is_end);
1417  CHECK(!is_end);
1418  if (vd.is_null) {
1419  return TargetValue(nullptr);
1420  }
1421  CHECK(vd.pointer);
1422  CHECK_GT(vd.length, 0u);
1423  std::string fetched_str(reinterpret_cast<char*>(vd.pointer), vd.length);
1424  return fetched_str;
1425  } else {
1426  CHECK(target_info.sql_type.is_array());
1427  ArrayDatum ad;
1428  ChunkIter_get_nth(reinterpret_cast<ChunkIter*>(const_cast<int8_t*>(
1429  frag_col_buffers[col_lazy_fetch.local_col_id])),
1430  varlen_ptr,
1431  &ad,
1432  &is_end);
1433  CHECK(!is_end);
1434  if (ad.is_null) {
1435  return ArrayTargetValue(boost::optional<std::vector<ScalarTargetValue>>{});
1436  }
1437  CHECK_GE(ad.length, 0u);
1438  if (ad.length > 0) {
1439  CHECK(ad.pointer);
1440  }
1441  return build_array_target_value(target_info.sql_type,
1442  ad.pointer,
1443  ad.length,
1444  translate_strings,
1445  row_set_mem_owner_,
1446  executor_);
1447  }
1448  }
1449  }
1450  if (!varlen_ptr) {
1451  if (target_info.sql_type.is_array()) {
1452  return ArrayTargetValue(boost::optional<std::vector<ScalarTargetValue>>{});
1453  }
1454  return TargetValue(nullptr);
1455  }
1456  auto length = read_int_from_buff(ptr2, compact_sz2);
1457  if (target_info.sql_type.is_array()) {
1458  const auto& elem_ti = target_info.sql_type.get_elem_type();
1459  length *= elem_ti.get_array_context_logical_size();
1460  }
1461  std::vector<int8_t> cpu_buffer;
1462  if (varlen_ptr && device_type_ == ExecutorDeviceType::GPU) {
1463  cpu_buffer.resize(length);
1464  const auto executor = query_mem_desc_.getExecutor();
1465  CHECK(executor);
1466  auto& data_mgr = executor->catalog_->getDataMgr();
1467  copy_from_gpu(&data_mgr,
1468  &cpu_buffer[0],
1469  static_cast<CUdeviceptr>(varlen_ptr),
1470  length,
1471  device_id_);
1472  varlen_ptr = reinterpret_cast<int64_t>(&cpu_buffer[0]);
1473  }
1474  if (target_info.sql_type.is_array()) {
1475  return build_array_target_value(target_info.sql_type,
1476  reinterpret_cast<const int8_t*>(varlen_ptr),
1477  length,
1478  translate_strings,
1479  row_set_mem_owner_,
1480  executor_);
1481  }
1482  return std::string(reinterpret_cast<char*>(varlen_ptr), length);
1483 }
#define CHECK_EQ(x, y)
Definition: Logger.h:205
std::pair< size_t, size_t > getStorageIndex(const size_t entry_idx) const
Definition: ResultSet.cpp:652
bool is_null
Definition: sqltypes.h:75
SQLTypeInfo sql_type
Definition: TargetInfo.h:42
const Executor * executor_
Definition: ResultSet.h:850
QueryMemoryDescriptor query_mem_desc_
Definition: ResultSet.h:839
#define CHECK_GE(x, y)
Definition: Logger.h:210
int64_t read_int_from_buff(const int8_t *ptr, const int8_t compact_sz)
std::vector< SerializedVarlenBufferStorage > serialized_varlen_buffer_
Definition: ResultSet.h:870
HOST DEVICE SQLTypes get_type() const
Definition: sqltypes.h:258
#define CHECK_GT(x, y)
Definition: Logger.h:209
DEVICE void ChunkIter_get_nth(ChunkIter *it, int n, bool uncompress, VarlenDatum *result, bool *is_end)
Definition: ChunkIter.cpp:181
TargetValue build_array_target_value(const int8_t *buff, const size_t buff_sz, std::shared_ptr< RowSetMemoryOwner > row_set_mem_owner)
int8_t * pointer
Definition: sqltypes.h:74
std::conditional_t< is_cuda_compiler(), DeviceArrayDatum, HostArrayDatum > ArrayDatum
Definition: sqltypes.h:130
std::shared_ptr< RowSetMemoryOwner > row_set_mem_owner_
Definition: ResultSet.h:846
bool is_agg
Definition: TargetInfo.h:40
CHECK(cgen_state)
void copy_from_gpu(Data_Namespace::DataMgr *data_mgr, void *dst, const CUdeviceptr src, const size_t num_bytes, const int device_id)
boost::optional< std::vector< ScalarTargetValue >> ArrayTargetValue
Definition: TargetValue.h:157
const std::vector< ColumnLazyFetchInfo > lazy_fetch_info_
Definition: ResultSet.h:857
#define CHECK_LT(x, y)
Definition: Logger.h:207
HOST DEVICE EncodingType get_compression() const
Definition: sqltypes.h:266
int get_array_context_logical_size() const
Definition: sqltypes.h:463
std::vector< std::vector< std::vector< const int8_t * > > > col_buffers_
Definition: ResultSet.h:858
const ExecutorDeviceType device_type_
Definition: ResultSet.h:837
bool separate_varlen_storage_valid_
Definition: ResultSet.h:871
boost::variant< ScalarTargetValue, ArrayTargetValue, GeoTargetValue, GeoTargetValuePtr > TargetValue
Definition: TargetValue.h:167
bool is_string() const
Definition: sqltypes.h:415
SQLTypeInfo get_elem_type() const
Definition: sqltypes.h:622
bool is_array() const
Definition: sqltypes.h:423
const std::vector< const int8_t * > & getColumnFrag(const size_t storge_idx, const size_t col_logical_idx, int64_t &global_idx) const
const Executor * getExecutor() const
size_t length
Definition: sqltypes.h:73
const int device_id_
Definition: ResultSet.h:838

+ Here is the call graph for this function:

+ Here is the caller graph for this function:
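
The code above wraps array and string targets in the TargetValue variant. The following consumer-side sketch is not part of OmniSciDB; it assumes the engine's headers (the include path is assumed), a populated ResultSet named rows, and the documented getNextRow API, and shows how an array target is typically unpacked:

#include "QueryEngine/ResultSet.h"  // assumed include path
#include <boost/variant.hpp>

void dump_array_column(const ResultSet& rows, const size_t col_idx) {
  while (true) {
    const auto crt_row = rows.getNextRow(/*translate_strings=*/true,
                                         /*decimal_to_double=*/true);
    if (crt_row.empty()) {
      break;  // an empty row vector signals the end of the result set
    }
    const auto& tv = crt_row[col_idx];
    // ArrayTargetValue is boost::optional<std::vector<ScalarTargetValue>>;
    // an empty optional denotes a NULL array, mirroring the code above.
    if (const auto* arr = boost::get<ArrayTargetValue>(&tv)) {
      if (!*arr) {
        continue;  // NULL array
      }
      for (const auto& elem : **arr) {
        (void)elem;  // inspect each ScalarTargetValue here
      }
    }
  }
}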

void ResultSet::moveToBegin ( ) const

Definition at line 489 of file ResultSet.cpp.

489  {
490  crt_row_buff_idx_ = 0;
491  fetched_so_far_ = 0;
492 }
size_t fetched_so_far_
Definition: ResultSet.h:843
size_t crt_row_buff_idx_
Definition: ResultSet.h:842
size_t ResultSet::parallelRowCount ( ) const
private

Definition at line 390 of file ResultSet.cpp.

References cpu_threads(), g_use_tbb_pool, and anonymous_namespace{ResultSet.cpp}::get_truncated_row_count().

390  {
391  auto execute_parallel_row_count = [this](auto counter_threads) -> size_t {
392  const size_t worker_count = cpu_threads();
393  for (size_t i = 0,
394  start_entry = 0,
395  stride = (entryCount() + worker_count - 1) / worker_count;
396  i < worker_count && start_entry < entryCount();
397  ++i, start_entry += stride) {
398  const auto end_entry = std::min(start_entry + stride, entryCount());
399  counter_threads.append(
400  [this](const size_t start, const size_t end) {
401  size_t row_count{0};
402  for (size_t i = start; i < end; ++i) {
403  if (!isRowAtEmpty(i)) {
404  ++row_count;
405  }
406  }
407  return row_count;
408  },
409  start_entry,
410  end_entry);
411  }
412  const auto row_counts = counter_threads.join();
413  const size_t row_count = std::accumulate(row_counts.begin(), row_counts.end(), 0);
414  return row_count;
415  };
416  // will fall back to futures threadpool if TBB is not enabled
417  const auto row_count =
418  g_use_tbb_pool
419  ? execute_parallel_row_count(threadpool::ThreadPool<size_t>())
420  : execute_parallel_row_count(threadpool::FuturesThreadPool<size_t>());
421 
422  return get_truncated_row_count(row_count, getLimit(), drop_first_);
423 }
size_t getLimit() const
Definition: ResultSet.cpp:984
size_t get_truncated_row_count(size_t total_row_count, size_t limit, size_t offset)
Definition: ResultSet.cpp:319
size_t drop_first_
Definition: ResultSet.h:844
bool isRowAtEmpty(const size_t index) const
size_t entryCount() const
int cpu_threads()
Definition: thread_count.h:25
bool g_use_tbb_pool
Definition: Execute.cpp:76

+ Here is the call graph for this function:
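
The work division above is a simple strided partition of the entry range followed by a sum of per-worker counts. A self-contained sketch of the same pattern, using plain std::async futures instead of the engine's thread pools; is_empty stands in for isRowAtEmpty():

#include <algorithm>
#include <cstddef>
#include <future>
#include <thread>
#include <vector>

size_t parallel_row_count(const std::vector<bool>& is_empty) {
  const size_t entry_count = is_empty.size();
  const size_t worker_count = std::max(1u, std::thread::hardware_concurrency());
  const size_t stride = (entry_count + worker_count - 1) / worker_count;

  // One future per stride [start, end); each worker counts non-empty entries.
  std::vector<std::future<size_t>> counters;
  for (size_t start = 0; start < entry_count; start += stride) {
    const size_t end = std::min(start + stride, entry_count);
    counters.emplace_back(std::async(std::launch::async, [&is_empty, start, end] {
      size_t count = 0;
      for (size_t i = start; i < end; ++i) {
        if (!is_empty[i]) {
          ++count;
        }
      }
      return count;
    }));
  }
  // Join the workers and accumulate the per-stride counts, as the
  // join()/accumulate pair does in the member function above.
  size_t row_count = 0;
  for (auto& counter : counters) {
    row_count += counter.get();
  }
  return row_count;
}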

void ResultSet::parallelTop ( const std::list< Analyzer::OrderEntry > &  order_entries,
const size_t  top_n 
)
private

Definition at line 612 of file ResultSet.cpp.

References cpu_threads(), and DEBUG_TIMER.

613  {
614  auto timer = DEBUG_TIMER(__func__);
615  const size_t step = cpu_threads();
616  std::vector<std::vector<uint32_t>> strided_permutations(step);
617  std::vector<std::future<void>> init_futures;
618  for (size_t start = 0; start < step; ++start) {
619  init_futures.emplace_back(
620  std::async(std::launch::async, [this, start, step, &strided_permutations] {
621  strided_permutations[start] = initPermutationBuffer(start, step);
622  }));
623  }
624  for (auto& init_future : init_futures) {
625  init_future.wait();
626  }
627  for (auto& init_future : init_futures) {
628  init_future.get();
629  }
630  auto compare = createComparator(order_entries, true);
631  std::vector<std::future<void>> top_futures;
632  for (auto& strided_permutation : strided_permutations) {
633  top_futures.emplace_back(
634  std::async(std::launch::async, [&strided_permutation, &compare, top_n] {
635  topPermutation(strided_permutation, top_n, compare);
636  }));
637  }
638  for (auto& top_future : top_futures) {
639  top_future.wait();
640  }
641  for (auto& top_future : top_futures) {
642  top_future.get();
643  }
644  permutation_.reserve(strided_permutations.size() * top_n);
645  for (const auto& strided_permutation : strided_permutations) {
646  permutation_.insert(
647  permutation_.end(), strided_permutation.begin(), strided_permutation.end());
648  }
649  topPermutation(permutation_, top_n, compare);
650 }
std::vector< uint32_t > permutation_
Definition: ResultSet.h:847
std::vector< uint32_t > initPermutationBuffer(const size_t start, const size_t step)
Definition: ResultSet.cpp:589
static void topPermutation(std::vector< uint32_t > &to_sort, const size_t n, const std::function< bool(const uint32_t, const uint32_t)> compare)
Definition: ResultSet.cpp:871
#define DEBUG_TIMER(name)
Definition: Logger.h:313
std::function< bool(const uint32_t, const uint32_t)> createComparator(const std::list< Analyzer::OrderEntry > &order_entries, const bool use_heap)
Definition: ResultSet.h:774
int cpu_threads()
Definition: thread_count.h:25

+ Here is the call graph for this function:
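
parallelTop() spreads the permutation across strides, takes a local top_n in each stride concurrently, then re-selects the top_n from the merged candidates. A self-contained sketch of that divide-and-conquer shape; std::partial_sort (with a comparator that orders preferred candidates first) stands in for the engine's heap-based topPermutation, and all names are illustrative:

#include <algorithm>
#include <cstddef>
#include <cstdint>
#include <functional>
#include <future>
#include <thread>
#include <vector>

std::vector<uint32_t> parallel_top(const size_t entry_count,
                                   const size_t top_n,
                                   const std::function<bool(uint32_t, uint32_t)>& compare) {
  const size_t step = std::max<size_t>(1, std::thread::hardware_concurrency());
  std::vector<std::vector<uint32_t>> strided(step);
  std::vector<std::future<void>> futures;
  for (size_t start = 0; start < step; ++start) {
    futures.emplace_back(std::async(std::launch::async, [&, start] {
      // Strided permutation buffer: indices start, start + step, start + 2 * step, ...
      for (size_t i = start; i < entry_count; i += step) {
        strided[start].push_back(static_cast<uint32_t>(i));
      }
      // Local top_n within this stride.
      const size_t keep = std::min(top_n, strided[start].size());
      std::partial_sort(strided[start].begin(), strided[start].begin() + keep,
                        strided[start].end(), compare);
      strided[start].resize(keep);
    }));
  }
  for (auto& f : futures) {
    f.get();
  }
  // Merge the per-stride winners and reduce them to the global top_n.
  std::vector<uint32_t> merged;
  for (const auto& s : strided) {
    merged.insert(merged.end(), s.begin(), s.end());
  }
  const size_t keep = std::min(top_n, merged.size());
  std::partial_sort(merged.begin(), merged.begin() + keep, merged.end(), compare);
  merged.resize(keep);
  return merged;
}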

void ResultSet::radixSortOnCpu ( const std::list< Analyzer::OrderEntry > &  order_entries) const
private

Definition at line 928 of file ResultSet.cpp.

References apply_permutation_cpu(), CHECK(), CHECK_EQ, DEBUG_TIMER, and sort_groups_cpu().

929  {
930  auto timer = DEBUG_TIMER(__func__);
932  std::vector<int64_t> tmp_buff(query_mem_desc_.getEntryCount());
933  std::vector<int32_t> idx_buff(query_mem_desc_.getEntryCount());
934  CHECK_EQ(size_t(1), order_entries.size());
935  auto buffer_ptr = storage_->getUnderlyingBuffer();
936  for (const auto& order_entry : order_entries) {
937  const auto target_idx = order_entry.tle_no - 1;
938  const auto sortkey_val_buff = reinterpret_cast<int64_t*>(
939  buffer_ptr + query_mem_desc_.getColOffInBytes(target_idx));
940  const auto chosen_bytes = query_mem_desc_.getPaddedSlotWidthBytes(target_idx);
941  sort_groups_cpu(sortkey_val_buff,
942  &idx_buff[0],
943  query_mem_desc_.getEntryCount(),
944  order_entry.is_desc,
945  chosen_bytes);
946  apply_permutation_cpu(reinterpret_cast<int64_t*>(buffer_ptr),
947  &idx_buff[0],
948  query_mem_desc_.getEntryCount(),
949  &tmp_buff[0],
950  sizeof(int64_t));
951  for (size_t target_idx = 0; target_idx < query_mem_desc_.getSlotCount();
952  ++target_idx) {
953  if (static_cast<int>(target_idx) == order_entry.tle_no - 1) {
954  continue;
955  }
956  const auto chosen_bytes = query_mem_desc_.getPaddedSlotWidthBytes(target_idx);
957  const auto satellite_val_buff = reinterpret_cast<int64_t*>(
958  buffer_ptr + query_mem_desc_.getColOffInBytes(target_idx));
959  apply_permutation_cpu(satellite_val_buff,
960  &idx_buff[0],
961  query_mem_desc_.getEntryCount(),
962  &tmp_buff[0],
963  chosen_bytes);
964  }
965  }
966 }
#define CHECK_EQ(x, y)
Definition: Logger.h:205
void sort_groups_cpu(int64_t *val_buff, int32_t *idx_buff, const uint64_t entry_count, const bool desc, const uint32_t chosen_bytes)
Definition: InPlaceSort.cpp:27
QueryMemoryDescriptor query_mem_desc_
Definition: ResultSet.h:839
std::unique_ptr< ResultSetStorage > storage_
Definition: ResultSet.h:840
void apply_permutation_cpu(int64_t *val_buff, int32_t *idx_buff, const uint64_t entry_count, int64_t *tmp_buff, const uint32_t chosen_bytes)
Definition: InPlaceSort.cpp:46
CHECK(cgen_state)
const int8_t getPaddedSlotWidthBytes(const size_t slot_idx) const
#define DEBUG_TIMER(name)
Definition: Logger.h:313
size_t getColOffInBytes(const size_t col_idx) const

+ Here is the call graph for this function:
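
The CPU path sorts the single ORDER BY key column while recording the resulting permutation in idx_buff, then replays that permutation over every other slot so rows stay aligned. A self-contained sketch of the sort-by-key-plus-satellite-columns idea; std::stable_sort over an index buffer stands in for sort_groups_cpu/apply_permutation_cpu:

#include <algorithm>
#include <cstddef>
#include <cstdint>
#include <numeric>
#include <vector>

void sort_by_key_with_satellites(std::vector<int64_t>& key_col,
                                 std::vector<std::vector<int64_t>>& satellite_cols,
                                 const bool desc) {
  const size_t entry_count = key_col.size();
  std::vector<int32_t> idx_buff(entry_count);
  std::iota(idx_buff.begin(), idx_buff.end(), 0);

  // Order the index buffer by the key column (ascending or descending).
  std::stable_sort(idx_buff.begin(), idx_buff.end(), [&](int32_t a, int32_t b) {
    return desc ? key_col[a] > key_col[b] : key_col[a] < key_col[b];
  });

  // Apply the permutation through a temporary buffer, mirroring tmp_buff above.
  auto apply_permutation = [&](std::vector<int64_t>& col) {
    std::vector<int64_t> tmp_buff(entry_count);
    for (size_t i = 0; i < entry_count; ++i) {
      tmp_buff[i] = col[idx_buff[i]];
    }
    col.swap(tmp_buff);
  };
  apply_permutation(key_col);
  for (auto& col : satellite_cols) {
    apply_permutation(col);
  }
}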

void ResultSet::radixSortOnGpu ( const std::list< Analyzer::OrderEntry > &  order_entries) const
private

Definition at line 893 of file ResultSet.cpp.

References copy_group_by_buffers_from_gpu(), create_dev_group_by_buffers(), DEBUG_TIMER, GPU, inplace_sort_gpu(), and KernelPerFragment.

894  {
895  auto timer = DEBUG_TIMER(__func__);
896  auto data_mgr = &executor_->catalog_->getDataMgr();
897  const int device_id{0};
898  CudaAllocator cuda_allocator(data_mgr, device_id);
899  std::vector<int64_t*> group_by_buffers(executor_->blockSize());
900  group_by_buffers[0] = reinterpret_cast<int64_t*>(storage_->getUnderlyingBuffer());
901  auto dev_group_by_buffers =
902  create_dev_group_by_buffers(&cuda_allocator,
903  group_by_buffers,
904  query_mem_desc_,
905  executor_->blockSize(),
906  executor_->gridSize(),
907  device_id,
908  ExecutorDispatchMode::KernelPerFragment,
909  -1,
910  true,
911  true,
912  false,
913  nullptr);
914  inplace_sort_gpu(
915  order_entries, query_mem_desc_, dev_group_by_buffers, data_mgr, device_id);
916  copy_group_by_buffers_from_gpu(
917  data_mgr,
918  group_by_buffers,
919  query_mem_desc_.getBufferSizeBytes(ExecutorDeviceType::GPU),
920  dev_group_by_buffers.second,
921  query_mem_desc_,
922  executor_->blockSize(),
923  executor_->gridSize(),
924  device_id,
925  false);
926 }
size_t getBufferSizeBytes(const RelAlgExecutionUnit &ra_exe_unit, const unsigned thread_count, const ExecutorDeviceType device_type) const
GpuGroupByBuffers create_dev_group_by_buffers(DeviceAllocator *cuda_allocator, const std::vector< int64_t * > &group_by_buffers, const QueryMemoryDescriptor &query_mem_desc, const unsigned block_size_x, const unsigned grid_size_x, const int device_id, const ExecutorDispatchMode dispatch_mode, const int64_t num_input_rows, const bool prepend_index_buffer, const bool always_init_group_by_on_host, const bool use_bump_allocator, Allocator *insitu_allocator)
Definition: GpuMemUtils.cpp:60
const Executor * executor_
Definition: ResultSet.h:850
QueryMemoryDescriptor query_mem_desc_
Definition: ResultSet.h:839
std::unique_ptr< ResultSetStorage > storage_
Definition: ResultSet.h:840
void inplace_sort_gpu(const std::list< Analyzer::OrderEntry > &order_entries, const QueryMemoryDescriptor &query_mem_desc, const GpuGroupByBuffers &group_by_buffers, Data_Namespace::DataMgr *data_mgr, const int device_id)
void copy_group_by_buffers_from_gpu(Data_Namespace::DataMgr *data_mgr, const std::vector< int64_t * > &group_by_buffers, const size_t groups_buffer_size, const CUdeviceptr group_by_dev_buffers_mem, const QueryMemoryDescriptor &query_mem_desc, const unsigned block_size_x, const unsigned grid_size_x, const int device_id, const bool prepend_index_buffer)
#define DEBUG_TIMER(name)
Definition: Logger.h:313

+ Here is the call graph for this function:

size_t ResultSet::rowCount ( const bool  force_parallel = false) const

Definition at line 335 of file ResultSet.cpp.

References CHECK_GE, and Projection.

335  {
336  if (just_explain_) {
337  return 1;
338  }
339  if (!permutation_.empty()) {
340  const auto limited_row_count = keep_first_ + drop_first_;
341  return limited_row_count ? std::min(limited_row_count, permutation_.size())
342  : permutation_.size();
343  }
344  if (cached_row_count_ != -1) {
345  CHECK_GE(cached_row_count_, 0);
346  return cached_row_count_;
347  }
348  if (!storage_) {
349  return 0;
350  }
351  if (permutation_.empty() &&
352  query_mem_desc_.getQueryDescriptionType() == QueryDescriptionType::Projection) {
353  return binSearchRowCount();
354  }
355  if (force_parallel || entryCount() > 20000) {
356  return parallelRowCount();
357  }
358  std::lock_guard<std::mutex> lock(row_iteration_mutex_);
359  moveToBegin();
360  size_t row_count{0};
361  while (true) {
362  auto crt_row = getNextRowUnlocked(false, false);
363  if (crt_row.empty()) {
364  break;
365  }
366  ++row_count;
367  }
368  moveToBegin();
369  return row_count;
370 }
std::mutex row_iteration_mutex_
Definition: ResultSet.h:875
void moveToBegin() const
Definition: ResultSet.cpp:489
QueryMemoryDescriptor query_mem_desc_
Definition: ResultSet.h:839
#define CHECK_GE(x, y)
Definition: Logger.h:210
std::unique_ptr< ResultSetStorage > storage_
Definition: ResultSet.h:840
size_t keep_first_
Definition: ResultSet.h:845
const bool just_explain_
Definition: ResultSet.h:873
std::vector< uint32_t > permutation_
Definition: ResultSet.h:847
size_t parallelRowCount() const
Definition: ResultSet.cpp:390
size_t drop_first_
Definition: ResultSet.h:844
std::atomic< ssize_t > cached_row_count_
Definition: ResultSet.h:874
QueryDescriptionType getQueryDescriptionType() const
size_t entryCount() const
std::vector< TargetValue > getNextRowUnlocked(const bool translate_strings, const bool decimal_to_double) const
size_t binSearchRowCount() const
Definition: ResultSet.cpp:377
ResultSetRowIterator ResultSet::rowIterator ( size_t  from_logical_index,
bool  translate_strings,
bool  decimal_to_double 
) const
inline

Definition at line 336 of file ResultSet.h.

Referenced by rowIterator().

338  {
339  ResultSetRowIterator rowIterator(this, translate_strings, decimal_to_double);
340 
341  // move to first logical position
342  ++rowIterator;
343 
344  for (size_t index = 0; index < from_logical_index; index++) {
345  ++rowIterator;
346  }
347 
348  return rowIterator;
349  }
double decimal_to_double(const SQLTypeInfo &otype, int64_t oval)
ResultSetRowIterator rowIterator(size_t from_logical_index, bool translate_strings, bool decimal_to_double) const
Definition: ResultSet.h:336

+ Here is the caller graph for this function:

ResultSetRowIterator ResultSet::rowIterator ( bool  translate_strings,
bool  decimal_to_double 
) const
inline

Definition at line 351 of file ResultSet.h.

References rowIterator().

352  {
353  return rowIterator(0, translate_strings, decimal_to_double);
354  }
double decimal_to_double(const SQLTypeInfo &otype, int64_t oval)
ResultSetRowIterator rowIterator(size_t from_logical_index, bool translate_strings, bool decimal_to_double) const
Definition: ResultSet.h:336

+ Here is the call graph for this function:
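
A hypothetical usage sketch for the iterator overloads above (assumes the engine's headers, a populated ResultSet named rows, and that dereferencing the iterator yields one std::vector<TargetValue> per row; rowCount() bounds the loop):

#include "QueryEngine/ResultSet.h"  // assumed include path

void walk_rows(const ResultSet& rows) {
  auto row_iter = rows.rowIterator(/*translate_strings=*/true,
                                   /*decimal_to_double=*/true);
  const size_t row_count = rows.rowCount();
  for (size_t i = 0; i < row_count; ++i, ++row_iter) {
    const std::vector<TargetValue> crt_row = *row_iter;  // one TargetValue per column
    // ... inspect crt_row[0 .. rows.colCount() - 1] here ...
    (void)crt_row;
  }
}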

void ResultSet::serialize ( TSerializedRows &  serialized_rows) const
void ResultSet::serializeCountDistinctColumns ( TSerializedRows &  ) const
private
void ResultSet::serializeProjection ( TSerializedRows &  serialized_rows) const
private
void ResultSet::serializeVarlenAggColumn ( int8_t *  buf,
std::vector< std::string > &  varlen_bufer 
) const
private
void ResultSet::setCachedRowCount ( const size_t  row_count) const

Definition at line 372 of file ResultSet.cpp.

References CHECK().

372  {
373  CHECK(cached_row_count_ == -1 || cached_row_count_ == static_cast<ssize_t>(row_count));
374  cached_row_count_ = row_count;
375 }
CHECK(cgen_state)
std::atomic< ssize_t > cached_row_count_
Definition: ResultSet.h:874

+ Here is the call graph for this function:

void ResultSet::setGeoReturnType ( const GeoReturnType  val)
inline

Definition at line 509 of file ResultSet.h.

References geo_return_type_.

509 { geo_return_type_ = val; }
GeoReturnType geo_return_type_
Definition: ResultSet.h:878
void ResultSet::setKernelQueueTime ( const int64_t  kernel_queue_time)

Definition at line 472 of file ResultSet.cpp.

472  {
473  timings_.kernel_queue_time = kernel_queue_time;
474 }
QueryExecutionTimings timings_
Definition: ResultSet.h:849
void ResultSet::setQueueTime ( const int64_t  queue_time)

Definition at line 468 of file ResultSet.cpp.

468  {
469  timings_.executor_queue_time = queue_time;
470 }
QueryExecutionTimings timings_
Definition: ResultSet.h:849
void ResultSet::setSeparateVarlenStorageValid ( const bool  val)
inline

Definition at line 541 of file ResultSet.h.

References separate_varlen_storage_valid_.

541  {
542  separate_varlen_storage_valid_ = val;
543  }
bool separate_varlen_storage_valid_
Definition: ResultSet.h:871
void ResultSet::sort ( const std::list< Analyzer::OrderEntry > &  order_entries,
const size_t  top_n 
)

Definition at line 518 of file ResultSet.cpp.

References Executor::baseline_threshold, CHECK(), CHECK_EQ, DEBUG_TIMER, g_enable_watchdog, LOG, and logger::WARNING.

519  {
520  auto timer = DEBUG_TIMER(__func__);
521  CHECK_EQ(-1, cached_row_count_);
522  CHECK(!targets_.empty());
523 #ifdef HAVE_CUDA
524  if (canUseFastBaselineSort(order_entries, top_n)) {
525  baselineSort(order_entries, top_n);
526  return;
527  }
528 #endif // HAVE_CUDA
529  if (query_mem_desc_.sortOnGpu()) {
530  try {
531  radixSortOnGpu(order_entries);
532  } catch (const OutOfMemory&) {
533  LOG(WARNING) << "Out of GPU memory during sort, finish on CPU";
534  radixSortOnCpu(order_entries);
535  } catch (const std::bad_alloc&) {
536  LOG(WARNING) << "Out of GPU memory during sort, finish on CPU";
537  radixSortOnCpu(order_entries);
538  }
539  return;
540  }
541  // This check isn't strictly required, but allows the index buffer to be 32-bit.
542  if (query_mem_desc_.getEntryCount() > std::numeric_limits<uint32_t>::max()) {
543  throw RowSortException("Sorting more than 4B elements not supported");
544  }
545 
546  CHECK(permutation_.empty());
547 
548  const bool use_heap{order_entries.size() == 1 && top_n};
549  if (use_heap && entryCount() > 100000) {
550  if (g_enable_watchdog && (entryCount() > 20000000)) {
551  throw WatchdogException("Sorting the result would be too slow");
552  }
553  parallelTop(order_entries, top_n);
554  return;
555  }
556 
557  if (g_enable_watchdog && (entryCount() > Executor::baseline_threshold)) {
558  throw WatchdogException("Sorting the result would be too slow");
559  }
560 
561  permutation_ = initPermutationBuffer(0, 1);
562 
563  auto compare = createComparator(order_entries, use_heap);
564 
565  if (use_heap) {
566  topPermutation(permutation_, top_n, compare);
567  } else {
568  sortPermutation(compare);
569  }
570 }
void baselineSort(const std::list< Analyzer::OrderEntry > &order_entries, const size_t top_n)
#define CHECK_EQ(x, y)
Definition: Logger.h:205
bool g_enable_watchdog
#define LOG(tag)
Definition: Logger.h:188
static const size_t baseline_threshold
Definition: Execute.h:889
QueryMemoryDescriptor query_mem_desc_
Definition: ResultSet.h:839
std::vector< uint32_t > permutation_
Definition: ResultSet.h:847
std::vector< uint32_t > initPermutationBuffer(const size_t start, const size_t step)
Definition: ResultSet.cpp:589
void radixSortOnCpu(const std::list< Analyzer::OrderEntry > &order_entries) const
Definition: ResultSet.cpp:928
const std::vector< TargetInfo > targets_
Definition: ResultSet.h:836
CHECK(cgen_state)
bool canUseFastBaselineSort(const std::list< Analyzer::OrderEntry > &order_entries, const size_t top_n)
std::atomic< ssize_t > cached_row_count_
Definition: ResultSet.h:874
static void topPermutation(std::vector< uint32_t > &to_sort, const size_t n, const std::function< bool(const uint32_t, const uint32_t)> compare)
Definition: ResultSet.cpp:871
void radixSortOnGpu(const std::list< Analyzer::OrderEntry > &order_entries) const
Definition: ResultSet.cpp:893
void sortPermutation(const std::function< bool(const uint32_t, const uint32_t)> compare)
Definition: ResultSet.cpp:887
size_t entryCount() const
void parallelTop(const std::list< Analyzer::OrderEntry > &order_entries, const size_t top_n)
Definition: ResultSet.cpp:612
#define DEBUG_TIMER(name)
Definition: Logger.h:313
std::function< bool(const uint32_t, const uint32_t)> createComparator(const std::list< Analyzer::OrderEntry > &order_entries, const bool use_heap)
Definition: ResultSet.h:774

+ Here is the call graph for this function:
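
When neither the GPU path nor the heap-based top_n path applies, sort() orders the permutation_ index buffer with a comparator built from the ORDER BY entries instead of moving row data. A self-contained sketch of that index-permutation sort; OrderEntry here is a simplified stand-in for Analyzer::OrderEntry, and the column layout is illustrative:

#include <algorithm>
#include <cstddef>
#include <cstdint>
#include <list>
#include <numeric>
#include <vector>

struct OrderEntry {
  int tle_no;    // 1-based target index, as in Analyzer::OrderEntry
  bool is_desc;  // descending?
};

std::vector<uint32_t> sort_permutation(
    const std::vector<std::vector<int64_t>>& columns,  // columns[col][row]
    const std::list<OrderEntry>& order_entries) {
  const size_t row_count = columns.empty() ? 0 : columns.front().size();
  std::vector<uint32_t> permutation(row_count);
  std::iota(permutation.begin(), permutation.end(), 0);

  // Comparator over row indices: compare by each order entry in turn.
  const auto compare = [&](const uint32_t lhs, const uint32_t rhs) {
    for (const auto& entry : order_entries) {
      const auto& col = columns[entry.tle_no - 1];
      if (col[lhs] == col[rhs]) {
        continue;  // tie on this key, fall through to the next entry
      }
      return entry.is_desc ? col[lhs] > col[rhs] : col[lhs] < col[rhs];
    }
    return false;  // equal on all keys
  };
  std::sort(permutation.begin(), permutation.end(), compare);
  return permutation;
}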

void ResultSet::sortPermutation ( const std::function< bool(const uint32_t, const uint32_t)>  compare)
private

Definition at line 887 of file ResultSet.cpp.

References DEBUG_TIMER.

888  {
889  auto timer = DEBUG_TIMER(__func__);
890  std::sort(permutation_.begin(), permutation_.end(), compare);
891 }
std::vector< uint32_t > permutation_
Definition: ResultSet.h:847
#define DEBUG_TIMER(name)
Definition: Logger.h:313
void ResultSet::syncEstimatorBuffer ( ) const

Definition at line 453 of file ResultSet.cpp.

References CHECK(), CHECK_EQ, checked_calloc(), copy_from_gpu(), and GPU.

453  {
454  CHECK(device_type_ == ExecutorDeviceType::GPU);
455  CHECK(!host_estimator_buffer_);
456  CHECK_EQ(size_t(0), estimator_->getBufferSize() % sizeof(int64_t));
457  host_estimator_buffer_ =
458  static_cast<int8_t*>(checked_calloc(estimator_->getBufferSize(), 1));
459  CHECK(device_estimator_buffer_);
460  auto device_buffer_ptr = device_estimator_buffer_->getMemoryPtr();
461  copy_from_gpu(data_mgr_,
462  host_estimator_buffer_,
463  reinterpret_cast<CUdeviceptr>(device_buffer_ptr),
464  estimator_->getBufferSize(),
465  device_id_);
466 }
#define CHECK_EQ(x, y)
Definition: Logger.h:205
virtual int8_t * getMemoryPtr()=0
CHECK(cgen_state)
void copy_from_gpu(Data_Namespace::DataMgr *data_mgr, void *dst, const CUdeviceptr src, const size_t num_bytes, const int device_id)
void * checked_calloc(const size_t nmemb, const size_t size)
Definition: checked_alloc.h:52
Data_Namespace::DataMgr * data_mgr_
Definition: ResultSet.h:865
const std::shared_ptr< const Analyzer::Estimator > estimator_
Definition: ResultSet.h:862
int8_t * host_estimator_buffer_
Definition: ResultSet.h:864
const ExecutorDeviceType device_type_
Definition: ResultSet.h:837
Data_Namespace::AbstractBuffer * device_estimator_buffer_
Definition: ResultSet.h:863
const int device_id_
Definition: ResultSet.h:838

+ Here is the call graph for this function:

void ResultSet::topPermutation ( std::vector< uint32_t > &  to_sort,
const size_t  n,
const std::function< bool(const uint32_t, const uint32_t)>  compare 
)
staticprivate

Definition at line 871 of file ResultSet.cpp.

References DEBUG_TIMER.

874  {
875  auto timer = DEBUG_TIMER(__func__);
876  std::make_heap(to_sort.begin(), to_sort.end(), compare);
877  std::vector<uint32_t> permutation_top;
878  permutation_top.reserve(n);
879  for (size_t i = 0; i < n && !to_sort.empty(); ++i) {
880  permutation_top.push_back(to_sort.front());
881  std::pop_heap(to_sort.begin(), to_sort.end(), compare);
882  to_sort.pop_back();
883  }
884  to_sort.swap(permutation_top);
885 }
#define DEBUG_TIMER(name)
Definition: Logger.h:313
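
The selection above is a classic heap-based top-n: std::make_heap places the element that compares greatest under the supplied comparator at the front, and n pop_heap/pop_back rounds extract the winners in order (which is why createComparator is invoked with use_heap set on this path). A self-contained sketch of the same pattern, with illustrative names:

#include <algorithm>
#include <cstddef>
#include <cstdint>
#include <functional>
#include <vector>

std::vector<uint32_t> top_n_indices(std::vector<uint32_t> to_sort,  // by value: scratch copy
                                    const size_t n,
                                    const std::function<bool(uint32_t, uint32_t)>& compare) {
  std::make_heap(to_sort.begin(), to_sort.end(), compare);
  std::vector<uint32_t> top;
  top.reserve(n);
  for (size_t i = 0; i < n && !to_sort.empty(); ++i) {
    top.push_back(to_sort.front());  // current heap maximum under `compare`
    std::pop_heap(to_sort.begin(), to_sort.end(), compare);
    to_sort.pop_back();
  }
  return top;
}
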
static std::unique_ptr<ResultSet> ResultSet::unserialize ( const TSerializedRows &  serialized_rows,
const Executor  
)
static
void ResultSet::unserializeCountDistinctColumns ( const TSerializedRows &  )
private
void ResultSet::updateStorageEntryCount ( const size_t  new_entry_count)
inline

Definition at line 364 of file ResultSet.h.

References CHECK(), QueryMemoryDescriptor::getQueryDescriptionType(), Projection, query_mem_desc_, QueryMemoryDescriptor::setEntryCount(), and storage_.

364  {
365  CHECK(query_mem_desc_.getQueryDescriptionType() == QueryDescriptionType::Projection);
366  query_mem_desc_.setEntryCount(new_entry_count);
367  CHECK(storage_);
368  storage_->updateEntryCount(new_entry_count);
369  }
void setEntryCount(const size_t val)
QueryMemoryDescriptor query_mem_desc_
Definition: ResultSet.h:839
std::unique_ptr< ResultSetStorage > storage_
Definition: ResultSet.h:840
CHECK(cgen_state)
QueryDescriptionType getQueryDescriptionType() const

+ Here is the call graph for this function:

Friends And Related Function Documentation

friend class ColumnarResults
friend

Definition at line 887 of file ResultSet.h.

friend class ResultSetManager
friend

Definition at line 885 of file ResultSet.h.

friend class ResultSetRowIterator
friend

Definition at line 886 of file ResultSet.h.

Member Data Documentation

AppendedStorage ResultSet::appended_storage_
private

Definition at line 841 of file ResultSet.h.

Referenced by copyColumnIntoBuffer().

std::atomic<ssize_t> ResultSet::cached_row_count_
mutableprivate

Definition at line 874 of file ResultSet.h.

std::vector<std::shared_ptr<std::list<ChunkIter> > > ResultSet::chunk_iters_
private

Definition at line 853 of file ResultSet.h.

Referenced by holdChunkIterators().

std::list<std::shared_ptr<Chunk_NS::Chunk> > ResultSet::chunks_
private

Definition at line 852 of file ResultSet.h.

Referenced by holdChunks().

std::vector<std::vector<std::vector<const int8_t*> > > ResultSet::col_buffers_
private
std::unique_ptr<ResultSetComparator<ColumnWiseTargetAccessor> > ResultSet::column_wise_comparator_
private

Definition at line 883 of file ResultSet.h.

Referenced by createComparator().

std::vector<std::vector<int64_t> > ResultSet::consistent_frag_sizes_
private

Definition at line 860 of file ResultSet.h.

Referenced by getColumnFrag().

size_t ResultSet::crt_row_buff_idx_
mutableprivate

Definition at line 842 of file ResultSet.h.

Data_Namespace::DataMgr* ResultSet::data_mgr_
private

Definition at line 865 of file ResultSet.h.

Data_Namespace::AbstractBuffer* ResultSet::device_estimator_buffer_ {nullptr}
private

Definition at line 863 of file ResultSet.h.

const int ResultSet::device_id_
private

Definition at line 838 of file ResultSet.h.

Referenced by getVarlenOrderEntry(), makeGeoTargetValue(), and makeVarlenTargetValue().

const ExecutorDeviceType ResultSet::device_type_
private
size_t ResultSet::drop_first_
private

Definition at line 844 of file ResultSet.h.

const std::shared_ptr<const Analyzer::Estimator> ResultSet::estimator_
private

Definition at line 862 of file ResultSet.h.

const Executor* ResultSet::executor_
private

Definition at line 850 of file ResultSet.h.

Referenced by makeTargetValue(), and makeVarlenTargetValue().

std::string ResultSet::explanation_
private

Definition at line 872 of file ResultSet.h.

size_t ResultSet::fetched_so_far_
mutableprivate

Definition at line 843 of file ResultSet.h.

std::vector<std::vector<std::vector<int64_t> > > ResultSet::frag_offsets_
private

Definition at line 859 of file ResultSet.h.

Referenced by getColumnFrag().

GeoReturnType ResultSet::geo_return_type_
mutableprivate

Definition at line 878 of file ResultSet.h.

Referenced by getGeoReturnType(), makeGeoTargetValue(), and setGeoReturnType().

int8_t* ResultSet::host_estimator_buffer_ {nullptr}
mutableprivate

Definition at line 864 of file ResultSet.h.

const bool ResultSet::just_explain_
private

Definition at line 873 of file ResultSet.h.

size_t ResultSet::keep_first_
private

Definition at line 845 of file ResultSet.h.

const std::vector<ColumnLazyFetchInfo> ResultSet::lazy_fetch_info_
private
std::vector<std::vector<int8_t> > ResultSet::literal_buffers_
private

Definition at line 856 of file ResultSet.h.

Referenced by holdLiterals().

std::vector<uint32_t> ResultSet::permutation_
private

Definition at line 847 of file ResultSet.h.

Referenced by entryCount(), and isPermutationBufferEmpty().

std::mutex ResultSet::row_iteration_mutex_
mutableprivate

Definition at line 875 of file ResultSet.h.

std::shared_ptr<RowSetMemoryOwner> ResultSet::row_set_mem_owner_
private
std::unique_ptr<ResultSetComparator<RowWiseTargetAccessor> > ResultSet::row_wise_comparator_
private

Definition at line 882 of file ResultSet.h.

Referenced by createComparator().

bool ResultSet::separate_varlen_storage_valid_
private
std::vector<SerializedVarlenBufferStorage> ResultSet::serialized_varlen_buffer_
private

Definition at line 870 of file ResultSet.h.

Referenced by makeGeoTargetValue(), and makeVarlenTargetValue().

const std::vector<TargetInfo> ResultSet::targets_
private

Definition at line 836 of file ResultSet.h.

Referenced by ResultSetStorage::isEmptyEntryColumnar(), isGeoColOnGpu(), and lazyReadInt().

QueryExecutionTimings ResultSet::timings_
private

Definition at line 849 of file ResultSet.h.


The documentation for this class was generated from the following files: