OmniSciDB  72c90bc290
RangeJoinHashTable Class Reference (final)

#include <RangeJoinHashTable.h>


Public Member Functions

 RangeJoinHashTable (const std::shared_ptr< Analyzer::BinOper > condition, const JoinType join_type, const Analyzer::RangeOper *range_expr, std::shared_ptr< Analyzer::ColumnVar > inner_col_expr, const std::vector< InputTableInfo > &query_infos, const Data_Namespace::MemoryLevel memory_level, ColumnCacheMap &column_cache, Executor *executor, const std::vector< InnerOuter > &inner_outer_pairs, const int device_count, const RegisteredQueryHint &query_hints, const HashTableBuildDagMap &hashtable_build_dag_map, const TableIdToNodeMap &table_id_to_node_map)
 
 ~RangeJoinHashTable () override=default
 
llvm::Value * codegenKey (const CompilationOptions &co, llvm::Value *offset)
 
HashJoinMatchingSet codegenMatchingSetWithOffset (const CompilationOptions &, const size_t, llvm::Value *)
 
- Public Member Functions inherited from BoundingBoxIntersectJoinHashTable
 BoundingBoxIntersectJoinHashTable (const std::shared_ptr< Analyzer::BinOper > condition, const JoinType join_type, const std::vector< InputTableInfo > &query_infos, const Data_Namespace::MemoryLevel memory_level, ColumnCacheMap &column_cache, Executor *executor, const std::vector< InnerOuter > &inner_outer_pairs, const int device_count, const RegisteredQueryHint &query_hints, const HashTableBuildDagMap &hashtable_build_dag_map, const TableIdToNodeMap &table_id_to_node_map)
 
virtual ~BoundingBoxIntersectJoinHashTable ()
 
- Public Member Functions inherited from HashJoin
virtual std::string toStringFlat64 (const ExecutorDeviceType device_type, const int device_id) const
 
virtual std::string toStringFlat32 (const ExecutorDeviceType device_type, const int device_id) const
 
JoinColumn fetchJoinColumn (const Analyzer::ColumnVar *hash_col, const std::vector< Fragmenter_Namespace::FragmentInfo > &fragment_info, const Data_Namespace::MemoryLevel effective_memory_level, const int device_id, std::vector< std::shared_ptr< Chunk_NS::Chunk >> &chunks_owner, DeviceAllocator *dev_buff_owner, std::vector< std::shared_ptr< void >> &malloc_owner, Executor *executor, ColumnCacheMap *column_cache)
 
HashTable * getHashTableForDevice (const size_t device_id) const
 
size_t getJoinHashBufferSize (const ExecutorDeviceType device_type)
 
size_t getJoinHashBufferSize (const ExecutorDeviceType device_type, const int device_id) const
 
int8_t * getJoinHashBuffer (const ExecutorDeviceType device_type, const int device_id) const
 
void freeHashBufferMemory ()
 

Static Public Member Functions

static std::shared_ptr< RangeJoinHashTable > getInstance (const std::shared_ptr< Analyzer::BinOper > condition, const Analyzer::RangeOper *range_expr, const std::vector< InputTableInfo > &query_infos, const Data_Namespace::MemoryLevel memory_level, const JoinType join_type, const int device_count, ColumnCacheMap &column_cache, Executor *executor, const HashTableBuildDagMap &hashtable_build_dag_map, const RegisteredQueryHint &query_hints, const TableIdToNodeMap &table_id_to_node_map)
 
- Static Public Member Functions inherited from BoundingBoxIntersectJoinHashTable
static std::shared_ptr< BoundingBoxIntersectJoinHashTable > getInstance (const std::shared_ptr< Analyzer::BinOper > condition, const std::vector< InputTableInfo > &query_infos, const Data_Namespace::MemoryLevel memory_level, const JoinType join_type, const int device_count, ColumnCacheMap &column_cache, Executor *executor, const HashTableBuildDagMap &hashtable_build_dag_map, const RegisteredQueryHint &query_hint, const TableIdToNodeMap &table_id_to_node_map)
 Make hash table from an in-flight SQL query's parse tree etc.
 
static void invalidateCache ()
 
static void markCachedItemAsDirty (size_t table_key)
 
static HashtableRecycler * getHashTableCache ()
 
static BoundingBoxIntersectTuningParamRecycler * getBoundingBoxIntersectTuningParamCache ()
 
- Static Public Member Functions inherited from HashJoin
static size_t getMaximumNumHashEntriesCanHold (MemoryLevel memory_level, const Executor *executor, size_t rowid_size) noexcept
 
static std::string generateTooManyHashEntriesErrMsg (size_t num_entries, size_t threshold, MemoryLevel memory_level)
 
static bool layoutRequiresAdditionalBuffers (HashType layout) noexcept
 
static std::string getHashTypeString (HashType ht) noexcept
 
static HashJoinMatchingSet codegenMatchingSet (const std::vector< llvm::Value * > &hash_join_idx_args_in, const bool is_sharded, const bool col_is_nullable, const bool is_bw_eq, const int64_t sub_buff_size, Executor *executor, const bool is_bucketized=false)
 
static llvm::Value * codegenHashTableLoad (const size_t table_idx, Executor *executor)
 
static std::shared_ptr< HashJoin > getInstance (const std::shared_ptr< Analyzer::BinOper > qual_bin_oper, const std::vector< InputTableInfo > &query_infos, const Data_Namespace::MemoryLevel memory_level, const JoinType join_type, const HashType preferred_hash_type, const int device_count, ColumnCacheMap &column_cache, Executor *executor, const HashTableBuildDagMap &hashtable_build_dag_map, const RegisteredQueryHint &query_hint, const TableIdToNodeMap &table_id_to_node_map)
 Make hash table from an in-flight SQL query's parse tree etc.
 
static std::shared_ptr< HashJoin > getSyntheticInstance (std::string_view table1, std::string_view column1, const Catalog_Namespace::Catalog &catalog1, std::string_view table2, std::string_view column2, const Catalog_Namespace::Catalog &catalog2, const Data_Namespace::MemoryLevel memory_level, const HashType preferred_hash_type, const int device_count, ColumnCacheMap &column_cache, Executor *executor)
 Make hash table from named tables and columns (such as for testing).
 
static std::shared_ptr< HashJoin > getSyntheticInstance (const std::shared_ptr< Analyzer::BinOper > qual_bin_oper, const Data_Namespace::MemoryLevel memory_level, const HashType preferred_hash_type, const int device_count, ColumnCacheMap &column_cache, Executor *executor)
 Make hash table from named tables and columns (such as for testing).
 
static std::pair< std::string, std::shared_ptr< HashJoin > > getSyntheticInstance (std::vector< std::shared_ptr< Analyzer::BinOper >>, const Data_Namespace::MemoryLevel memory_level, const HashType preferred_hash_type, const int device_count, ColumnCacheMap &column_cache, Executor *executor)
 
static shared::TableKey getInnerTableId (const std::vector< InnerOuter > &inner_outer_pairs)
 
static bool canAccessHashTable (bool allow_hash_table_recycling, bool invalid_cache_key, JoinType join_type)
 
static void checkHashJoinReplicationConstraint (const shared::TableKey &table_key, const size_t shard_count, const Executor *executor)
 
static std::pair< InnerOuter, InnerOuterStringOpInfos > normalizeColumnPair (const Analyzer::Expr *lhs, const Analyzer::Expr *rhs, const TemporaryTables *temporary_tables, const bool is_bbox_intersect=false)
 
template<typename T >
static const T * getHashJoinColumn (const Analyzer::Expr *expr)
 
static std::pair< std::vector< InnerOuter >, std::vector< InnerOuterStringOpInfos > > normalizeColumnPairs (const Analyzer::BinOper *condition, const TemporaryTables *temporary_tables)
 
static std::vector< int > collectFragmentIds (const std::vector< Fragmenter_Namespace::FragmentInfo > &fragments)
 
static CompositeKeyInfo getCompositeKeyInfo (const std::vector< InnerOuter > &inner_outer_pairs, const Executor *executor, const std::vector< InnerOuterStringOpInfos > &inner_outer_string_op_infos_pairs={})
 
static std::vector< const StringDictionaryProxy::IdMap * > translateCompositeStrDictProxies (const CompositeKeyInfo &composite_key_info, const std::vector< InnerOuterStringOpInfos > &string_op_infos_for_keys, const Executor *executor)
 
static std::pair< const StringDictionaryProxy *, StringDictionaryProxy * > getStrDictProxies (const InnerOuter &cols, const Executor *executor, const bool has_string_ops)
 
static const StringDictionaryProxy::IdMap * translateInnerToOuterStrDictProxies (const InnerOuter &cols, const InnerOuterStringOpInfos &inner_outer_string_op_infos, ExpressionRange &old_col_range, const Executor *executor)
 

Protected Member Functions

void reifyWithLayout (const HashType layout) override
 
void reifyForDevice (const ColumnsForDevice &columns_for_device, const HashType layout, const size_t entry_count, const size_t emitted_keys_count, const int device_id, const logger::ThreadLocalIds parent_thread_local_ids)
 
std::shared_ptr< BaselineHashTable > initHashTableOnCpu (const std::vector< JoinColumn > &join_columns, const std::vector< JoinColumnTypeInfo > &join_column_types, const std::vector< JoinBucketInfo > &join_bucket_info, const BaselineHashTableEntryInfo hash_table_entry_info)
 
HashType getHashType () const noexcept override
 
std::pair< size_t, size_t > approximateTupleCount (const std::vector< double > &inverse_bucket_sizes_for_dimension, std::vector< ColumnsForDevice > &columns_per_device, const size_t chosen_max_hashtable_size, const double chosen_bucket_threshold) override
 
std::pair< size_t, size_t > computeRangeHashTableCounts (const size_t shard_count, std::vector< ColumnsForDevice > &columns_per_device)
 
- Protected Member Functions inherited from BoundingBoxIntersectJoinHashTable
void reify (const HashType preferred_layout)
 
virtual void reifyImpl (std::vector< ColumnsForDevice > &columns_per_device, const Fragmenter_Namespace::TableInfo &query_info, const HashType layout, const size_t shard_count, const size_t entry_count, const size_t emitted_keys_count, const bool skip_hashtable_caching, const size_t chosen_max_hashtable_size, const double chosen_bucket_threshold)
 
void reifyForDevice (const ColumnsForDevice &columns_for_device, const HashType layout, const size_t entry_count, const size_t emitted_keys_count, const bool skip_hashtable_caching, const int device_id, const logger::ThreadLocalIds parent_thread_local_ids)
 
size_t calculateHashTableSize (size_t number_of_dimensions, size_t emitted_keys_count, size_t entry_count) const
 
ColumnsForDevice fetchColumnsForDevice (const std::vector< Fragmenter_Namespace::FragmentInfo > &fragments, const int device_id, DeviceAllocator *dev_buff_owner)
 
virtual std::pair< size_t, size_t > computeHashTableCounts (const size_t shard_count, const std::vector< double > &inverse_bucket_sizes_for_dimension, std::vector< ColumnsForDevice > &columns_per_device, const size_t chosen_max_hashtable_size, const double chosen_bucket_threshold)
 
void setInverseBucketSizeInfo (const std::vector< double > &inverse_bucket_sizes, std::vector< ColumnsForDevice > &columns_per_device, const size_t device_count)
 
size_t getKeyComponentWidth () const
 
size_t getKeyComponentCount () const
 
Data_Namespace::MemoryLevel getMemoryLevel () const noexcept override
 
int getDeviceCount () const noexcept override
 
std::shared_ptr< BaselineHashTable > initHashTableOnCpu (const std::vector< JoinColumn > &join_columns, const std::vector< JoinColumnTypeInfo > &join_column_types, const std::vector< JoinBucketInfo > &join_bucket_info, const BaselineHashTableEntryInfo hash_table_entry_info, const bool skip_hashtable_caching)
 
HashJoinMatchingSet codegenMatchingSet (const CompilationOptions &, const size_t) override
 
std::string toString (const ExecutorDeviceType device_type, const int device_id=0, bool raw=false) const override
 
DecodedJoinHashBufferSet toSet (const ExecutorDeviceType device_type, const int device_id) const override
 
llvm::Value * codegenSlot (const CompilationOptions &, const size_t) override
 
const RegisteredQueryHint & getRegisteredQueryHint ()
 
size_t getEntryCount () const
 
size_t getEmittedKeysCount () const
 
size_t getComponentBufferSize () const noexcept override
 
size_t shardCount () const
 
Data_Namespace::MemoryLevel getEffectiveMemoryLevel (const std::vector< InnerOuter > &inner_outer_pairs) const
 
shared::TableKey getInnerTableId () const noexcept override
 
int getInnerTableRteIdx () const noexcept override
 
size_t getKeyBufferSize () const noexcept
 
size_t offsetBufferOff () const noexcept override
 
size_t countBufferOff () const noexcept override
 
size_t payloadBufferOff () const noexcept override
 
std::string getHashJoinType () const final
 
bool isBitwiseEq () const override
 
std::shared_ptr< HashTable > initHashTableOnCpuFromCache (QueryPlanHash key, CacheItemType item_type, DeviceIdentifier device_identifier)
 
std::optional< std::pair< size_t, size_t > > getApproximateTupleCountFromCache (QueryPlanHash key, CacheItemType item_type, DeviceIdentifier device_identifier)
 
void putHashTableOnCpuToCache (QueryPlanHash key, CacheItemType item_type, std::shared_ptr< HashTable > hashtable_ptr, DeviceIdentifier device_identifier, size_t hashtable_building_time)
 
llvm::Value * codegenKey (const CompilationOptions &)
 
std::vector< llvm::Value * > codegenManyKey (const CompilationOptions &)
 
std::optional< BoundingBoxIntersectMetaInfo > getBoundingBoxIntersectMetaInfo ()
 
QueryPlanHash getAlternativeCacheKey (AlternativeCacheKeyForBoundingBoxIntersection &info)
 
void generateCacheKey (const size_t max_hashtable_size, const double bucket_threshold, const std::vector< double > &bucket_sizes, std::vector< std::vector< Fragmenter_Namespace::FragmentInfo >> &fragments_per_device, int device_count)
 
QueryPlanHash getCacheKey (int device_id) const
 
const std::vector< InnerOuter > & getInnerOuterPairs () const
 
void setBoundingBoxIntersectionMetaInfo (size_t max_table_size_bytes, double bucket_threshold, std::vector< double > &bucket_sizes)
 

Private Member Functions

bool isInnerColCompressed () const
 
bool isProbeCompressed () const
 

Private Attributes

const Analyzer::RangeOper * range_expr_
 
std::shared_ptr< Analyzer::ColumnVar > inner_col_expr_
 
Data_Namespace::MemoryLevel effective_memory_level_
 
const double bucket_threshold_ {std::numeric_limits<double>::max()}
 
const size_t max_hashtable_size_ {std::numeric_limits<size_t>::max()}
 

Additional Inherited Members

- Static Public Attributes inherited from HashJoin
static constexpr size_t MAX_NUM_HASH_ENTRIES = size_t(1) << 31
 
- Static Protected Member Functions inherited from HashJoin
static llvm::Value * codegenColOrStringOper (const Analyzer::Expr *col_or_string_oper, const std::vector< StringOps_Namespace::StringOpInfo > &string_op_infos, CodeGenerator &code_generator, const CompilationOptions &co)
 
- Protected Attributes inherited from BoundingBoxIntersectJoinHashTable
const std::shared_ptr< Analyzer::BinOper > condition_
 
const JoinType join_type_
 
const std::vector< InputTableInfo > & query_infos_
 
const Data_Namespace::MemoryLevel memory_level_
 
Executor * executor_
 
ColumnCacheMap & column_cache_
 
std::vector< InnerOuter > inner_outer_pairs_
 
const int device_count_
 
RegisteredQueryHint query_hints_
 
std::vector< double > inverse_bucket_sizes_for_dimension_
 
double chosen_bbox_intersect_bucket_threshold_
 
size_t chosen_bbox_intersect_max_table_size_bytes_
 
CompositeKeyInfo composite_key_info_
 
std::optional< HashType > layout_override_
 
std::mutex cpu_hash_table_buff_mutex_
 
HashTableBuildDagMap hashtable_build_dag_map_
 
QueryPlanDAG query_plan_dag_
 
std::vector< QueryPlanHash > hashtable_cache_key_
 
HashtableCacheMetaInfo hashtable_cache_meta_info_
 
std::unordered_set< size_t > table_keys_
 
const TableIdToNodeMap table_id_to_node_map_
 
- Protected Attributes inherited from HashJoin
std::vector< std::shared_ptr< HashTable > > hash_tables_for_device_
 
- Static Protected Attributes inherited from BoundingBoxIntersectJoinHashTable
static std::unique_ptr< HashtableRecycler > hash_table_cache_
 
static std::unique_ptr< BoundingBoxIntersectTuningParamRecycler > auto_tuner_cache_
 

Detailed Description

Definition at line 21 of file RangeJoinHashTable.h.

Constructor & Destructor Documentation

RangeJoinHashTable::RangeJoinHashTable ( const std::shared_ptr< Analyzer::BinOper >  condition,
const JoinType  join_type,
const Analyzer::RangeOper *  range_expr,
std::shared_ptr< Analyzer::ColumnVar >  inner_col_expr,
const std::vector< InputTableInfo > &  query_infos,
const Data_Namespace::MemoryLevel  memory_level,
ColumnCacheMap &  column_cache,
Executor *  executor,
const std::vector< InnerOuter > &  inner_outer_pairs,
const int  device_count,
const RegisteredQueryHint &  query_hints,
const HashTableBuildDagMap &  hashtable_build_dag_map,
const TableIdToNodeMap &  table_id_to_node_map
)
inline

Definition at line 23 of file RangeJoinHashTable.h.

RangeJoinHashTable(const std::shared_ptr<Analyzer::BinOper> condition,
                   const JoinType join_type,
                   const Analyzer::RangeOper* range_expr,
                   std::shared_ptr<Analyzer::ColumnVar> inner_col_expr,
                   const std::vector<InputTableInfo>& query_infos,
                   const Data_Namespace::MemoryLevel memory_level,
                   ColumnCacheMap& column_cache,
                   Executor* executor,
                   const std::vector<InnerOuter>& inner_outer_pairs,
                   const int device_count,
                   const RegisteredQueryHint& query_hints,
                   const HashTableBuildDagMap& hashtable_build_dag_map,
                   const TableIdToNodeMap& table_id_to_node_map)
    : BoundingBoxIntersectJoinHashTable(condition,
                                        join_type,
                                        query_infos,
                                        memory_level,
                                        column_cache,
                                        executor,
                                        inner_outer_pairs,
                                        device_count,
                                        query_hints,
                                        hashtable_build_dag_map,
                                        table_id_to_node_map)
    , range_expr_(range_expr)
    , inner_col_expr_(std::move(inner_col_expr)) {}
RangeJoinHashTable::~RangeJoinHashTable ( )
override default

Member Function Documentation

std::pair< size_t, size_t > RangeJoinHashTable::approximateTupleCount ( const std::vector< double > &  inverse_bucket_sizes_for_dimension,
std::vector< ColumnsForDevice > &  columns_per_device,
const size_t  chosen_max_hashtable_size,
const double  chosen_bucket_threshold 
)
override protected virtual

Reimplemented from BoundingBoxIntersectJoinHashTable.

Definition at line 518 of file RangeJoinHashTable.cpp.

References approximate_distinct_tuples_on_device_range(), approximate_distinct_tuples_range(), threading_serial::async(), Bitmap, CHECK, CHECK_EQ, CHECK_GT, CPU, Data_Namespace::CPU_LEVEL, cpu_threads(), BoundingBoxIntersectJoinHashTable::device_count_, effective_memory_level_, BoundingBoxIntersectJoinHashTable::executor_, HashJoin::getCompositeKeyInfo(), getQueryEngineCudaStreamForDevice(), GPU, Data_Namespace::GPU_LEVEL, hll_size(), hll_unify(), CountDistinctDescriptor::impl_type_, BoundingBoxIntersectJoinHashTable::inner_outer_pairs_, isInnerColCompressed(), transfer_flat_object_to_gpu(), transfer_vector_of_flat_objects_to_gpu(), and UNREACHABLE.

Referenced by computeRangeHashTableCounts().

std::pair<size_t, size_t> RangeJoinHashTable::approximateTupleCount(
    const std::vector<double>& inverse_bucket_sizes_for_dimension,
    std::vector<ColumnsForDevice>& columns_per_device,
    const size_t chosen_max_hashtable_size,
    const double chosen_bucket_threshold) {
#ifdef _WIN32
  // WIN32 needs have C++20 set for designated initialisation to work
  CountDistinctDescriptor count_distinct_desc{
      CountDistinctImplType::Bitmap,
      0,
      0,
      11,
      true,
      effective_memory_level_ == Data_Namespace::MemoryLevel::GPU_LEVEL
          ? ExecutorDeviceType::GPU
          : ExecutorDeviceType::CPU,
      1,
  };
#else
  CountDistinctDescriptor count_distinct_desc{
      .impl_type_ = CountDistinctImplType::Bitmap,
      .min_val = 0,
      .bucket_size = 0,
      .bitmap_sz_bits = 11,
      .approximate = true,
      .device_type = effective_memory_level_ == Data_Namespace::MemoryLevel::GPU_LEVEL
                         ? ExecutorDeviceType::GPU
                         : ExecutorDeviceType::CPU,
      .sub_bitmap_count = 1,
  };
#endif
  const auto padded_size_bytes = count_distinct_desc.bitmapPaddedSizeBytes();

  CHECK(!columns_per_device.empty() && !columns_per_device.front().join_columns.empty());
  if (columns_per_device.front().join_columns.front().num_elems == 0) {
    return std::make_pair(0, 0);
  }

  for (auto& columns_for_device : columns_per_device) {
    columns_for_device.setBucketInfo(inverse_bucket_sizes_for_dimension,
                                     inner_outer_pairs_);
  }

  // Number of keys must match dimension of buckets
  CHECK_EQ(columns_per_device.front().join_columns.size(),
           columns_per_device.front().join_buckets.size());
  if (effective_memory_level_ == Data_Namespace::CPU_LEVEL) {
    const auto composite_key_info =
        HashJoin::getCompositeKeyInfo(inner_outer_pairs_, executor_);
    int thread_count = cpu_threads();
    std::vector<uint8_t> hll_buffer_all_cpus(thread_count * padded_size_bytes);
    auto hll_result = &hll_buffer_all_cpus[0];

    std::vector<int32_t> num_keys_for_row;
    num_keys_for_row.resize(columns_per_device.front().join_columns[0].num_elems);

    approximate_distinct_tuples_range(hll_buffer_all_cpus,
                                      num_keys_for_row,
                                      count_distinct_desc.bitmap_sz_bits,
                                      padded_size_bytes,
                                      columns_per_device.front().join_columns,
                                      columns_per_device.front().join_column_types,
                                      columns_per_device.front().join_buckets,
                                      isInnerColCompressed(),
                                      thread_count);

    for (int i = 1; i < thread_count; ++i) {
      hll_unify(hll_result,
                hll_result + i * padded_size_bytes,
                size_t(1) << count_distinct_desc.bitmap_sz_bits);
    }
    return std::make_pair(hll_size(hll_result, count_distinct_desc.bitmap_sz_bits),
                          num_keys_for_row.size() > 0 ? num_keys_for_row.back() : 0);
  }
#ifdef HAVE_CUDA
  auto& data_mgr = *executor_->getDataMgr();
  std::vector<std::vector<uint8_t>> host_hll_buffers(device_count_);
  for (auto& host_hll_buffer : host_hll_buffers) {
    host_hll_buffer.resize(count_distinct_desc.bitmapPaddedSizeBytes());
  }
  std::vector<size_t> emitted_keys_count_device_threads(device_count_, 0);
  std::vector<std::future<void>> approximate_distinct_device_threads;
  for (int device_id = 0; device_id < device_count_; ++device_id) {
    approximate_distinct_device_threads.emplace_back(std::async(
        std::launch::async,
        [device_id,
         &columns_per_device,
         &count_distinct_desc,
         &data_mgr,
         &host_hll_buffers,
         &emitted_keys_count_device_threads,
         this] {
          auto allocator = std::make_unique<CudaAllocator>(
              &data_mgr, device_id, getQueryEngineCudaStreamForDevice(device_id));
          auto device_hll_buffer =
              allocator->alloc(count_distinct_desc.bitmapPaddedSizeBytes());
          data_mgr.getCudaMgr()->zeroDeviceMem(
              device_hll_buffer,
              count_distinct_desc.bitmapPaddedSizeBytes(),
              device_id,
              getQueryEngineCudaStreamForDevice(device_id));
          const auto& columns_for_device = columns_per_device[device_id];
          auto join_columns_gpu = transfer_vector_of_flat_objects_to_gpu(
              columns_for_device.join_columns, *allocator);

          CHECK_GT(columns_for_device.join_buckets.size(), 0u);
          const auto& bucket_sizes_for_dimension =
              columns_for_device.join_buckets[0].inverse_bucket_sizes_for_dimension;
          auto bucket_sizes_gpu =
              allocator->alloc(bucket_sizes_for_dimension.size() * sizeof(double));
          allocator->copyToDevice(bucket_sizes_gpu,
                                  bucket_sizes_for_dimension.data(),
                                  bucket_sizes_for_dimension.size() * sizeof(double));
          const size_t row_counts_buffer_sz =
              columns_per_device.front().join_columns[0].num_elems * sizeof(int32_t);
          auto row_counts_buffer = allocator->alloc(row_counts_buffer_sz);
          data_mgr.getCudaMgr()->zeroDeviceMem(
              row_counts_buffer,
              row_counts_buffer_sz,
              device_id,
              getQueryEngineCudaStreamForDevice(device_id));
          const auto key_handler =
              RangeKeyHandler(isInnerColCompressed(),
                              bucket_sizes_for_dimension.size(),
                              join_columns_gpu,
                              reinterpret_cast<double*>(bucket_sizes_gpu));
          const auto key_handler_gpu =
              transfer_flat_object_to_gpu(key_handler, *allocator);
          approximate_distinct_tuples_on_device_range(
              reinterpret_cast<uint8_t*>(device_hll_buffer),
              count_distinct_desc.bitmap_sz_bits,
              reinterpret_cast<int32_t*>(row_counts_buffer),
              key_handler_gpu,
              columns_for_device.join_columns[0].num_elems,
              executor_->blockSize(),
              executor_->gridSize());

          auto& host_emitted_keys_count = emitted_keys_count_device_threads[device_id];
          allocator->copyFromDevice(
              &host_emitted_keys_count,
              row_counts_buffer +
                  (columns_per_device.front().join_columns[0].num_elems - 1) *
                      sizeof(int32_t),
              sizeof(int32_t));

          auto& host_hll_buffer = host_hll_buffers[device_id];
          allocator->copyFromDevice(&host_hll_buffer[0],
                                    device_hll_buffer,
                                    count_distinct_desc.bitmapPaddedSizeBytes());
        }));
  }
  for (auto& child : approximate_distinct_device_threads) {
    child.get();
  }
  CHECK_EQ(Data_Namespace::GPU_LEVEL, effective_memory_level_);
  auto& result_hll_buffer = host_hll_buffers.front();
  auto hll_result = reinterpret_cast<int32_t*>(&result_hll_buffer[0]);
  for (int device_id = 1; device_id < device_count_; ++device_id) {
    auto& host_hll_buffer = host_hll_buffers[device_id];
    hll_unify(hll_result,
              reinterpret_cast<int32_t*>(&host_hll_buffer[0]),
              size_t(1) << count_distinct_desc.bitmap_sz_bits);
  }
  size_t emitted_keys_count = 0;
  for (auto& emitted_keys_count_device : emitted_keys_count_device_threads) {
    emitted_keys_count += emitted_keys_count_device;
  }
  return std::make_pair(hll_size(hll_result, count_distinct_desc.bitmap_sz_bits),
                        emitted_keys_count);
#else
  UNREACHABLE();
  return {0, 0};
#endif  // HAVE_CUDA
}
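Both paths above finish by folding per-worker HyperLogLog buffers into a single sketch. As a point of reference, the union step that hll_unify performs reduces to an element-wise register maximum; a minimal standalone sketch of that idea (conceptual restatement, not the library routine):

#include <algorithm>
#include <cstddef>
#include <cstdint>

// Conceptual equivalent of the hll_unify calls above: the union of two
// HyperLogLog sketches keeps, per register, the larger of the two values,
// so folding every worker's slice into slice 0 yields the global sketch.
void hll_unify_sketch(uint8_t* lhs, const uint8_t* rhs, size_t m) {
  for (size_t i = 0; i < m; ++i) {
    lhs[i] = std::max(lhs[i], rhs[i]);
  }
}

Here m is the register count, i.e. size_t(1) << count_distinct_desc.bitmap_sz_bits as passed in the listing.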


llvm::Value * RangeJoinHashTable::codegenKey ( const CompilationOptions &  co,
llvm::Value *  offset 
)

Definition at line 699 of file RangeJoinHashTable.cpp.

References CodeGenerator::castArrayPointer(), CHECK, CHECK_EQ, CodeGenerator::codegen(), BoundingBoxIntersectJoinHashTable::executor_, func_resolve, get_int_type(), Catalog_Namespace::get_metadata_for_column(), BoundingBoxIntersectJoinHashTable::getKeyComponentCount(), BoundingBoxIntersectJoinHashTable::getKeyComponentWidth(), BoundingBoxIntersectJoinHashTable::inner_outer_pairs_, BoundingBoxIntersectJoinHashTable::inverse_bucket_sizes_for_dimension_, isProbeCompressed(), kPOINT, kTINYINT, LL_BUILDER, LL_CONTEXT, LL_FP, LL_INT, and CodeGenerator::posArg().

Referenced by codegenMatchingSetWithOffset().

llvm::Value* RangeJoinHashTable::codegenKey(const CompilationOptions& co,
                                            llvm::Value* offset_ptr) {
  const auto key_component_width = getKeyComponentWidth();
  CHECK(key_component_width == 4 || key_component_width == 8);
  const auto key_size_lv = LL_INT(getKeyComponentCount() * key_component_width);
  llvm::Value* key_buff_lv{nullptr};
  switch (key_component_width) {
    case 4:
      key_buff_lv =
          LL_BUILDER.CreateAlloca(llvm::Type::getInt32Ty(LL_CONTEXT), key_size_lv);
      break;
    case 8:
      key_buff_lv =
          LL_BUILDER.CreateAlloca(llvm::Type::getInt64Ty(LL_CONTEXT), key_size_lv);
      break;
    default:
      CHECK(false);
  }

  const auto& inner_outer_pair = inner_outer_pairs_[0];
  const auto outer_col = inner_outer_pair.second;
  const auto outer_col_ti = outer_col->get_type_info();

  if (outer_col_ti.is_geometry()) {
    CodeGenerator code_generator(executor_);
    // TODO(adb): for points we will use the coords array, but for other
    // geometries we will need to use the bounding box. For now only support
    // points.
    CHECK_EQ(outer_col_ti.get_type(), kPOINT);
    CHECK_EQ(inverse_bucket_sizes_for_dimension_.size(), static_cast<size_t>(2));

    llvm::Value* arr_ptr{nullptr};
    // prepare point column (arr) ptr to generate code for hash table key
    if (auto outer_col_var = dynamic_cast<const Analyzer::ColumnVar*>(outer_col)) {
      const auto col_lvs = code_generator.codegen(outer_col, true, co);
      CHECK_EQ(col_lvs.size(), size_t(1));
      auto column_key = outer_col_var->getColumnKey();
      if (column_key.table_id < 0) {
        // todo: relax this
        throw QueryNotSupported(
            "Geospatial columns not yet supported in this temporary table context.");
      }
      column_key.column_id = column_key.column_id + 1;
      const auto coords_cd = Catalog_Namespace::get_metadata_for_column(column_key);
      CHECK(coords_cd);
      const auto coords_ti = coords_cd->columnType;

      const auto array_buff_ptr = executor_->cgen_state_->emitExternalCall(
          "array_buff",
          llvm::Type::getInt8PtrTy(executor_->cgen_state_->context_),
          {col_lvs.front(), code_generator.posArg(outer_col)});
      CHECK(array_buff_ptr);
      CHECK(coords_ti.get_elem_type().get_type() == kTINYINT)
          << "Only TINYINT coordinates columns are supported in bounding box "
             "intersection.";
      arr_ptr =
          code_generator.castArrayPointer(array_buff_ptr, coords_ti.get_elem_type());
    } else if (auto geo_expr_outer_col =
                   dynamic_cast<const Analyzer::GeoOperator*>(outer_col)) {
      const auto geo_expr_name = geo_expr_outer_col->getName();
      if (func_resolve(geo_expr_name, "ST_Point"sv, "ST_Transform"sv, "ST_Centroid"sv)) {
        // note that ST_SetSRID changes type info of the column, and is handled by
        // translation phase, so when we use ST_SETSRID(ST_POINT(x, y), 4326)
        // as a join column expression, we recognize it as ST_POINT (with SRID as 4326)
        const auto col_lvs = code_generator.codegen(outer_col, true, co);
        // listed functions keep point coordinates in the local variable (let say S)
        // which is corresponding to the pointer that col_lvs[0] holds
        // thus, all we need is to retrieve necessary coordinate from the S by varying
        // its offset (i.e., i == 0 means x coordinate)
        arr_ptr = LL_BUILDER.CreatePointerCast(
            col_lvs[0], llvm::Type::getInt8PtrTy(executor_->cgen_state_->context_));
      } else {
        throw std::runtime_error(
            "RHS key of the range join operator has a geospatial function which is not "
            "supported yet: " +
            geo_expr_name);
      }
    } else {
      throw std::runtime_error("Range join operator has an invalid rhs key: " +
                               outer_col->toString());
    }

    // load and unpack offsets
    const auto offset =
        LL_BUILDER.CreateLoad(offset_ptr->getType()->getPointerElementType(),
                              offset_ptr,
                              "packed_bucket_offset");
    const auto x_offset =
        LL_BUILDER.CreateTrunc(offset, llvm::Type::getInt32Ty(LL_CONTEXT));

    const auto y_offset_shifted =
        LL_BUILDER.CreateLShr(offset, LL_INT(static_cast<int64_t>(32)));
    const auto y_offset =
        LL_BUILDER.CreateTrunc(y_offset_shifted, llvm::Type::getInt32Ty(LL_CONTEXT));

    const auto x_bucket_offset =
        LL_BUILDER.CreateSExt(x_offset, llvm::Type::getInt64Ty(LL_CONTEXT));
    const auto y_bucket_offset =
        LL_BUILDER.CreateSExt(y_offset, llvm::Type::getInt64Ty(LL_CONTEXT));

    for (size_t i = 0; i < 2; i++) {
      const auto key_comp_dest_lv = LL_BUILDER.CreateGEP(
          key_buff_lv->getType()->getScalarType()->getPointerElementType(),
          key_buff_lv,
          LL_INT(i));

      const auto funcName = isProbeCompressed() ? "get_bucket_key_for_range_compressed"
                                                : "get_bucket_key_for_range_double";

      // Note that get_bucket_key_for_range_compressed will need to be
      // specialized for future compression schemes
      auto bucket_key = executor_->cgen_state_->emitExternalCall(
          funcName,
          get_int_type(64, LL_CONTEXT),
          {arr_ptr, LL_INT(i), LL_FP(inverse_bucket_sizes_for_dimension_[i])});

      auto bucket_key_shifted = i == 0
                                    ? LL_BUILDER.CreateAdd(x_bucket_offset, bucket_key)
                                    : LL_BUILDER.CreateAdd(y_bucket_offset, bucket_key);

      const auto col_lv = LL_BUILDER.CreateSExt(
          bucket_key_shifted, get_int_type(key_component_width * 8, LL_CONTEXT));
      LL_BUILDER.CreateStore(col_lv, key_comp_dest_lv);
    }
  } else {
    LOG(FATAL) << "Range join key currently only supported for geospatial types.";
  }
  return key_buff_lv;
}
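The load-and-unpack sequence above implies the layout of the 64-bit packed_bucket_offset value the probe passes in: the x bucket offset in the low 32 bits and the y bucket offset in the high 32 bits. A plain C++ restatement of that scheme (illustrative helpers, not part of the class):

#include <cstdint>

// Mirrors the IR above: CreateTrunc takes the low 32 bits (x), while
// CreateLShr by 32 followed by a truncate takes the high 32 bits (y);
// both halves are then sign-extended back to 64 bits.
int64_t pack_bucket_offsets(int32_t x, int32_t y) {
  return (static_cast<int64_t>(y) << 32) | static_cast<uint32_t>(x);
}

int32_t unpack_x(int64_t packed) {
  return static_cast<int32_t>(packed);        // low 32 bits
}

int32_t unpack_y(int64_t packed) {
  return static_cast<int32_t>(packed >> 32);  // high 32 bits
}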


HashJoinMatchingSet RangeJoinHashTable::codegenMatchingSetWithOffset ( const CompilationOptions &  co,
const size_t  index,
llvm::Value *  range_offset 
)

Definition at line 829 of file RangeJoinHashTable.cpp.

References CHECK, HashJoin::codegenHashTableLoad(), codegenKey(), HashJoin::codegenMatchingSet(), BoundingBoxIntersectJoinHashTable::executor_, get_int_type(), BoundingBoxIntersectJoinHashTable::getComponentBufferSize(), BoundingBoxIntersectJoinHashTable::getEntryCount(), getHashType(), BoundingBoxIntersectJoinHashTable::getKeyComponentCount(), BoundingBoxIntersectJoinHashTable::getKeyComponentWidth(), LL_BUILDER, LL_CONTEXT, LL_INT, BoundingBoxIntersectJoinHashTable::offsetBufferOff(), OneToMany, and to_string().

HashJoinMatchingSet RangeJoinHashTable::codegenMatchingSetWithOffset(
    const CompilationOptions& co,
    const size_t index,
    llvm::Value* range_offset) {
  const auto key_component_width = getKeyComponentWidth();
  CHECK(key_component_width == 4 || key_component_width == 8);

  auto key_buff_lv = codegenKey(co, range_offset);
  CHECK(getHashType() == HashType::OneToMany);

  auto hash_ptr = codegenHashTableLoad(index, executor_);
  const auto composite_dict_ptr_type =
      llvm::Type::getIntNPtrTy(LL_CONTEXT, key_component_width * 8);

  const auto composite_key_dict =
      hash_ptr->getType()->isPointerTy()
          ? LL_BUILDER.CreatePointerCast(hash_ptr, composite_dict_ptr_type)
          : LL_BUILDER.CreateIntToPtr(hash_ptr, composite_dict_ptr_type);

  const auto key_component_count = getKeyComponentCount();

  const auto funcName =
      "get_composite_key_index_" + std::to_string(key_component_width * 8);

  const auto key = executor_->cgen_state_->emitExternalCall(funcName,
                                                            get_int_type(64, LL_CONTEXT),
                                                            {key_buff_lv,
                                                             LL_INT(key_component_count),
                                                             composite_key_dict,
                                                             LL_INT(getEntryCount())});

  auto one_to_many_ptr = hash_ptr;
  if (one_to_many_ptr->getType()->isPointerTy()) {
    one_to_many_ptr =
        LL_BUILDER.CreatePtrToInt(hash_ptr, llvm::Type::getInt64Ty(LL_CONTEXT));
  } else {
    CHECK(one_to_many_ptr->getType()->isIntegerTy(64));
  }
  const auto composite_key_dict_size = offsetBufferOff();
  one_to_many_ptr =
      LL_BUILDER.CreateAdd(one_to_many_ptr, LL_INT(composite_key_dict_size));

  return HashJoin::codegenMatchingSet(
      /* hash_join_idx_args_in */ {one_to_many_ptr,
                                   key,
                                   LL_INT(int64_t(0)),
                                   LL_INT(getEntryCount() - 1)},
      /* is_sharded */ false,
      /* is_nullable */ false,
      /* is_bw_eq */ false,
      /* sub_buff_size */ getComponentBufferSize(),
      /* executor */ executor_);
}


std::pair< size_t, size_t > RangeJoinHashTable::computeRangeHashTableCounts ( const size_t  shard_count,
std::vector< ColumnsForDevice > &  columns_per_device 
)
protected

Definition at line 502 of file RangeJoinHashTable.cpp.

References approximateTupleCount(), bucket_threshold_, CHECK, BoundingBoxIntersectJoinHashTable::device_count_, get_entries_per_device(), BoundingBoxIntersectJoinHashTable::inverse_bucket_sizes_for_dimension_, max_hashtable_size_, and BoundingBoxIntersectJoinHashTable::memory_level_.

Referenced by reifyWithLayout().

std::pair<size_t, size_t> RangeJoinHashTable::computeRangeHashTableCounts(
    const size_t shard_count,
    std::vector<ColumnsForDevice>& columns_per_device) {
  CHECK(!inverse_bucket_sizes_for_dimension_.empty());
  const auto [tuple_count, emitted_keys_count] =
      approximateTupleCount(inverse_bucket_sizes_for_dimension_,
                            columns_per_device,
                            max_hashtable_size_,
                            bucket_threshold_);
  const auto entry_count = 2 * std::max(tuple_count, size_t(1));

  return std::make_pair(
      get_entries_per_device(entry_count, shard_count, device_count_, memory_level_),
      emitted_keys_count);
}


HashType RangeJoinHashTable::getHashType ( ) const
inline override protected virtual noexcept

Reimplemented from BoundingBoxIntersectJoinHashTable.

Definition at line 90 of file RangeJoinHashTable.h.

References OneToMany.

Referenced by codegenMatchingSetWithOffset().

  HashType getHashType() const noexcept override {
    return HashType::OneToMany;
  }


std::shared_ptr< RangeJoinHashTable > RangeJoinHashTable::getInstance ( const std::shared_ptr< Analyzer::BinOper >  condition,
const Analyzer::RangeOper *  range_expr,
const std::vector< InputTableInfo > &  query_infos,
const Data_Namespace::MemoryLevel  memory_level,
const JoinType  join_type,
const int  device_count,
ColumnCacheMap &  column_cache,
Executor *  executor,
const HashTableBuildDagMap &  hashtable_build_dag_map,
const RegisteredQueryHint &  query_hints,
const TableIdToNodeMap &  table_id_to_node_map
)
static

NOTE(jclay): Handling Range Joins With Mixed Compression:

First, let's take a concrete example of a query that is rewritten as a range join. Notice in the first code block that the condition operator is a bounding box intersection operator: the LHS is a column, and the RHS is the range operator. In order to have the hash table build and probe work properly, we need to ensure that the appropriate runtime functions are selected. The following breakdown documents how the appropriate runtime function is selected.

  • The LHS of the RangeOper is used to build the hash table
  • The LHS of the BoundingBoxIntersectionOper + the RHS of the RangeOper is used as probe

SELECT count(*) FROM t1, t2 where ST_Distance(t1.p1_comp32, t2.p1) <= 6.3;

BinOper condition

((BoundingBoxIntersect) (ColumnVar table: (t1) column: (p1_comp32) GEOMETRY(POINT, 4326) ENCODING COMPRESSED(32)) (RangeOper) (ColumnVar table: (t2) column: (p1) GEOMETRY(POINT, 4326) ENCODING NONE), (Const 6.330000))

RangeOper condition

[(ColumnVar table: 5 (t2) column: 1 rte: 1 GEOMETRY(POINT, 4326) ENCODING NONE), (Const 6.330000)]

Same example as above, annotated:

SELECT count(*) FROM t1, t2 WHERE ST_Distance(
    t1.p1_comp32,   << Bounding Box Intersection Condition LHS
    t2.p1           << RangeOper LHS
) <= 6.3;           << RangeOper RHS

In this case, we select the uncompressed runtime functions when building the hash table over t2.p1. When performing the probe, we must select the compressed runtime functions.
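A minimal sketch of how that selection surfaces in code generation; the two runtime-function names are the ones emitted by codegenKey(), while the helper itself is illustrative only:

#include <string_view>

// Probe-side choice: a GEOINT-compressed probe column (like t1.p1_comp32)
// needs the decompressing variant; an uncompressed one reads raw doubles.
std::string_view bucketKeyFunction(bool probe_is_geoint_compressed) {
  return probe_is_geoint_compressed ? "get_bucket_key_for_range_compressed"
                                    : "get_bucket_key_for_range_double";
}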

Definition at line 71 of file RangeJoinHashTable.cpp.

References CHECK, HashJoin::checkHashJoinReplicationConstraint(), logger::FATAL, get_inner_query_info(), Analyzer::RangeOper::get_left_operand(), Catalog_Namespace::get_metadata_for_column(), HashJoin::getInnerTableId(), Fragmenter_Namespace::TableInfo::getNumTuplesUpperBound(), BaselineJoinHashTable::getShardCountForCondition(), Data_Namespace::GPU_LEVEL, InputTableInfo::info, LOG, HashJoin::MAX_NUM_HASH_ENTRIES, OneToMany, and Analyzer::RangeOper::toString().

Referenced by BoundingBoxIntersectJoinHashTable::getInstance().

std::shared_ptr<RangeJoinHashTable> RangeJoinHashTable::getInstance(
    const std::shared_ptr<Analyzer::BinOper> condition,
    const Analyzer::RangeOper* range_expr,
    const std::vector<InputTableInfo>& query_infos,
    const Data_Namespace::MemoryLevel memory_level,
    const JoinType join_type,
    const int device_count,
    ColumnCacheMap& column_cache,
    Executor* executor,
    const HashTableBuildDagMap& hashtable_build_dag_map,
    const RegisteredQueryHint& query_hints,
    const TableIdToNodeMap& table_id_to_node_map) {
  // the hash table is built over the LHS of the range oper. we then use the lhs
  // of the bin oper + the rhs of the range oper for the probe
  auto range_expr_col_var =
      dynamic_cast<const Analyzer::ColumnVar*>(range_expr->get_left_operand());
  if (!range_expr_col_var || !range_expr_col_var->get_type_info().is_geometry()) {
    throw HashJoinFail("Could not build hash tables for range join | " +
                       range_expr->toString());
  }

  CHECK(range_expr_col_var->get_type_info().is_geometry());

  auto coords_column_key = range_expr_col_var->getColumnKey();
  coords_column_key.column_id = coords_column_key.column_id + 1;
  const auto coords_cd = Catalog_Namespace::get_metadata_for_column(coords_column_key);
  CHECK(coords_cd);

  auto range_join_inner_col_expr = makeExpr<Analyzer::ColumnVar>(
      coords_cd->columnType, coords_column_key, range_expr_col_var->get_rte_idx());

  std::vector<InnerOuter> inner_outer_pairs;
  inner_outer_pairs.emplace_back(
      InnerOuter{dynamic_cast<Analyzer::ColumnVar*>(range_join_inner_col_expr.get()),
                 condition->get_left_operand()});

  const auto& query_info =
      get_inner_query_info(HashJoin::getInnerTableId(inner_outer_pairs), query_infos)
          .info;

  const auto total_entries = 2 * query_info.getNumTuplesUpperBound();
  if (total_entries > HashJoin::MAX_NUM_HASH_ENTRIES) {
    throw TooManyHashEntries();
  }

  const auto shard_count = memory_level == Data_Namespace::GPU_LEVEL
                               ? BaselineJoinHashTable::getShardCountForCondition(
                                     condition.get(), executor, inner_outer_pairs)
                               : 0;

  auto join_hash_table = std::make_shared<RangeJoinHashTable>(condition,
                                                              join_type,
                                                              range_expr,
                                                              range_join_inner_col_expr,
                                                              query_infos,
                                                              memory_level,
                                                              column_cache,
                                                              executor,
                                                              inner_outer_pairs,
                                                              device_count,
                                                              query_hints,
                                                              hashtable_build_dag_map,
                                                              table_id_to_node_map);
  HashJoin::checkHashJoinReplicationConstraint(
      HashJoin::getInnerTableId(inner_outer_pairs), shard_count, executor);
  try {
    join_hash_table->reifyWithLayout(HashType::OneToMany);
  } catch (const HashJoinFail& e) {
    throw HashJoinFail(std::string("Could not build a 1-to-1 correspondence for columns "
                                   "involved in equijoin | ") +
                       e.what());
  } catch (const ColumnarConversionNotSupported& e) {
    throw HashJoinFail(std::string("Could not build hash tables for equijoin | ") +
                       e.what());
  } catch (const JoinHashTableTooBig& e) {
    throw e;
  } catch (const std::exception& e) {
    LOG(FATAL) << "Fatal error while attempting to build hash tables for join: "
               << e.what();
  }

  return join_hash_table;
}
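Note the column_id + 1 step above: for a geospatial POINT column, the hash table is built over the hidden physical coords array column stored immediately after the logical geo column, which is why reifyWithLayout() later translates back to the logical point column when forming cache keys.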


std::shared_ptr< BaselineHashTable > RangeJoinHashTable::initHashTableOnCpu ( const std::vector< JoinColumn > &  join_columns,
const std::vector< JoinColumnTypeInfo > &  join_column_types,
const std::vector< JoinBucketInfo > &  join_bucket_info,
const BaselineHashTableEntryInfo  hash_table_entry_info 
)
protected

Definition at line 448 of file RangeJoinHashTable.cpp.

References BBOX_INTERSECT_HT, CHECK, DataRecyclerUtil::CPU_DEVICE_IDENTIFIER, DEBUG_TIMER, BoundingBoxIntersectJoinHashTable::executor_, HashJoin::getCompositeKeyInfo(), HashTableEntryInfo::getHashTableLayout(), BoundingBoxIntersectJoinHashTable::hashtable_cache_key_, BoundingBoxIntersectJoinHashTable::inner_outer_pairs_, isInnerColCompressed(), BoundingBoxIntersectJoinHashTable::join_type_, HashJoin::layoutRequiresAdditionalBuffers(), BoundingBoxIntersectJoinHashTable::putHashTableOnCpuToCache(), BoundingBoxIntersectJoinHashTable::query_hints_, and to_string().

Referenced by reifyForDevice().

std::shared_ptr<BaselineHashTable> RangeJoinHashTable::initHashTableOnCpu(
    const std::vector<JoinColumn>& join_columns,
    const std::vector<JoinColumnTypeInfo>& join_column_types,
    const std::vector<JoinBucketInfo>& join_bucket_info,
    const BaselineHashTableEntryInfo hash_table_entry_info) {
  auto timer = DEBUG_TIMER(__func__);
  decltype(std::chrono::steady_clock::now()) ts1, ts2;
  ts1 = std::chrono::steady_clock::now();
  const auto composite_key_info =
      HashJoin::getCompositeKeyInfo(inner_outer_pairs_, executor_);
  CHECK(!join_columns.empty());
  CHECK(!join_bucket_info.empty());

  CHECK(layoutRequiresAdditionalBuffers(hash_table_entry_info.getHashTableLayout()));
  const auto key_component_count =
      join_bucket_info[0].inverse_bucket_sizes_for_dimension.size();

  auto key_handler =
      RangeKeyHandler(isInnerColCompressed(),
                      key_component_count,
                      &join_columns[0],
                      join_bucket_info[0].inverse_bucket_sizes_for_dimension.data());

  BaselineJoinHashTableBuilder builder;
  const StrProxyTranslationMapsPtrsAndOffsets
      dummy_str_proxy_translation_maps_ptrs_and_offsets;
  const auto err =
      builder.initHashTableOnCpu(&key_handler,
                                 composite_key_info,
                                 join_columns,
                                 join_column_types,
                                 join_bucket_info,
                                 dummy_str_proxy_translation_maps_ptrs_and_offsets,
                                 hash_table_entry_info,
                                 join_type_,
                                 executor_,
                                 query_hints_);
  ts2 = std::chrono::steady_clock::now();
  if (err) {
    throw HashJoinFail(std::string("Unrecognized error when initializing CPU "
                                   "range join hash table (") +
                       std::to_string(err) + std::string(")"));
  }
  std::shared_ptr<BaselineHashTable> hash_table = builder.getHashTable();
  auto hashtable_build_time =
      std::chrono::duration_cast<std::chrono::milliseconds>(ts2 - ts1).count();
  putHashTableOnCpuToCache(hashtable_cache_key_.front(),
                           CacheItemType::BBOX_INTERSECT_HT,
                           hash_table,
                           DataRecyclerUtil::CPU_DEVICE_IDENTIFIER,
                           hashtable_build_time);
  return hash_table;
}


bool RangeJoinHashTable::isInnerColCompressed ( ) const
inline private

Definition at line 112 of file RangeJoinHashTable.h.

References SQLTypeInfo::get_compression(), Analyzer::RangeOper::get_left_operand(), Analyzer::Expr::get_type_info(), kENCODING_GEOINT, and range_expr_.

Referenced by approximateTupleCount(), and initHashTableOnCpu().

  bool isInnerColCompressed() const {
    return range_expr_->get_left_operand()->get_type_info().get_compression() ==
           kENCODING_GEOINT;
  }


bool RangeJoinHashTable::isProbeCompressed ( ) const
inline private

Definition at line 117 of file RangeJoinHashTable.h.

References BoundingBoxIntersectJoinHashTable::getInnerOuterPairs(), and kENCODING_GEOINT.

Referenced by codegenKey().

  bool isProbeCompressed() const {
    const auto& inner_outer_pair = getInnerOuterPairs()[0];
    const auto outer_col = inner_outer_pair.second;
    const auto outer_col_ti = outer_col->get_type_info();

    return outer_col_ti.get_compression() == kENCODING_GEOINT;
  }
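Applied to the worked query in getInstance() above, the two predicates diverge; a small illustrative check (the values follow from the column encodings in that example):

#include <cassert>

int main() {
  // ST_Distance(t1.p1_comp32, t2.p1) <= 6.3
  const bool inner_compressed = false;  // build side: t2.p1, ENCODING NONE
  const bool probe_compressed = true;   // probe side: t1.p1_comp32, COMPRESSED(32)
  // Build uses the uncompressed key handler; the probe selects
  // get_bucket_key_for_range_compressed in codegenKey().
  assert(!inner_compressed && probe_compressed);
  return 0;
}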


void RangeJoinHashTable::reifyForDevice ( const ColumnsForDevice columns_for_device,
const HashType  layout,
const size_t  entry_count,
const size_t  emitted_keys_count,
const int  device_id,
const logger::ThreadLocalIds  parent_thread_local_ids 
)
protected

Definition at line 346 of file RangeJoinHashTable.cpp.

References CHECK, CHECK_EQ, CHECK_LT, Data_Namespace::CPU_LEVEL, DEBUG_TIMER_NEW_THREAD, effective_memory_level_, BoundingBoxIntersectJoinHashTable::getKeyComponentCount(), BoundingBoxIntersectJoinHashTable::getKeyComponentWidth(), Data_Namespace::GPU_LEVEL, HashJoin::hash_tables_for_device_, initHashTableOnCpu(), ColumnsForDevice::join_buckets, ColumnsForDevice::join_column_types, ColumnsForDevice::join_columns, HashJoin::layoutRequiresAdditionalBuffers(), BoundingBoxIntersectJoinHashTable::memory_level_, logger::ThreadLocalIds::setNewThreadId(), logger::ThreadLocalIds::thread_id_, UNREACHABLE, and VLOG.

Referenced by reifyWithLayout().

void RangeJoinHashTable::reifyForDevice(
    const ColumnsForDevice& columns_for_device,
    const HashType layout,
    const size_t entry_count,
    const size_t emitted_keys_count,
    const int device_id,
    const logger::ThreadLocalIds parent_thread_local_ids) {
  logger::LocalIdsScopeGuard lisg = parent_thread_local_ids.setNewThreadId();
  DEBUG_TIMER_NEW_THREAD(parent_thread_local_ids.thread_id_);
  CHECK_EQ(getKeyComponentWidth(), size_t(8));
  CHECK(layoutRequiresAdditionalBuffers(layout));
  BaselineHashTableEntryInfo hash_table_entry_info(entry_count,
                                                   emitted_keys_count,
                                                   sizeof(int32_t),
                                                   getKeyComponentCount(),
                                                   getKeyComponentWidth(),
                                                   layout,
                                                   false);
  if (effective_memory_level_ == Data_Namespace::CPU_LEVEL) {
    VLOG(1) << "Building range join hash table on CPU.";
    auto hash_table = initHashTableOnCpu(columns_for_device.join_columns,
                                         columns_for_device.join_column_types,
                                         columns_for_device.join_buckets,
                                         hash_table_entry_info);
    CHECK(hash_table);

#ifdef HAVE_CUDA
    if (memory_level_ == Data_Namespace::MemoryLevel::GPU_LEVEL) {
      auto gpu_hash_table = copyCpuHashTableToGpu(hash_table, device_id);
      CHECK_LT(size_t(device_id), hash_tables_for_device_.size());
      hash_tables_for_device_[device_id] = std::move(gpu_hash_table);
    } else {
#else
    {
#endif
      CHECK_EQ(hash_tables_for_device_.size(), size_t(1));
      hash_tables_for_device_[0] = std::move(hash_table);
#ifdef HAVE_CUDA
    }
#endif
  } else {
#ifdef HAVE_CUDA
    auto hash_table = initHashTableOnGpu(columns_for_device.join_columns,
                                         columns_for_device.join_column_types,
                                         columns_for_device.join_buckets,
                                         hash_table_entry_info,
                                         device_id);
    CHECK_LT(size_t(device_id), hash_tables_for_device_.size());
    hash_tables_for_device_[device_id] = std::move(hash_table);
#else
    UNREACHABLE();
#endif
  }
}
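The branch structure above encodes the build-placement policy:

  • effective_memory_level_ == CPU_LEVEL and memory_level_ == CPU_LEVEL: build on CPU and keep the single CPU table in hash_tables_for_device_[0].
  • effective_memory_level_ == CPU_LEVEL and memory_level_ == GPU_LEVEL (HAVE_CUDA builds): build once on CPU, then copyCpuHashTableToGpu() into each device slot.
  • otherwise (HAVE_CUDA builds): build directly on each device via initHashTableOnGpu(); without CUDA this path is UNREACHABLE().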


void RangeJoinHashTable::reifyWithLayout ( const HashType  layout)
override protected virtual

Reimplemented from BoundingBoxIntersectJoinHashTable.

Definition at line 155 of file RangeJoinHashTable.cpp.

References threading_serial::async(), BBOX_INTERSECT_HT, bucket_threshold_, CompositeKeyInfo::cache_key_chunks, BoundingBoxIntersectJoinHashTable::calculateHashTableSize(), CHECK, CHECK_EQ, CHECK_LT, HashJoin::collectFragmentIds(), BoundingBoxIntersectJoinHashTable::composite_key_info_, computeRangeHashTableCounts(), BoundingBoxIntersectJoinHashTable::condition_, DataRecyclerUtil::CPU_DEVICE_IDENTIFIER, BoundingBoxIntersectJoinHashTable::cpu_hash_table_buff_mutex_, Data_Namespace::CPU_LEVEL, DEBUG_TIMER, BoundingBoxIntersectJoinHashTable::device_count_, effective_memory_level_, BoundingBoxIntersectJoinHashTable::executor_, BoundingBoxIntersectJoinHashTable::fetchColumnsForDevice(), BoundingBoxIntersectJoinHashTable::generateCacheKey(), get_inner_query_info(), Analyzer::RangeOper::get_left_operand(), Analyzer::RangeOper::get_right_operand(), BoundingBoxIntersectJoinHashTable::getAlternativeCacheKey(), DataRecyclerUtil::getAlternativeTableKeys(), BoundingBoxIntersectJoinHashTable::getEffectiveMemoryLevel(), HashtableRecycler::getHashtableAccessPathInfo(), HashJoin::getHashTypeString(), HashJoin::getInnerTableId(), BoundingBoxIntersectJoinHashTable::getInnerTableId(), getQueryEngineCudaStreamForDevice(), Data_Namespace::GPU_LEVEL, BoundingBoxIntersectJoinHashTable::hash_table_cache_, HashJoin::hash_tables_for_device_, hash_value(), BoundingBoxIntersectJoinHashTable::hashtable_build_dag_map_, BoundingBoxIntersectJoinHashTable::hashtable_cache_key_, InputTableInfo::info, BoundingBoxIntersectJoinHashTable::initHashTableOnCpuFromCache(), BoundingBoxIntersectJoinHashTable::inner_outer_pairs_, BoundingBoxIntersectJoinHashTable::inverse_bucket_sizes_for_dimension_, HashtableRecycler::isInvalidHashTableCacheKey(), BoundingBoxIntersectJoinHashTable::join_type_, BoundingBoxIntersectJoinHashTable::layout_override_, ManyToMany, max_hashtable_size_, BoundingBoxIntersectJoinHashTable::memory_level_, OneToMany, only_shards_for_device(), BoundingBoxIntersectJoinHashTable::query_infos_, range_expr_, reifyForDevice(), BoundingBoxIntersectJoinHashTable::setBoundingBoxIntersectionMetaInfo(), BoundingBoxIntersectJoinHashTable::setInverseBucketSizeInfo(), BoundingBoxIntersectJoinHashTable::shardCount(), BoundingBoxIntersectJoinHashTable::table_keys_, logger::thread_local_ids(), UNREACHABLE, and VLOG.

void RangeJoinHashTable::reifyWithLayout(const HashType layout) {
  auto timer = DEBUG_TIMER(__func__);
  CHECK(layout == HashType::OneToMany);

  const auto& query_info =
      get_inner_query_info(getInnerTableId(), query_infos_).info;

  if (query_info.fragments.empty()) {
    return;
  }

  const auto& table_key = getInnerTableId();
  VLOG(1) << "Reify with layout " << getHashTypeString(layout) << " for " << table_key;

  std::vector<ColumnsForDevice> columns_per_device;

  auto data_mgr = executor_->getDataMgr();
  std::vector<std::vector<Fragmenter_Namespace::FragmentInfo>> fragments_per_device;
  std::vector<std::unique_ptr<CudaAllocator>> dev_buff_owners;
  const auto shard_count = shardCount();
  for (int device_id = 0; device_id < device_count_; ++device_id) {
    fragments_per_device.emplace_back(
        shard_count
            ? only_shards_for_device(query_info.fragments, device_id, device_count_)
            : query_info.fragments);
    if (memory_level_ == Data_Namespace::GPU_LEVEL) {
      dev_buff_owners.emplace_back(std::make_unique<CudaAllocator>(
          data_mgr, device_id, getQueryEngineCudaStreamForDevice(device_id)));
    }
    // For bounding box intersection, we need to fetch columns regardless of the
    // availability of a cached hash table, to calculate various params such as
    // bucket size info. TODO(yoonmin): relax this.
    const auto columns_for_device =
        fetchColumnsForDevice(fragments_per_device[device_id],
                              device_id,
                              memory_level_ == Data_Namespace::GPU_LEVEL
                                  ? dev_buff_owners[device_id].get()
                                  : nullptr);
    columns_per_device.push_back(columns_for_device);
  }

  inverse_bucket_sizes_for_dimension_.clear();

  const auto bucket_range =
      dynamic_cast<const Analyzer::Constant*>(range_expr_->get_right_operand());

  CHECK(bucket_range);
  CHECK(bucket_range->get_type_info().is_fp() &&
        bucket_range->get_type_info().get_size() == 8);  // TODO

  const auto bucket_range_datum = bucket_range->get_constval();

  inverse_bucket_sizes_for_dimension_.emplace_back(1. / bucket_range_datum.doubleval);
  inverse_bucket_sizes_for_dimension_.emplace_back(1. / bucket_range_datum.doubleval);

  setInverseBucketSizeInfo(
      inverse_bucket_sizes_for_dimension_, columns_per_device, device_count_);

  effective_memory_level_ = getEffectiveMemoryLevel(inner_outer_pairs_);

  // To properly look up a cached hash table, we need to use the join columns listed
  // as lhs and rhs of the bbox intersect op instead of the physical (and hidden)
  // column tailored to the range join expr. In other words, we need to use the
  // geometry (point) column instead of its hidden array column; see the
  // `get_physical_cols` function.
  std::vector<InnerOuter> inner_outer_pairs_for_cache_lookup;
  inner_outer_pairs_for_cache_lookup.emplace_back(InnerOuter{
      dynamic_cast<const Analyzer::ColumnVar*>(range_expr_->get_left_operand()),
      condition_->get_left_operand()});
  auto hashtable_access_path_info =
      HashtableRecycler::getHashtableAccessPathInfo(inner_outer_pairs_for_cache_lookup,
                                                    {},
                                                    condition_->get_optype(),
                                                    join_type_,
                                                    hashtable_build_dag_map_,
                                                    device_count_,
                                                    shard_count,
                                                    fragments_per_device,
                                                    executor_);
  hashtable_cache_key_ = hashtable_access_path_info.hashed_query_plan_dag;
  table_keys_ = hashtable_access_path_info.table_keys;

  auto get_inner_table_key = [&inner_outer_pairs_for_cache_lookup]() {
    auto col_var = inner_outer_pairs_for_cache_lookup.front().first;
    return col_var->getTableKey();
  };

  if (table_keys_.empty()) {
    const auto& inner_table_key = get_inner_table_key();
    table_keys_ = DataRecyclerUtil::getAlternativeTableKeys(
        composite_key_info_.cache_key_chunks, inner_table_key);
  }
  CHECK(!table_keys_.empty());

  generateCacheKey(max_hashtable_size_,
                   bucket_threshold_,
                   inverse_bucket_sizes_for_dimension_,
                   fragments_per_device,
                   device_count_);

  if (HashtableRecycler::isInvalidHashTableCacheKey(hashtable_cache_key_) &&
      get_inner_table_key().table_id > 0) {
    std::vector<size_t> per_device_chunk_key;
    for (int device_id = 0; device_id < device_count_; ++device_id) {
      auto chunk_key_hash = boost::hash_value(composite_key_info_.cache_key_chunks);
      boost::hash_combine(chunk_key_hash,
                          HashJoin::collectFragmentIds(fragments_per_device[device_id]));
      per_device_chunk_key.push_back(chunk_key_hash);
      AlternativeCacheKeyForBoundingBoxIntersection cache_key{
          inner_outer_pairs_for_cache_lookup,
          columns_per_device.front().join_columns.front().num_elems,
          chunk_key_hash,
          condition_->get_optype(),
          max_hashtable_size_,
          bucket_threshold_,
          {}};
      hashtable_cache_key_[device_id] = getAlternativeCacheKey(cache_key);
      hash_table_cache_->addQueryPlanDagForTableKeys(hashtable_cache_key_[device_id],
                                                     table_keys_);
    }
  }

  if (effective_memory_level_ == Data_Namespace::CPU_LEVEL) {
    std::lock_guard<std::mutex> cpu_hash_table_buff_lock(cpu_hash_table_buff_mutex_);
    if (auto generic_hash_table =
            initHashTableOnCpuFromCache(hashtable_cache_key_.front(),
                                        CacheItemType::BBOX_INTERSECT_HT,
                                        DataRecyclerUtil::CPU_DEVICE_IDENTIFIER)) {
      if (auto hash_table =
              std::dynamic_pointer_cast<BaselineHashTable>(generic_hash_table)) {
        // See if a hash table of a different layout was returned.
        // If it was OneToMany, we can reuse it on ManyToMany.
        if (layout == HashType::ManyToMany &&
            hash_table->getLayout() == HashType::OneToMany) {
          // Use the cached hash table.
          layout_override_ = HashType::ManyToMany;
        }

        if (memory_level_ == Data_Namespace::GPU_LEVEL) {
#ifdef HAVE_CUDA
          for (int device_id = 0; device_id < device_count_; ++device_id) {
            auto gpu_hash_table = copyCpuHashTableToGpu(hash_table, device_id);
            CHECK_LT(size_t(device_id), hash_tables_for_device_.size());
            hash_tables_for_device_[device_id] = std::move(gpu_hash_table);
          }
#else
          UNREACHABLE();
#endif
        } else {
          CHECK_EQ(hash_tables_for_device_.size(), size_t(1));
          // Do not move hash_table, to keep a valid ptr to it within the hash
          // table recycler.
          hash_tables_for_device_[0] = hash_table;
        }
        return;
      }
    }
  }

  auto [entry_count, emitted_keys_count] =
      computeRangeHashTableCounts(shard_count, columns_per_device);

  const auto hash_table_size = calculateHashTableSize(
      inverse_bucket_sizes_for_dimension_.size(), emitted_keys_count, entry_count);

  VLOG(1) << "Finalized range join hash table: entry count " << entry_count
          << " hash table size " << hash_table_size;

  std::vector<std::future<void>> init_threads;
  for (int device_id = 0; device_id < device_count_; ++device_id) {
    init_threads.push_back(
        threading::async(&RangeJoinHashTable::reifyForDevice,
                         this,
                         /* columns_for_device */ columns_per_device[device_id],
                         /* layout_type */ layout,
                         /* entry_count */ entry_count,
                         /* emitted_keys_count */ emitted_keys_count,
                         /* device_id */ device_id,
                         /* parent_thread_local_ids */ logger::thread_local_ids()));
  }
  for (auto& init_thread : init_threads) {
    init_thread.wait();
  }
  for (auto& init_thread : init_threads) {
    init_thread.get();
  }
}
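The two emplace_back calls above turn the constant bucket range r on the right-hand side of the range operator into an inverse bucket size 1/r, once per point dimension. A minimal, self-contained sketch of that arithmetic (hypothetical values, and a plausible flooring scheme chosen for illustration; this is not OmniSciDB code):

#include <cmath>
#include <cstdint>
#include <iostream>
#include <vector>

int main() {
  const double bucket_range = 0.1;  // rhs constant of the RangeOper
  // One inverse size per dimension; reifyWithLayout() pushes the same value
  // twice because a point has two coordinates (x and y).
  const std::vector<double> inverse_bucket_sizes(2, 1.0 / bucket_range);

  const double x = 12.34, y = -5.6;  // hypothetical point coordinates
  // Bucket a coordinate by scaling with the inverse size and flooring.
  const auto bx = static_cast<int64_t>(std::floor(x * inverse_bucket_sizes[0]));
  const auto by = static_cast<int64_t>(std::floor(y * inverse_bucket_sizes[1]));
  std::cout << "bucket = (" << bx << ", " << by << ")\n";  // bucket = (123, -56)
}

Using the inverse (a multiply) rather than dividing by the bucket range per row is the cheaper operation in the per-tuple hashing path.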

+ Here is the call graph for this function:

Member Data Documentation

const double RangeJoinHashTable::bucket_threshold_ {std::numeric_limits<double>::max()}
private

Definition at line 128 of file RangeJoinHashTable.h.

Referenced by computeRangeHashTableCounts(), and reifyWithLayout().

Data_Namespace::MemoryLevel RangeJoinHashTable::effective_memory_level_
private

Definition at line 127 of file RangeJoinHashTable.h.

Referenced by approximateTupleCount(), reifyForDevice(), and reifyWithLayout().

std::shared_ptr<Analyzer::ColumnVar> RangeJoinHashTable::inner_col_expr_
private

Definition at line 126 of file RangeJoinHashTable.h.

const size_t RangeJoinHashTable::max_hashtable_size_ {std::numeric_limits<size_t>::max()}
private

Definition at line 129 of file RangeJoinHashTable.h.

Referenced by computeRangeHashTableCounts(), and reifyWithLayout().
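Both bucket_threshold_ and max_hashtable_size_ are initialized to the largest representable value. Reading max() as an "unset / no limit" sentinel until real bounds are supplied is an assumption about intent, not documented behavior; a minimal sketch of that pattern:

#include <cstddef>
#include <limits>

// Sketch under the assumption that max() means "no limit configured".
struct RangeJoinTuning {
  double bucket_threshold{std::numeric_limits<double>::max()};
  std::size_t max_hashtable_size{std::numeric_limits<std::size_t>::max()};

  bool has_size_cap() const {
    return max_hashtable_size != std::numeric_limits<std::size_t>::max();
  }
};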

const Analyzer::RangeOper* RangeJoinHashTable::range_expr_
private

Definition at line 125 of file RangeJoinHashTable.h.

Referenced by isInnerColCompressed(), and reifyWithLayout().
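As used in reifyWithLayout() above, the range operator supplies both inputs to the build: its left operand is the inner (point) column and its right operand is the constant bucket range. A toy sketch of that shape, with hypothetical stand-in types rather than the real Analyzer classes:

#include <iostream>
#include <string>

// Stand-ins for Analyzer::ColumnVar, Analyzer::Constant, Analyzer::RangeOper.
struct ColumnVar { std::string name; };
struct Constant { double value; };
struct RangeOper {
  ColumnVar left;   // inner geometry (point) column
  Constant right;   // constant bucket range, e.g. a distance bound
};

int main() {
  const RangeOper range_expr{{"t.location"}, {0.1}};
  std::cout << range_expr.left.name << " within range "
            << range_expr.right.value << '\n';
}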


The documentation for this class was generated from the following files:

RangeJoinHashTable.h
RangeJoinHashTable.cpp