OmniSciDB  c1a53651b2
BaselineJoinHashTableBuilder Class Reference

#include <BaselineHashTableBuilder.h>

Public Member Functions

 BaselineJoinHashTableBuilder ()=default
 
template<class KEY_HANDLER >
int initHashTableOnCpu (KEY_HANDLER *key_handler, const CompositeKeyInfo &composite_key_info, const std::vector< JoinColumn > &join_columns, const std::vector< JoinColumnTypeInfo > &join_column_types, const std::vector< JoinBucketInfo > &join_bucket_info, const StrProxyTranslationMapsPtrsAndOffsets &str_proxy_translation_maps_ptrs_and_offsets, const size_t keyspace_entry_count, const size_t keys_for_all_rows, const HashType layout, const JoinType join_type, const size_t key_component_width, const size_t key_component_count, const RegisteredQueryHint &query_hint)
 
void allocateDeviceMemory (const HashType layout, const size_t key_component_width, const size_t key_component_count, const size_t keyspace_entry_count, const size_t emitted_keys_count, const int device_id, const Executor *executor, const RegisteredQueryHint &query_hint)
 
template<class KEY_HANDLER >
int initHashTableOnGpu (KEY_HANDLER *key_handler, const std::vector< JoinColumn > &join_columns, const HashType layout, const JoinType join_type, const size_t key_component_width, const size_t key_component_count, const size_t keyspace_entry_count, const size_t emitted_keys_count, const int device_id, const Executor *executor, const RegisteredQueryHint &query_hint)
 
std::unique_ptr< BaselineHashTable > getHashTable ()
 
void setHashLayout (HashType layout)
 
HashType getHashLayout () const
 

Private Attributes

std::unique_ptr< BaselineHashTable > hash_table_
 
HashType layout_
 

Detailed Description

Definition at line 261 of file BaselineHashTableBuilder.h.
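
BaselineJoinHashTableBuilder builds a baseline (composite-key) join hash table on either CPU or GPU and hands ownership of the result to the caller through getHashTable(). A minimal caller sketch, assuming the key handler, join-column metadata, and query hint have already been prepared (the variable names and the chosen layout/join type below are illustrative placeholders, not values taken from the library):

 BaselineJoinHashTableBuilder builder;
 const int err = builder.initHashTableOnCpu(&key_handler,
                                            composite_key_info,
                                            join_columns,
                                            join_column_types,
                                            join_bucket_info,
                                            str_proxy_translation_maps_ptrs_and_offsets,
                                            keyspace_entry_count,
                                            keys_for_all_rows,
                                            HashType::OneToMany,
                                            JoinType::INNER,
                                            /*key_component_width=*/8,
                                            key_component_count,
                                            query_hint);
 if (err == 0) {
   std::unique_ptr<BaselineHashTable> table = builder.getHashTable();  // ownership moves out
 }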

Constructor & Destructor Documentation

BaselineJoinHashTableBuilder::BaselineJoinHashTableBuilder ( )
default

Member Function Documentation

void BaselineJoinHashTableBuilder::allocateDeviceMemory ( const HashType  layout,
const size_t  key_component_width,
const size_t  key_component_count,
const size_t  keyspace_entry_count,
const size_t  emitted_keys_count,
const int  device_id,
const Executor *  executor,
const RegisteredQueryHint &  query_hint 
)
inline

Definition at line 503 of file BaselineHashTableBuilder.h.

References HashJoin::getHashTypeString(), hash_table_, RegisteredQueryHint::isHintRegistered(), kMaxJoinHashTableSize, HashJoin::layoutRequiresAdditionalBuffers(), RegisteredQueryHint::max_join_hash_table_size, OneToOne, UNREACHABLE, and VLOG.

Referenced by BaselineJoinHashTable::copyCpuHashTableToGpu(), and initHashTableOnGpu().

510  {
511 #ifdef HAVE_CUDA
512  const auto num_hash_entries =
513  (key_component_count + (layout == HashType::OneToOne ? 1 : 0));
514  const auto entry_size = num_hash_entries * key_component_width;
515  const size_t one_to_many_hash_entries =
516  HashJoin::layoutRequiresAdditionalBuffers(layout)
517  ? 2 * keyspace_entry_count + emitted_keys_count
518  : 0;
519  const size_t hash_table_size =
520  entry_size * keyspace_entry_count + one_to_many_hash_entries * sizeof(int32_t);
521 
522  if (query_hint.isHintRegistered(QueryHint::kMaxJoinHashTableSize) &&
523  hash_table_size > query_hint.max_join_hash_table_size) {
524  throw JoinHashTableTooBig(hash_table_size, query_hint.max_join_hash_table_size);
525  }
526 
527  // We can't allocate more than 2GB contiguous memory on GPU and each entry is 4 bytes.
528  if (hash_table_size > static_cast<size_t>(std::numeric_limits<int32_t>::max())) {
529  throw TooManyHashEntries(
530  "Hash tables for GPU requiring larger than 2GB contiguous memory not supported "
531  "yet");
532  }
533 
534  VLOG(1) << "Initialize a GPU baseline hash table for device " << device_id
535  << " with join type " << HashJoin::getHashTypeString(layout)
536  << ", hash table size: " << hash_table_size << " Bytes"
537  << ", # hash entries: " << num_hash_entries << ", entry_size: " << entry_size
538  << ", # entries in the payload buffer: " << one_to_many_hash_entries
539  << " (# non-null hash entries: " << key_component_count
540  << ", # entries stored in the payload buffer: " << emitted_keys_count << ")";
541 
542  hash_table_ = std::make_unique<BaselineHashTable>(executor->getDataMgr(),
543  layout,
544  keyspace_entry_count,
545  emitted_keys_count,
546  hash_table_size,
547  device_id);
548 #else
549  UNREACHABLE();
550 #endif
551  }
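
To make the size computation above concrete, a worked example with hypothetical inputs (not taken from the source): for a one-to-many layout with key_component_width = 8, key_component_count = 2, keyspace_entry_count = 1,000,000 and emitted_keys_count = 5,000,000,

 num_hash_entries         = 2                                          (no extra value slot, since the layout is not OneToOne)
 entry_size               = 2 * 8 = 16 bytes
 one_to_many_hash_entries = 2 * 1,000,000 + 5,000,000 = 7,000,000
 hash_table_size          = 16 * 1,000,000 + 7,000,000 * 4 = 44,000,000 bytes (about 42 MiB)

which stays well under both a typical max_join_hash_table_size hint and the 2^31 - 1 byte (roughly 2 GB) single-allocation limit checked above.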


HashType BaselineJoinHashTableBuilder::getHashLayout ( ) const
inline

Definition at line 694 of file BaselineHashTableBuilder.h.

References layout_.

694 { return layout_; }

std::unique_ptr<BaselineHashTable> BaselineJoinHashTableBuilder::getHashTable ( )
inline

Definition at line 690 of file BaselineHashTableBuilder.h.

References hash_table_.

Referenced by BaselineJoinHashTable::copyCpuHashTableToGpu(), and BaselineJoinHashTable::initHashTableForDevice().

690 { return std::move(hash_table_); }
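
Because the body returns std::move(hash_table_), ownership of the built table transfers to the caller and the builder no longer holds a hash table afterwards, so callers retrieve the result once, after a successful init call.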

template<class KEY_HANDLER >
int BaselineJoinHashTableBuilder::initHashTableOnCpu ( KEY_HANDLER *  key_handler,
const CompositeKeyInfo &  composite_key_info,
const std::vector< JoinColumn > &  join_columns,
const std::vector< JoinColumnTypeInfo > &  join_column_types,
const std::vector< JoinBucketInfo > &  join_bucket_info,
const StrProxyTranslationMapsPtrsAndOffsets &  str_proxy_translation_maps_ptrs_and_offsets,
const size_t  keyspace_entry_count,
const size_t  keys_for_all_rows,
const HashType  layout,
const JoinType  join_type,
const size_t  key_component_width,
const size_t  key_component_count,
const RegisteredQueryHint &  query_hint 
)
inline

Definition at line 265 of file BaselineHashTableBuilder.h.

References ANTI, threading_serial::async(), CHECK, cpu_threads(), DEBUG_TIMER, DEBUG_TIMER_NEW_THREAD, fill_one_to_many_baseline_hash_table_32(), fill_one_to_many_baseline_hash_table_64(), HashJoin::getHashTypeString(), hash_table_, init_baseline_hash_join_buff_32(), init_baseline_hash_join_buff_64(), init_hash_join_buff(), RegisteredQueryHint::isHintRegistered(), kMaxJoinHashTableSize, HashJoin::layoutRequiresAdditionalBuffers(), RegisteredQueryHint::max_join_hash_table_size, OneToOne, SEMI, setHashLayout(), logger::thread_local_ids(), UNREACHABLE, VLOG, and WINDOW_FUNCTION_FRAMING.

Referenced by BaselineJoinHashTable::initHashTableForDevice(), and OverlapsJoinHashTable::initHashTableOnCpu().

278  {
279  auto timer = DEBUG_TIMER(__func__);
280  auto const entry_cnt = (key_component_count + (layout == HashType::OneToOne ? 1 : 0));
281  auto const entry_size = entry_cnt * key_component_width;
282  size_t const one_to_many_hash_entries =
283  HashJoin::layoutRequiresAdditionalBuffers(layout)
284  ? 2 * keyspace_entry_count +
285  (keys_for_all_rows *
286  (1 + (join_type == JoinType::WINDOW_FUNCTION_FRAMING)))
287  : 0;
288  size_t const hash_table_size =
289  entry_size * keyspace_entry_count + one_to_many_hash_entries * sizeof(int32_t);
290 
291  if (query_hint.isHintRegistered(QueryHint::kMaxJoinHashTableSize) &&
292  hash_table_size > query_hint.max_join_hash_table_size) {
293  throw JoinHashTableTooBig(hash_table_size, query_hint.max_join_hash_table_size);
294  }
295 
296  // We can't allocate more than 2GB contiguous memory on GPU and each entry is 4 bytes.
297  if (hash_table_size > static_cast<size_t>(std::numeric_limits<int32_t>::max())) {
298  throw TooManyHashEntries(
299  "Hash tables for GPU requiring larger than 2GB contiguous memory not supported "
300  "yet");
301  }
302  const bool for_semi_join =
303  (join_type == JoinType::SEMI || join_type == JoinType::ANTI) &&
304  layout == HashType::OneToOne;
305 
306  hash_table_ = std::make_unique<BaselineHashTable>(
307  layout, keyspace_entry_count, keys_for_all_rows, hash_table_size);
308  VLOG(1) << "Initialize a CPU baseline hash table for join type "
309  << HashJoin::getHashTypeString(layout)
310  << ", hash table size: " << hash_table_size << " Bytes"
311  << ", # hash entries: " << entry_cnt << ", entry_size: " << entry_size
312  << ", # entries in the payload buffer: " << one_to_many_hash_entries
313  << " (# non-null hash entries: " << keyspace_entry_count
314  << ", # entries stored in the payload buffer: " << keys_for_all_rows << ")";
315  auto cpu_hash_table_ptr = hash_table_->getCpuBuffer();
316  int thread_count = cpu_threads();
317  std::vector<std::future<void>> init_cpu_buff_threads;
318  setHashLayout(layout);
319  {
320  auto timer_init = DEBUG_TIMER("CPU Baseline-Hash: init_baseline_hash_join_buff_32");
321 #ifdef HAVE_TBB
322  switch (key_component_width) {
323  case 4:
324  init_baseline_hash_join_buff_tbb_32(cpu_hash_table_ptr,
325  keyspace_entry_count,
326  key_component_count,
327  layout == HashType::OneToOne,
328  -1);
329  break;
330  case 8:
331  init_baseline_hash_join_buff_tbb_64(cpu_hash_table_ptr,
332  keyspace_entry_count,
333  key_component_count,
334  layout == HashType::OneToOne,
335  -1);
336  break;
337  default:
338  CHECK(false);
339  }
340 #else // #ifdef HAVE_TBB
341  for (int thread_idx = 0; thread_idx < thread_count; ++thread_idx) {
342  init_cpu_buff_threads.emplace_back(std::async(
343  std::launch::async,
344  [keyspace_entry_count,
345  key_component_count,
346  key_component_width,
347  thread_idx,
348  thread_count,
349  cpu_hash_table_ptr,
350  layout,
351  parent_thread_local_ids = logger::thread_local_ids()] {
352  logger::LocalIdsScopeGuard lisg = parent_thread_local_ids.setNewThreadId();
353  DEBUG_TIMER_NEW_THREAD(parent_thread_local_ids.thread_id_);
354  switch (key_component_width) {
355  case 4:
356  init_baseline_hash_join_buff_32(cpu_hash_table_ptr,
357  keyspace_entry_count,
358  key_component_count,
359  layout == HashType::OneToOne,
360  -1,
361  thread_idx,
362  thread_count);
363  break;
364  case 8:
365  init_baseline_hash_join_buff_64(cpu_hash_table_ptr,
366  keyspace_entry_count,
367  key_component_count,
368  layout == HashType::OneToOne,
369  -1,
370  thread_idx,
371  thread_count);
372  break;
373  default:
374  UNREACHABLE();
375  }
376  }));
377  }
378  for (auto& child : init_cpu_buff_threads) {
379  child.get();
380  }
381 #endif // !HAVE_TBB
382  }
383  std::vector<std::future<int>> fill_cpu_buff_threads;
384  for (int thread_idx = 0; thread_idx < thread_count; ++thread_idx) {
385  fill_cpu_buff_threads.emplace_back(std::async(
386  std::launch::async,
387  [key_handler,
388  keyspace_entry_count,
389  &join_columns,
390  key_component_count,
391  key_component_width,
392  layout,
393  thread_idx,
394  cpu_hash_table_ptr,
395  thread_count,
396  for_semi_join,
397  parent_thread_local_ids = logger::thread_local_ids()] {
398  logger::LocalIdsScopeGuard lisg = parent_thread_local_ids.setNewThreadId();
399  DEBUG_TIMER_NEW_THREAD(parent_thread_local_ids.thread_id_);
400  switch (key_component_width) {
401  case 4: {
402  return fill_baseline_hash_join_buff<int32_t>(cpu_hash_table_ptr,
403  keyspace_entry_count,
404  -1,
405  for_semi_join,
406  key_component_count,
407  layout == HashType::OneToOne,
408  key_handler,
409  join_columns[0].num_elems,
410  thread_idx,
411  thread_count);
412  break;
413  }
414  case 8: {
415  return fill_baseline_hash_join_buff<int64_t>(cpu_hash_table_ptr,
416  keyspace_entry_count,
417  -1,
418  for_semi_join,
419  key_component_count,
420  layout == HashType::OneToOne,
421  key_handler,
422  join_columns[0].num_elems,
423  thread_idx,
424  thread_count);
425  break;
426  }
427  default:
428  CHECK(false);
429  }
430  return -1;
431  }));
432  }
433  int err = 0;
434  for (auto& child : fill_cpu_buff_threads) {
435  int partial_err = child.get();
436  if (partial_err) {
437  err = partial_err;
438  }
439  }
440  if (err) {
441  return err;
442  }
443  if (HashJoin::layoutRequiresAdditionalBuffers(layout)) {
444  auto one_to_many_buff = reinterpret_cast<int32_t*>(
445  cpu_hash_table_ptr + keyspace_entry_count * entry_size);
446  {
447  auto timer_init_additional_buffers =
448  DEBUG_TIMER("CPU Baseline-Hash: Additional Buffers init_hash_join_buff");
449  init_hash_join_buff(one_to_many_buff, keyspace_entry_count, -1, 0, 1);
450  }
451  bool is_geo_compressed = false;
452  if constexpr (std::is_same_v<KEY_HANDLER, RangeKeyHandler>) {
453  if (const auto range_handler =
454  reinterpret_cast<const RangeKeyHandler*>(key_handler)) {
455  is_geo_compressed = range_handler->is_compressed_;
456  }
457  }
458  setHashLayout(layout);
459  switch (key_component_width) {
460  case 4: {
461  const auto composite_key_dict = reinterpret_cast<int32_t*>(cpu_hash_table_ptr);
462  fill_one_to_many_baseline_hash_table_32(
463  one_to_many_buff,
464  composite_key_dict,
465  keyspace_entry_count,
466  key_component_count,
467  join_columns,
468  join_column_types,
469  join_bucket_info,
470  str_proxy_translation_maps_ptrs_and_offsets.first,
471  str_proxy_translation_maps_ptrs_and_offsets.second,
472  thread_count,
473  std::is_same_v<KEY_HANDLER, RangeKeyHandler>,
474  is_geo_compressed,
475  join_type == JoinType::WINDOW_FUNCTION_FRAMING);
476  break;
477  }
478  case 8: {
479  const auto composite_key_dict = reinterpret_cast<int64_t*>(cpu_hash_table_ptr);
480  fill_one_to_many_baseline_hash_table_64(
481  one_to_many_buff,
482  composite_key_dict,
483  keyspace_entry_count,
484  key_component_count,
485  join_columns,
486  join_column_types,
487  join_bucket_info,
488  str_proxy_translation_maps_ptrs_and_offsets.first,
489  str_proxy_translation_maps_ptrs_and_offsets.second,
490  thread_count,
491  std::is_same_v<KEY_HANDLER, RangeKeyHandler>,
492  is_geo_compressed,
493  join_type == JoinType::WINDOW_FUNCTION_FRAMING);
494  break;
495  }
496  default:
497  CHECK(false);
498  }
499  }
500  return err;
501  }
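
The init and fill phases above spread their work over cpu_threads() workers launched with std::async, each identified by a (thread_idx, thread_count) pair and each returning a partial error code that the caller folds into a single result. A standalone sketch of that partitioning pattern, with a hypothetical fill_entry callable standing in for the per-entry work (illustrative only, not the library's API):

 #include <cstdint>
 #include <functional>
 #include <future>
 #include <vector>

 int fill_parallel(const int64_t entry_count,
                   const int thread_count,
                   const std::function<int(int64_t)>& fill_entry) {
   std::vector<std::future<int>> workers;
   for (int thread_idx = 0; thread_idx < thread_count; ++thread_idx) {
     workers.emplace_back(std::async(std::launch::async, [=, &fill_entry] {
       // strided partitioning: worker i handles entries i, i + thread_count, ...
       for (int64_t entry = thread_idx; entry < entry_count; entry += thread_count) {
         if (const int rc = fill_entry(entry)) {
           return rc;  // propagate the first failure seen by this worker
         }
       }
       return 0;
     }));
   }
   int err = 0;
   for (auto& worker : workers) {
     if (const int partial_err = worker.get()) {
       err = partial_err;  // keep the last nonzero code, mirroring the member function
     }
   }
   return err;
 }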


template<class KEY_HANDLER >
int BaselineJoinHashTableBuilder::initHashTableOnGpu ( KEY_HANDLER *  key_handler,
const std::vector< JoinColumn > &  join_columns,
const HashType  layout,
const JoinType  join_type,
const size_t  key_component_width,
const size_t  key_component_count,
const size_t  keyspace_entry_count,
const size_t  emitted_keys_count,
const int  device_id,
const Executor *  executor,
const RegisteredQueryHint &  query_hint 
)
inline

Definition at line 554 of file BaselineHashTableBuilder.h.

References allocateDeviceMemory(), ANTI, CHECK, DEBUG_TIMER, getQueryEngineCudaStreamForDevice(), hash_table_, init_baseline_hash_join_buff_on_device_32(), init_baseline_hash_join_buff_on_device_64(), init_hash_join_buff_on_device(), HashJoin::layoutRequiresAdditionalBuffers(), OneToOne, SEMI, setHashLayout(), transfer_flat_object_to_gpu(), UNREACHABLE, and WINDOW_FUNCTION_FRAMING.

Referenced by BaselineJoinHashTable::initHashTableForDevice().

564  {
565  auto timer = DEBUG_TIMER(__func__);
566  int err = 0;
567 #ifdef HAVE_CUDA
568  allocateDeviceMemory(layout,
569  key_component_width,
570  key_component_count,
571  keyspace_entry_count,
572  emitted_keys_count,
573  device_id,
574  executor,
575  query_hint);
576  if (!keyspace_entry_count) {
577  // need to "allocate" the empty hash table first
578  CHECK(!emitted_keys_count);
579  return 0;
580  }
581  auto data_mgr = executor->getDataMgr();
582  auto allocator = std::make_unique<CudaAllocator>(
583  data_mgr, device_id, getQueryEngineCudaStreamForDevice(device_id));
584  auto dev_err_buff = allocator->alloc(sizeof(int));
585 
586  allocator->copyToDevice(dev_err_buff, &err, sizeof(err));
587  auto gpu_hash_table_buff = hash_table_->getGpuBuffer();
588  CHECK(gpu_hash_table_buff);
589  const bool for_semi_join =
590  (join_type == JoinType::SEMI || join_type == JoinType::ANTI) &&
591  layout == HashType::OneToOne;
592  setHashLayout(layout);
593  const auto key_handler_gpu = transfer_flat_object_to_gpu(*key_handler, *allocator);
594  switch (key_component_width) {
595  case 4:
596  init_baseline_hash_join_buff_on_device_32(gpu_hash_table_buff,
597  keyspace_entry_count,
598  key_component_count,
599  layout == HashType::OneToOne,
600  -1);
601  break;
602  case 8:
603  init_baseline_hash_join_buff_on_device_64(gpu_hash_table_buff,
604  keyspace_entry_count,
605  key_component_count,
606  layout == HashType::OneToOne,
607  -1);
608  break;
609  default:
610  UNREACHABLE();
611  }
612  switch (key_component_width) {
613  case 4: {
614  fill_baseline_hash_join_buff_on_device<int32_t>(
615  gpu_hash_table_buff,
616  keyspace_entry_count,
617  -1,
618  for_semi_join,
619  key_component_count,
620  layout == HashType::OneToOne,
621  reinterpret_cast<int*>(dev_err_buff),
622  key_handler_gpu,
623  join_columns.front().num_elems);
624  allocator->copyFromDevice(&err, dev_err_buff, sizeof(err));
625  break;
626  }
627  case 8: {
628  fill_baseline_hash_join_buff_on_device<int64_t>(
629  gpu_hash_table_buff,
630  keyspace_entry_count,
631  -1,
632  for_semi_join,
633  key_component_count,
634  layout == HashType::OneToOne,
635  reinterpret_cast<int*>(dev_err_buff),
636  key_handler_gpu,
637  join_columns.front().num_elems);
638  allocator->copyFromDevice(&err, dev_err_buff, sizeof(err));
639  break;
640  }
641  default:
642  UNREACHABLE();
643  }
644  if (err) {
645  return err;
646  }
647  if (HashJoin::layoutRequiresAdditionalBuffers(layout)) {
648  const auto entry_size = key_component_count * key_component_width;
649  auto one_to_many_buff = reinterpret_cast<int32_t*>(
650  gpu_hash_table_buff + keyspace_entry_count * entry_size);
651  init_hash_join_buff_on_device(one_to_many_buff, keyspace_entry_count, -1);
652  setHashLayout(layout);
653  switch (key_component_width) {
654  case 4: {
655  const auto composite_key_dict = reinterpret_cast<int32_t*>(gpu_hash_table_buff);
656  fill_one_to_many_baseline_hash_table_on_device<int32_t>(
657  one_to_many_buff,
658  composite_key_dict,
659  keyspace_entry_count,
660  key_component_count,
661  key_handler_gpu,
662  join_columns.front().num_elems,
663  join_type == JoinType::WINDOW_FUNCTION_FRAMING);
664 
665  break;
666  }
667  case 8: {
668  const auto composite_key_dict = reinterpret_cast<int64_t*>(gpu_hash_table_buff);
669  fill_one_to_many_baseline_hash_table_on_device<int64_t>(
670  one_to_many_buff,
671  composite_key_dict,
672  keyspace_entry_count,
673  key_component_count,
674  key_handler_gpu,
675  join_columns.front().num_elems,
676  join_type == JoinType::WINDOW_FUNCTION_FRAMING);
677 
678  break;
679  }
680  default:
681  UNREACHABLE();
682  }
683  }
684 #else
685  UNREACHABLE();
686 #endif
687  return err;
688  }
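
As the listing shows, device-side failures are reported through a one-int error buffer: err is copied into dev_err_buff before the fill kernels run, each fill_baseline_hash_join_buff_on_device call copies it back to the host, and a nonzero value is returned to the caller just as the CPU path returns its folded partial error codes. Note also the early return when keyspace_entry_count is zero: allocateDeviceMemory is still called first, so downstream code receives a valid (empty) hash table object.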


void BaselineJoinHashTableBuilder::setHashLayout ( HashType  layout)
inline

Definition at line 692 of file BaselineHashTableBuilder.h.

References layout_.

Referenced by initHashTableOnCpu(), and initHashTableOnGpu().

692 { layout_ = layout; }


Member Data Documentation

std::unique_ptr<BaselineHashTable> BaselineJoinHashTableBuilder::hash_table_
private

HashType BaselineJoinHashTableBuilder::layout_
private

Definition at line 698 of file BaselineHashTableBuilder.h.

Referenced by getHashLayout(), and setHashLayout().


The documentation for this class was generated from the following file:

BaselineHashTableBuilder.h