OmniSciDB 085a039ca4
HashJoin.cpp
1 /*
2  * Copyright 2019 MapD Technologies, Inc.
3  *
4  * Licensed under the Apache License, Version 2.0 (the "License");
5  * you may not use this file except in compliance with the License.
6  * You may obtain a copy of the License at
7  *
8  * http://www.apache.org/licenses/LICENSE-2.0
9  *
10  * Unless required by applicable law or agreed to in writing, software
11  * distributed under the License is distributed on an "AS IS" BASIS,
12  * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13  * See the License for the specific language governing permissions and
14  * limitations under the License.
15  */
16 
18 
22 #include "QueryEngine/Execute.h"
30 
31 #include <sstream>
32 
33 extern bool g_enable_overlaps_hashjoin;
34 
35 void ColumnsForDevice::setBucketInfo(
36  const std::vector<double>& inverse_bucket_sizes_for_dimension,
37  const std::vector<InnerOuter> inner_outer_pairs) {
38  join_buckets.clear();
39 
40  CHECK_EQ(inner_outer_pairs.size(), join_columns.size());
41  CHECK_EQ(join_columns.size(), join_column_types.size());
42  for (size_t i = 0; i < join_columns.size(); i++) {
43  const auto& inner_outer_pair = inner_outer_pairs[i];
44  const auto inner_col = inner_outer_pair.first;
45  const auto& ti = inner_col->get_type_info();
46  const auto elem_ti = ti.get_elem_type();
47  // CHECK(elem_ti.is_fp());
48 
49  join_buckets.emplace_back(JoinBucketInfo{inverse_bucket_sizes_for_dimension,
50  elem_ti.get_type() == kDOUBLE});
51  }
52 }
53 
58 JoinColumn HashJoin::fetchJoinColumn(
59  const Analyzer::ColumnVar* hash_col,
60  const std::vector<Fragmenter_Namespace::FragmentInfo>& fragment_info,
61  const Data_Namespace::MemoryLevel effective_memory_level,
62  const int device_id,
63  std::vector<std::shared_ptr<Chunk_NS::Chunk>>& chunks_owner,
64  DeviceAllocator* dev_buff_owner,
65  std::vector<std::shared_ptr<void>>& malloc_owner,
66  Executor* executor,
67  ColumnCacheMap* column_cache) {
68  static std::mutex fragment_fetch_mutex;
69  std::lock_guard<std::mutex> fragment_fetch_lock(fragment_fetch_mutex);
70  try {
71  JoinColumn join_column = ColumnFetcher::makeJoinColumn(executor,
72  *hash_col,
73  fragment_info,
74  effective_memory_level,
75  device_id,
76  dev_buff_owner,
77  /*thread_idx=*/0,
78  chunks_owner,
79  malloc_owner,
80  *column_cache);
81  if (effective_memory_level == Data_Namespace::GPU_LEVEL) {
82  CHECK(dev_buff_owner);
83  auto device_col_chunks_buff = dev_buff_owner->alloc(join_column.col_chunks_buff_sz);
84  dev_buff_owner->copyToDevice(device_col_chunks_buff,
85  join_column.col_chunks_buff,
86  join_column.col_chunks_buff_sz);
87  join_column.col_chunks_buff = device_col_chunks_buff;
88  }
89  return join_column;
90  } catch (...) {
91  throw FailedToFetchColumn();
92  }
93 }
94 
95 namespace {
96 
97 template <typename T>
98 std::string toStringFlat(const HashJoin* hash_table,
99  const ExecutorDeviceType device_type,
100  const int device_id) {
101  auto mem =
102  reinterpret_cast<const T*>(hash_table->getJoinHashBuffer(device_type, device_id));
103  auto memsz = hash_table->getJoinHashBufferSize(device_type, device_id) / sizeof(T);
104  std::string txt;
105  for (size_t i = 0; i < memsz; ++i) {
106  if (i > 0) {
107  txt += ", ";
108  }
109  txt += std::to_string(mem[i]);
110  }
111  return txt;
112 }
113 
114 } // anonymous namespace
115 
116 std::string HashJoin::toStringFlat64(const ExecutorDeviceType device_type,
117  const int device_id) const {
118  return toStringFlat<int64_t>(this, device_type, device_id);
119 }
120 
121 std::string HashJoin::toStringFlat32(const ExecutorDeviceType device_type,
122  const int device_id) const {
123  return toStringFlat<int32_t>(this, device_type, device_id);
124 }
125 
126 std::ostream& operator<<(std::ostream& os, const DecodedJoinHashBufferEntry& e) {
127  os << " {{";
128  bool first = true;
129  for (auto k : e.key) {
130  if (!first) {
131  os << ",";
132  } else {
133  first = false;
134  }
135  os << k;
136  }
137  os << "}, ";
138  os << "{";
139  first = true;
140  for (auto p : e.payload) {
141  if (!first) {
142  os << ", ";
143  } else {
144  first = false;
145  }
146  os << p;
147  }
148  os << "}}";
149  return os;
150 }
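// For illustration: a decoded entry with composite key {3, 7} and payload rows
// {0, 2} is streamed by the operator above as " {{3,7}, {0, 2}}".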
151 
152 std::ostream& operator<<(std::ostream& os, const DecodedJoinHashBufferSet& s) {
153  os << "{\n";
154  bool first = true;
155  for (auto e : s) {
156  if (!first) {
157  os << ",\n";
158  } else {
159  first = false;
160  }
161  os << e;
162  }
163  if (!s.empty()) {
164  os << "\n";
165  }
166  os << "}\n";
167  return os;
168 }
169 
170 std::ostream& operator<<(std::ostream& os,
171  const InnerOuterStringOpInfos& inner_outer_string_op_infos) {
172  os << "(" << inner_outer_string_op_infos.first << ", "
173  << inner_outer_string_op_infos.second << ")";
174  return os;
175 }
176 
177 std::string toString(const InnerOuterStringOpInfos& inner_outer_string_op_infos) {
178  std::ostringstream os;
179  os << inner_outer_string_op_infos;
180  return os.str();
181 }
182 
183 std::ostream& operator<<(
184  std::ostream& os,
185  const std::vector<InnerOuterStringOpInfos>& inner_outer_string_op_infos_pairs) {
186  os << "[";
187  bool first_elem = true;
188  for (const auto& inner_outer_string_op_infos : inner_outer_string_op_infos_pairs) {
189  if (!first_elem) {
190  os << ", ";
191  }
192  first_elem = false;
193  os << inner_outer_string_op_infos;
194  }
195  os << "]";
196  return os;
197 }
198 
199 std::string toString(
200  const std::vector<InnerOuterStringOpInfos>& inner_outer_string_op_infos_pairs) {
201  std::ostringstream os;
202  os << inner_outer_string_op_infos_pairs;
203  return os.str();
204 }
205 
206 HashJoinMatchingSet HashJoin::codegenMatchingSet(
207  const std::vector<llvm::Value*>& hash_join_idx_args_in,
208  const bool is_sharded,
209  const bool col_is_nullable,
210  const bool is_bw_eq,
211  const int64_t sub_buff_size,
212  Executor* executor,
213  bool is_bucketized) {
214  AUTOMATIC_IR_METADATA(executor->cgen_state_.get());
215  using namespace std::string_literals;
216 
217  std::string fname(is_bucketized ? "bucketized_hash_join_idx"s : "hash_join_idx"s);
218 
219  if (is_bw_eq) {
220  fname += "_bitwise";
221  }
222  if (is_sharded) {
223  fname += "_sharded";
224  }
225  if (!is_bw_eq && col_is_nullable) {
226  fname += "_nullable";
227  }
228 
229  const auto slot_lv = executor->cgen_state_->emitCall(fname, hash_join_idx_args_in);
230  const auto slot_valid_lv = executor->cgen_state_->ir_builder_.CreateICmpSGE(
231  slot_lv, executor->cgen_state_->llInt(int64_t(0)));
232 
233  auto pos_ptr = hash_join_idx_args_in[0];
234  CHECK(pos_ptr);
235 
236  auto count_ptr = executor->cgen_state_->ir_builder_.CreateAdd(
237  pos_ptr, executor->cgen_state_->llInt(sub_buff_size));
238  auto hash_join_idx_args = hash_join_idx_args_in;
239  hash_join_idx_args[0] = executor->cgen_state_->ir_builder_.CreatePtrToInt(
240  count_ptr, llvm::Type::getInt64Ty(executor->cgen_state_->context_));
241 
242  const auto row_count_lv = executor->cgen_state_->ir_builder_.CreateSelect(
243  slot_valid_lv,
244  executor->cgen_state_->emitCall(fname, hash_join_idx_args),
245  executor->cgen_state_->llInt(int64_t(0)));
246  auto rowid_base_i32 = executor->cgen_state_->ir_builder_.CreateIntToPtr(
247  executor->cgen_state_->ir_builder_.CreateAdd(
248  pos_ptr, executor->cgen_state_->llInt(2 * sub_buff_size)),
249  llvm::Type::getInt32PtrTy(executor->cgen_state_->context_));
250  auto rowid_ptr_i32 = executor->cgen_state_->ir_builder_.CreateGEP(
251  rowid_base_i32->getType()->getScalarType()->getPointerElementType(),
252  rowid_base_i32,
253  slot_lv);
254  return {rowid_ptr_i32, row_count_lv, slot_lv};
255 }
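// The calls emitted above assume the one-to-many hash table layout used by the
// join runtime: three equally sized sub-buffers laid out back to back and
// addressed relative to hash_join_idx_args_in[0]:
//
//   pos_ptr                      -> slot offsets (probed via hash_join_idx)
//   pos_ptr + sub_buff_size      -> per-slot match counts
//   pos_ptr + 2 * sub_buff_size  -> int32 row id payload
//
// The returned triple is {rowid_ptr_i32, row_count_lv, slot_lv}.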
256 
257 llvm::Value* HashJoin::codegenHashTableLoad(const size_t table_idx, Executor* executor) {
258  AUTOMATIC_IR_METADATA(executor->cgen_state_.get());
259  llvm::Value* hash_ptr = nullptr;
260  const auto total_table_count =
261  executor->plan_state_->join_info_.join_hash_tables_.size();
262  CHECK_LT(table_idx, total_table_count);
263  if (total_table_count > 1) {
264  auto hash_tables_ptr =
265  get_arg_by_name(executor->cgen_state_->row_func_, "join_hash_tables");
266  auto hash_pptr =
267  table_idx > 0
268  ? executor->cgen_state_->ir_builder_.CreateGEP(
269  hash_tables_ptr->getType()->getScalarType()->getPointerElementType(),
270  hash_tables_ptr,
271  executor->cgen_state_->llInt(static_cast<int64_t>(table_idx)))
272  : hash_tables_ptr;
273  hash_ptr = executor->cgen_state_->ir_builder_.CreateLoad(
274  hash_pptr->getType()->getPointerElementType(), hash_pptr);
275  } else {
276  hash_ptr = get_arg_by_name(executor->cgen_state_->row_func_, "join_hash_tables");
277  }
278  CHECK(hash_ptr);
279  return hash_ptr;
280 }
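// When several join hash tables are in flight, the "join_hash_tables" argument
// is treated as an array of table pointers and the entry at table_idx is
// loaded; with a single table the argument itself is already the table pointer.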
281 
282 //! Make hash table from an in-flight SQL query's parse tree etc.
283 std::shared_ptr<HashJoin> HashJoin::getInstance(
284  const std::shared_ptr<Analyzer::BinOper> qual_bin_oper,
285  const std::vector<InputTableInfo>& query_infos,
286  const Data_Namespace::MemoryLevel memory_level,
287  const JoinType join_type,
288  const HashType preferred_hash_type,
289  const int device_count,
290  ColumnCacheMap& column_cache,
291  Executor* executor,
292  const HashTableBuildDagMap& hashtable_build_dag_map,
293  const RegisteredQueryHint& query_hint,
294  const TableIdToNodeMap& table_id_to_node_map) {
295  auto timer = DEBUG_TIMER(__func__);
296  std::shared_ptr<HashJoin> join_hash_table;
297  CHECK_GT(device_count, 0);
298  if (!g_enable_overlaps_hashjoin && qual_bin_oper->is_overlaps_oper()) {
299  throw std::runtime_error(
300  "Overlaps hash join disabled, attempting to fall back to loop join");
301  }
302  if (qual_bin_oper->is_overlaps_oper()) {
303  VLOG(1) << "Trying to build geo hash table:";
304  join_hash_table = OverlapsJoinHashTable::getInstance(qual_bin_oper,
305  query_infos,
306  memory_level,
307  join_type,
308  device_count,
309  column_cache,
310  executor,
311  hashtable_build_dag_map,
312  query_hint,
313  table_id_to_node_map);
314  } else if (dynamic_cast<const Analyzer::ExpressionTuple*>(
315  qual_bin_oper->get_left_operand())) {
316  VLOG(1) << "Trying to build keyed hash table:";
317  join_hash_table = BaselineJoinHashTable::getInstance(qual_bin_oper,
318  query_infos,
319  memory_level,
320  join_type,
321  preferred_hash_type,
322  device_count,
323  column_cache,
324  executor,
325  hashtable_build_dag_map,
326  table_id_to_node_map);
327  } else {
328  try {
329  VLOG(1) << "Trying to build perfect hash table:";
330  join_hash_table = PerfectJoinHashTable::getInstance(qual_bin_oper,
331  query_infos,
332  memory_level,
333  join_type,
334  preferred_hash_type,
335  device_count,
336  column_cache,
337  executor,
338  hashtable_build_dag_map,
339  table_id_to_node_map);
340  } catch (TooManyHashEntries&) {
341  const auto join_quals = coalesce_singleton_equi_join(qual_bin_oper);
342  CHECK_EQ(join_quals.size(), size_t(1));
343  const auto join_qual =
344  std::dynamic_pointer_cast<Analyzer::BinOper>(join_quals.front());
345  VLOG(1) << "Trying to build keyed hash table after perfect hash table:";
346  join_hash_table = BaselineJoinHashTable::getInstance(join_qual,
347  query_infos,
348  memory_level,
349  join_type,
350  preferred_hash_type,
351  device_count,
352  column_cache,
353  executor,
354  hashtable_build_dag_map,
355  table_id_to_node_map);
356  }
357  }
358  CHECK(join_hash_table);
359  if (VLOGGING(2)) {
360  if (join_hash_table->getMemoryLevel() == Data_Namespace::MemoryLevel::GPU_LEVEL) {
361  for (int device_id = 0; device_id < join_hash_table->getDeviceCount();
362  ++device_id) {
363  if (join_hash_table->getJoinHashBufferSize(ExecutorDeviceType::GPU, device_id) <=
364  1000) {
365  VLOG(2) << "Built GPU hash table: "
366  << join_hash_table->toString(ExecutorDeviceType::GPU, device_id);
367  }
368  }
369  } else {
370  if (join_hash_table->getJoinHashBufferSize(ExecutorDeviceType::CPU) <= 1000) {
371  VLOG(2) << "Built CPU hash table: "
372  << join_hash_table->toString(ExecutorDeviceType::CPU);
373  }
374  }
375  }
376  return join_hash_table;
377 }
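// A minimal usage sketch for a CPU build (hedged: qual_bin_oper, query_infos,
// executor, hashtable_build_dag_map and table_id_to_node_map are assumed to be
// supplied by the surrounding query plan, as RelAlgExecutor does):
//
//   ColumnCacheMap column_cache;
//   auto join_hash_table = HashJoin::getInstance(qual_bin_oper,
//                                                query_infos,
//                                                Data_Namespace::CPU_LEVEL,
//                                                JoinType::INNER,
//                                                HashType::OneToOne,
//                                                /*device_count=*/1,
//                                                column_cache,
//                                                executor,
//                                                hashtable_build_dag_map,
//                                                RegisteredQueryHint::defaults(),
//                                                table_id_to_node_map);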
378 
379 std::pair<const StringDictionaryProxy*, StringDictionaryProxy*>
380 HashJoin::getStrDictProxies(const InnerOuter& cols,
381  const Executor* executor,
382  const bool has_string_ops) {
383  const auto inner_col = cols.first;
384  CHECK(inner_col);
385  const auto inner_ti = inner_col->get_type_info();
386  const auto outer_col = dynamic_cast<const Analyzer::ColumnVar*>(cols.second);
387  std::pair<const StringDictionaryProxy*, StringDictionaryProxy*>
388  inner_outer_str_dict_proxies{nullptr, nullptr};
389  if (inner_ti.is_string() && outer_col) {
390  CHECK(outer_col->get_type_info().is_string());
391  inner_outer_str_dict_proxies.first =
392  executor->getStringDictionaryProxy(inner_col->get_comp_param(), true);
393  CHECK(inner_outer_str_dict_proxies.first);
394  inner_outer_str_dict_proxies.second =
395  executor->getStringDictionaryProxy(outer_col->get_comp_param(), true);
396  CHECK(inner_outer_str_dict_proxies.second);
397  if (!has_string_ops &&
398  *inner_outer_str_dict_proxies.first == *inner_outer_str_dict_proxies.second) {
399  // Dictionaries are the same - don't need to translate
400  CHECK(inner_col->get_comp_param() == outer_col->get_comp_param());
401  inner_outer_str_dict_proxies.first = nullptr;
402  inner_outer_str_dict_proxies.second = nullptr;
403  }
404  }
405  return inner_outer_str_dict_proxies;
406 }
407 
408 const StringDictionaryProxy::IdMap* HashJoin::translateInnerToOuterStrDictProxies(
409  const InnerOuter& cols,
410  const InnerOuterStringOpInfos& inner_outer_string_op_infos,
411  ExpressionRange& col_range,
412  const Executor* executor) {
413  const bool has_string_ops = inner_outer_string_op_infos.first.size() ||
414  inner_outer_string_op_infos.second.size();
415  const auto inner_outer_proxies =
416  HashJoin::getStrDictProxies(cols, executor, has_string_ops);
417  const bool translate_dictionary =
418  inner_outer_proxies.first && inner_outer_proxies.second;
419  if (translate_dictionary) {
420  const auto inner_dict_id = inner_outer_proxies.first->getDictId();
421  const auto outer_dict_id = inner_outer_proxies.second->getDictId();
422  CHECK(has_string_ops || inner_dict_id != outer_dict_id);
423  const auto id_map = executor->getJoinIntersectionStringProxyTranslationMap(
424  inner_outer_proxies.first,
425  inner_outer_proxies.second,
426  inner_outer_string_op_infos.first,
427  inner_outer_string_op_infos.second,
428  executor->getRowSetMemoryOwner());
429  if (!inner_outer_string_op_infos.second.empty()) {
430  // String op was applied to lhs table,
431  // need to expand column range appropriately
432  col_range = ExpressionRange::makeIntRange(
433  std::min(col_range.getIntMin(),
434  static_cast<int64_t>(
435  inner_outer_proxies.second->transientEntryCount() + 1) *
436  -1),
437  col_range.getIntMax(),
438  0,
439  col_range.hasNulls());
440  }
441  return id_map;
442  }
443  return nullptr;
444 }
445 
446 std::vector<int> HashJoin::collectFragmentIds(
447  const std::vector<Fragmenter_Namespace::FragmentInfo>& fragments) {
448  auto const fragment_id = [](auto const& frag_info) { return frag_info.fragmentId; };
449  std::vector<int> frag_ids(fragments.size());
450  std::transform(fragments.cbegin(), fragments.cend(), frag_ids.begin(), fragment_id);
451  std::sort(frag_ids.begin(), frag_ids.end());
452  return frag_ids;
453 }
454 
455 CompositeKeyInfo HashJoin::getCompositeKeyInfo(
456  const std::vector<InnerOuter>& inner_outer_pairs,
457  const Executor* executor,
458  const std::vector<InnerOuterStringOpInfos>& inner_outer_string_op_infos_pairs) {
459  CHECK(executor);
460  std::vector<const void*> sd_inner_proxy_per_key;
461  std::vector<void*> sd_outer_proxy_per_key;
462  std::vector<ChunkKey> cache_key_chunks; // used for the cache key
463  const auto db_id = executor->getCatalog()->getCurrentDB().dbId;
464  const bool has_string_op_infos = inner_outer_string_op_infos_pairs.size();
465  if (has_string_op_infos) {
466  CHECK_EQ(inner_outer_pairs.size(), inner_outer_string_op_infos_pairs.size());
467  }
468  size_t string_op_info_pairs_idx = 0;
469  for (const auto& inner_outer_pair : inner_outer_pairs) {
470  const auto inner_col = inner_outer_pair.first;
471  const auto outer_col = inner_outer_pair.second;
472  const auto& inner_ti = inner_col->get_type_info();
473  const auto& outer_ti = outer_col->get_type_info();
474  ChunkKey cache_key_chunks_for_column{
475  db_id, inner_col->get_table_id(), inner_col->get_column_id()};
476  if (inner_ti.is_string() &&
477  (!(inner_ti.get_comp_param() == outer_ti.get_comp_param()) ||
478  (has_string_op_infos &&
479  (inner_outer_string_op_infos_pairs[string_op_info_pairs_idx].first.size() ||
480  inner_outer_string_op_infos_pairs[string_op_info_pairs_idx].second.size())))) {
481  CHECK(outer_ti.is_string());
482  CHECK(inner_ti.get_compression() == kENCODING_DICT &&
483  outer_ti.get_compression() == kENCODING_DICT);
484  const auto sd_inner_proxy = executor->getStringDictionaryProxy(
485  inner_ti.get_comp_param(), executor->getRowSetMemoryOwner(), true);
486  auto sd_outer_proxy = executor->getStringDictionaryProxy(
487  outer_ti.get_comp_param(), executor->getRowSetMemoryOwner(), true);
488  CHECK(sd_inner_proxy && sd_outer_proxy);
489  sd_inner_proxy_per_key.push_back(sd_inner_proxy);
490  sd_outer_proxy_per_key.push_back(sd_outer_proxy);
491  cache_key_chunks_for_column.push_back(sd_outer_proxy->getGeneration());
492  } else {
493  sd_inner_proxy_per_key.emplace_back();
494  sd_outer_proxy_per_key.emplace_back();
495  }
496  cache_key_chunks.push_back(cache_key_chunks_for_column);
497  string_op_info_pairs_idx++;
498  }
499  return {sd_inner_proxy_per_key, sd_outer_proxy_per_key, cache_key_chunks};
500 }
501 
502 std::vector<const StringDictionaryProxy::IdMap*>
503 HashJoin::translateCompositeStrDictProxies(
504  const CompositeKeyInfo& composite_key_info,
505  const std::vector<InnerOuterStringOpInfos>& string_op_infos_for_keys,
506  const Executor* executor) {
507  const auto& inner_proxies = composite_key_info.sd_inner_proxy_per_key;
508  const auto& outer_proxies = composite_key_info.sd_outer_proxy_per_key;
509  const size_t num_proxies = inner_proxies.size();
510  CHECK_EQ(num_proxies, outer_proxies.size());
511  std::vector<const StringDictionaryProxy::IdMap*> proxy_translation_maps;
512  proxy_translation_maps.reserve(num_proxies);
513  for (size_t proxy_pair_idx = 0; proxy_pair_idx < num_proxies; ++proxy_pair_idx) {
514  const bool translate_proxies =
515  inner_proxies[proxy_pair_idx] && outer_proxies[proxy_pair_idx];
516  if (translate_proxies) {
517  const auto inner_proxy =
518  reinterpret_cast<const StringDictionaryProxy*>(inner_proxies[proxy_pair_idx]);
519  auto outer_proxy =
520  reinterpret_cast<StringDictionaryProxy*>(outer_proxies[proxy_pair_idx]);
521  CHECK(inner_proxy);
522  CHECK(outer_proxy);
523 
524  CHECK_NE(inner_proxy->getDictId(), outer_proxy->getDictId());
525  proxy_translation_maps.emplace_back(
526  executor->getJoinIntersectionStringProxyTranslationMap(
527  inner_proxy,
528  outer_proxy,
529  string_op_infos_for_keys[proxy_pair_idx].first,
530  string_op_infos_for_keys[proxy_pair_idx].second,
531  executor->getRowSetMemoryOwner()));
532  } else {
533  proxy_translation_maps.emplace_back(nullptr);
534  }
535  }
536  return proxy_translation_maps;
537 }
538 
539 llvm::Value* HashJoin::codegenColOrStringOper(
540  const Analyzer::Expr* col_or_string_oper,
541  const std::vector<StringOps_Namespace::StringOpInfo>& string_op_infos,
542  CodeGenerator& code_generator,
543  const CompilationOptions& co) {
544  if (!string_op_infos.empty()) {
545  const auto coerced_col_var =
546  dynamic_cast<const Analyzer::ColumnVar*>(col_or_string_oper);
547  CHECK(coerced_col_var);
548  std::vector<llvm::Value*> codegen_val_vec{
549  code_generator.codegenPseudoStringOper(coerced_col_var, string_op_infos, co)};
550  return codegen_val_vec[0];
551  }
552  return code_generator.codegen(col_or_string_oper, true, co)[0];
553 }
554 
555 std::shared_ptr<Analyzer::ColumnVar> getSyntheticColumnVar(std::string_view table,
556  std::string_view column,
557  int rte_idx,
558  Executor* executor) {
559  auto catalog = executor->getCatalog();
560  CHECK(catalog);
561 
562  auto tmeta = catalog->getMetadataForTable(std::string(table));
563  CHECK(tmeta);
564 
565  auto cmeta = catalog->getMetadataForColumn(tmeta->tableId, std::string(column));
566  CHECK(cmeta);
567 
568  auto ti = cmeta->columnType;
569 
570  if (ti.is_geometry() && ti.get_type() != kPOINT) {
571  int geoColumnId{0};
572  switch (ti.get_type()) {
573  case kLINESTRING: {
574  geoColumnId = cmeta->columnId + 2;
575  break;
576  }
577  case kPOLYGON: {
578  geoColumnId = cmeta->columnId + 3;
579  break;
580  }
581  case kMULTIPOLYGON: {
582  geoColumnId = cmeta->columnId + 4;
583  break;
584  }
585  default:
586  CHECK(false);
587  }
588  cmeta = catalog->getMetadataForColumn(tmeta->tableId, geoColumnId);
589  CHECK(cmeta);
590  ti = cmeta->columnType;
591  }
592 
593  auto cv =
594  std::make_shared<Analyzer::ColumnVar>(ti, tmeta->tableId, cmeta->columnId, rte_idx);
595  return cv;
596 }
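// The +2/+3/+4 column id offsets above pick out the physical "bounds" column
// that the catalog lays down alongside a LINESTRING / POLYGON / MULTIPOLYGON
// column; overlaps joins probe that fixed-length bounds array rather than the
// logical geometry column itself.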
597 
598 class AllColumnVarsVisitor
599  : public ScalarExprVisitor<std::set<const Analyzer::ColumnVar*>> {
600  protected:
601  std::set<const Analyzer::ColumnVar*> visitColumnVar(
602  const Analyzer::ColumnVar* column) const override {
603  return {column};
604  }
605 
606  std::set<const Analyzer::ColumnVar*> visitColumnVarTuple(
607  const Analyzer::ExpressionTuple* expr_tuple) const override {
608  AllColumnVarsVisitor visitor;
609  std::set<const Analyzer::ColumnVar*> result;
610  for (const auto& expr_component : expr_tuple->getTuple()) {
611  const auto component_rte_set = visitor.visit(expr_component.get());
612  result.insert(component_rte_set.begin(), component_rte_set.end());
613  }
614  return result;
615  }
616 
617  std::set<const Analyzer::ColumnVar*> aggregateResult(
618  const std::set<const Analyzer::ColumnVar*>& aggregate,
619  const std::set<const Analyzer::ColumnVar*>& next_result) const override {
620  auto result = aggregate;
621  result.insert(next_result.begin(), next_result.end());
622  return result;
623  }
624 };
625 
626 void setupSyntheticCaching(std::set<const Analyzer::ColumnVar*> cvs, Executor* executor) {
627  std::unordered_set<int> phys_table_ids;
628  for (auto cv : cvs) {
629  phys_table_ids.insert(cv->get_table_id());
630  }
631 
632  std::unordered_set<PhysicalInput> phys_inputs;
633  for (auto cv : cvs) {
634  phys_inputs.emplace(PhysicalInput{cv->get_column_id(), cv->get_table_id()});
635  }
636 
637  executor->setupCaching(phys_inputs, phys_table_ids);
638 }
639 
640 std::vector<InputTableInfo> getSyntheticInputTableInfo(
641  std::set<const Analyzer::ColumnVar*> cvs,
642  Executor* executor) {
643  auto catalog = executor->getCatalog();
644  CHECK(catalog);
645 
646  std::unordered_set<int> phys_table_ids;
647  for (auto cv : cvs) {
648  phys_table_ids.insert(cv->get_table_id());
649  }
650 
651  // NOTE(sy): This vector ordering seems to work for now, but maybe we need to
652  // review how rte_idx is assigned for ColumnVars. See for example Analyzer.h
653  // and RelAlgExecutor.cpp and rte_idx there.
654  std::vector<InputTableInfo> query_infos(phys_table_ids.size());
655  size_t i = 0;
656  for (auto id : phys_table_ids) {
657  auto tmeta = catalog->getMetadataForTable(id);
658  query_infos[i].table_id = id;
659  query_infos[i].info = tmeta->fragmenter->getFragmentsForQuery();
660  ++i;
661  }
662 
663  return query_infos;
664 }
665 
666 //! Make hash table from named tables and columns (such as for testing).
667 std::shared_ptr<HashJoin> HashJoin::getSyntheticInstance(
668  std::string_view table1,
669  std::string_view column1,
670  std::string_view table2,
671  std::string_view column2,
672  const Data_Namespace::MemoryLevel memory_level,
673  const HashType preferred_hash_type,
674  const int device_count,
675  ColumnCacheMap& column_cache,
676  Executor* executor) {
677  auto a1 = getSyntheticColumnVar(table1, column1, 0, executor);
678  auto a2 = getSyntheticColumnVar(table2, column2, 1, executor);
679 
680  auto qual_bin_oper = std::make_shared<Analyzer::BinOper>(kBOOLEAN, kEQ, kONE, a1, a2);
681 
682  std::set<const Analyzer::ColumnVar*> cvs =
683  AllColumnVarsVisitor().visit(qual_bin_oper.get());
684  auto query_infos = getSyntheticInputTableInfo(cvs, executor);
685  setupSyntheticCaching(cvs, executor);
686  auto query_hint = RegisteredQueryHint::defaults();
687 
688  auto hash_table = HashJoin::getInstance(qual_bin_oper,
689  query_infos,
690  memory_level,
691  JoinType::INNER,
692  preferred_hash_type,
693  device_count,
694  column_cache,
695  executor,
696  {},
697  query_hint,
698  {});
699  return hash_table;
700 }
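// A hedged, test-style sketch of the overload above (table and column names
// are placeholders; the executor and column cache are assumed to exist):
//
//   ColumnCacheMap column_cache;
//   auto hash_table = HashJoin::getSyntheticInstance("t1", "a",
//                                                    "t2", "b",
//                                                    Data_Namespace::CPU_LEVEL,
//                                                    HashType::OneToOne,
//                                                    /*device_count=*/1,
//                                                    column_cache,
//                                                    executor);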
701 
702 //! Make hash table from named tables and columns (such as for testing).
703 std::shared_ptr<HashJoin> HashJoin::getSyntheticInstance(
704  const std::shared_ptr<Analyzer::BinOper> qual_bin_oper,
705  const Data_Namespace::MemoryLevel memory_level,
706  const HashType preferred_hash_type,
707  const int device_count,
708  ColumnCacheMap& column_cache,
709  Executor* executor) {
710  std::set<const Analyzer::ColumnVar*> cvs =
711  AllColumnVarsVisitor().visit(qual_bin_oper.get());
712  auto query_infos = getSyntheticInputTableInfo(cvs, executor);
713  setupSyntheticCaching(cvs, executor);
714  auto query_hint = RegisteredQueryHint::defaults();
715 
716  auto hash_table = HashJoin::getInstance(qual_bin_oper,
717  query_infos,
718  memory_level,
719  JoinType::INNER,
720  preferred_hash_type,
721  device_count,
722  column_cache,
723  executor,
724  {},
725  query_hint,
726  {});
727  return hash_table;
728 }
729 
730 std::pair<std::string, std::shared_ptr<HashJoin>> HashJoin::getSyntheticInstance(
731  std::vector<std::shared_ptr<Analyzer::BinOper>> qual_bin_opers,
732  const Data_Namespace::MemoryLevel memory_level,
733  const HashType preferred_hash_type,
734  const int device_count,
735  ColumnCacheMap& column_cache,
736  Executor* executor) {
737  std::set<const Analyzer::ColumnVar*> cvs;
738  for (auto& qual : qual_bin_opers) {
739  auto cv = AllColumnVarsVisitor().visit(qual.get());
740  cvs.insert(cv.begin(), cv.end());
741  }
742  auto query_infos = getSyntheticInputTableInfo(cvs, executor);
743  setupSyntheticCaching(cvs, executor);
744  auto query_hint = RegisteredQueryHint::defaults();
745  std::shared_ptr<HashJoin> hash_table;
746  std::string error_msg;
747  for (auto& qual : qual_bin_opers) {
748  try {
749  auto candidate_hash_table = HashJoin::getInstance(qual,
750  query_infos,
751  memory_level,
752  JoinType::INNER,
753  preferred_hash_type,
754  device_count,
755  column_cache,
756  executor,
757  {},
758  query_hint,
759  {});
760  if (candidate_hash_table) {
761  hash_table = candidate_hash_table;
762  }
763  } catch (HashJoinFail& e) {
764  error_msg = e.what();
765  continue;
766  }
767  }
768  return std::make_pair(error_msg, hash_table);
769 }
770 
771 void HashJoin::checkHashJoinReplicationConstraint(const int table_id,
772  const size_t shard_count,
773  const Executor* executor) {
774  if (!g_cluster) {
775  return;
776  }
777  if (table_id >= 0) {
778  CHECK(executor);
779  const auto inner_td = executor->getCatalog()->getMetadataForTable(table_id);
780  CHECK(inner_td);
781  if (!shard_count && !table_is_replicated(inner_td)) {
782  throw TableMustBeReplicated(inner_td->tableName);
783  }
784  }
785 }
786 
787 template <typename T>
788 const T* HashJoin::getHashJoinColumn(const Analyzer::Expr* expr) {
789  auto* target_expr = expr;
790  if (auto cast_expr = dynamic_cast<const Analyzer::UOper*>(expr)) {
791  target_expr = cast_expr->get_operand();
792  }
793  CHECK(target_expr);
794  return dynamic_cast<const T*>(target_expr);
795 }
796 
797 std::pair<InnerOuter, InnerOuterStringOpInfos> HashJoin::normalizeColumnPair(
798  const Analyzer::Expr* lhs,
799  const Analyzer::Expr* rhs,
800  const Catalog_Namespace::Catalog& cat,
801  const TemporaryTables* temporary_tables,
802  const bool is_overlaps_join) {
803  SQLTypeInfo lhs_ti = lhs->get_type_info();
804  SQLTypeInfo rhs_ti = rhs->get_type_info();
805  if (!is_overlaps_join) {
806  if (lhs_ti.get_type() != rhs_ti.get_type()) {
807  throw HashJoinFail("Equijoin types must be identical, found: " +
808  lhs_ti.get_type_name() + ", " + rhs_ti.get_type_name());
809  }
810  if (!lhs_ti.is_integer() && !lhs_ti.is_time() && !lhs_ti.is_string() &&
811  !lhs_ti.is_decimal()) {
812  throw HashJoinFail("Cannot apply hash join to inner column type " +
813  lhs_ti.get_type_name());
814  }
815  // Decimal types should be identical.
816  if (lhs_ti.is_decimal() && (lhs_ti.get_scale() != rhs_ti.get_scale() ||
817  lhs_ti.get_precision() != rhs_ti.get_precision())) {
818  throw HashJoinFail("Equijoin with different decimal types");
819  }
820  }
821 
822  const auto lhs_cast = dynamic_cast<const Analyzer::UOper*>(lhs);
823  const auto rhs_cast = dynamic_cast<const Analyzer::UOper*>(rhs);
824  if (lhs_ti.is_string() && (static_cast<bool>(lhs_cast) != static_cast<bool>(rhs_cast) ||
825  (lhs_cast && lhs_cast->get_optype() != kCAST) ||
826  (rhs_cast && rhs_cast->get_optype() != kCAST))) {
827  throw HashJoinFail(
828  "Cannot use hash join for given expression (non-cast unary operator)");
829  }
830  // Casts to decimal are not supported.
831  if (lhs_ti.is_decimal() && (lhs_cast || rhs_cast)) {
832  throw HashJoinFail("Cannot use hash join for given expression (cast to decimal)");
833  }
834  auto lhs_col = getHashJoinColumn<Analyzer::ColumnVar>(lhs);
835  auto rhs_col = getHashJoinColumn<Analyzer::ColumnVar>(rhs);
836 
837  const auto lhs_string_oper = getHashJoinColumn<Analyzer::StringOper>(lhs);
838  const auto rhs_string_oper = getHashJoinColumn<Analyzer::StringOper>(rhs);
839 
840  auto process_string_op_infos = [](const auto& string_oper, auto& col, auto& ti) {
841  std::vector<StringOps_Namespace::StringOpInfo> string_op_infos;
842  if (string_oper) {
843  col = dynamic_cast<const Analyzer::ColumnVar*>(string_oper->getArg(0));
844  if (!col) {
845  // Todo (todd): Allow for non-colvar inputs into string operators for
846  // join predicates
847  // We now guard against non constant/colvar/stringoper inputs
848  // in Analyzer::StringOper::check_operand_types, but keeping this to not
849  // depend on that logic if and when it changes as allowing non-colvar inputs
850  // for hash joins will be additional work on top of allowing them
851  // outside of join predicates
852  throw HashJoinFail(
853  "Hash joins involving string operators currently restricted to column inputs "
854  "(i.e. not case statements).");
855  }
856  ti = col->get_type_info();
857  CHECK(ti.is_dict_encoded_string());
858  const auto chained_string_op_exprs = string_oper->getChainedStringOpExprs();
859  CHECK_GT(chained_string_op_exprs.size(), 0UL);
860  for (const auto& chained_string_op_expr : chained_string_op_exprs) {
861  auto chained_string_op =
862  dynamic_cast<const Analyzer::StringOper*>(chained_string_op_expr.get());
863  CHECK(chained_string_op);
864  StringOps_Namespace::StringOpInfo string_op_info(
865  chained_string_op->get_kind(), chained_string_op->getLiteralArgs());
866  string_op_infos.emplace_back(string_op_info);
867  }
868  }
869  return string_op_infos;
870  };
871 
872  auto outer_string_op_infos = process_string_op_infos(lhs_string_oper, lhs_col, lhs_ti);
873  auto inner_string_op_infos = process_string_op_infos(rhs_string_oper, rhs_col, rhs_ti);
874 
875  if (!lhs_col && !rhs_col) {
876  throw HashJoinFail(
877  "Cannot use hash join for given expression (both lhs and rhs are invalid)",
878  InnerQualDecision::UNKNOWN);
879  }
880 
881  const Analyzer::ColumnVar* inner_col{nullptr};
882  const Analyzer::ColumnVar* outer_col{nullptr};
883  auto outer_ti = lhs_ti;
884  auto inner_ti = rhs_ti;
885  const Analyzer::Expr* outer_expr{lhs};
886  InnerQualDecision inner_qual_decision = InnerQualDecision::UNKNOWN;
887  if (!lhs_col || (rhs_col && lhs_col->get_rte_idx() < rhs_col->get_rte_idx())) {
888  inner_qual_decision = InnerQualDecision::RHS;
889  inner_col = rhs_col;
890  outer_col = lhs_col;
891  } else {
892  inner_qual_decision = InnerQualDecision::LHS;
893  if (lhs_col && lhs_col->get_rte_idx() == 0) {
894  throw HashJoinFail(
895  "Cannot use hash join for given expression (lhs' rte idx is zero)",
896  inner_qual_decision);
897  }
898  inner_col = lhs_col;
899  outer_col = rhs_col;
900  std::swap(outer_ti, inner_ti);
901  std::swap(outer_string_op_infos, inner_string_op_infos);
902  outer_expr = rhs;
903  }
904  if (!inner_col) {
905  throw HashJoinFail("Cannot use hash join for given expression (invalid inner col)",
906  inner_qual_decision);
907  }
908  if (!outer_col) {
909  // check whether outer_col is a constant, i.e., inner_col = K;
910  const auto outer_constant_col = dynamic_cast<const Analyzer::Constant*>(outer_expr);
911  if (outer_constant_col) {
912  throw HashJoinFail(
913  "Cannot use hash join for given expression: try to join with a constant "
914  "value",
915  inner_qual_decision);
916  }
917  MaxRangeTableIndexVisitor rte_idx_visitor;
918  int outer_rte_idx = rte_idx_visitor.visit(outer_expr);
919  // The inner column candidate is not actually inner; the outer
920  // expression contains columns which are at least as deep.
921  if (inner_col->get_rte_idx() <= outer_rte_idx) {
922  throw HashJoinFail(
923  "Cannot use hash join for given expression (inner's rte <= outer's rte)",
924  inner_qual_decision);
925  }
926  }
927  // We need to fetch the actual type information from the catalog since Analyzer
928  // always reports nullable as true for inner table columns in left joins.
929  const auto inner_col_cd = get_column_descriptor_maybe(
930  inner_col->get_column_id(), inner_col->get_table_id(), cat);
931  const auto inner_col_real_ti = get_column_type(inner_col->get_column_id(),
932  inner_col->get_table_id(),
933  inner_col_cd,
934  temporary_tables);
935  const auto& outer_col_ti =
936  !(dynamic_cast<const Analyzer::FunctionOper*>(lhs)) && outer_col
937  ? outer_col->get_type_info()
938  : outer_ti;
939  // Casts from decimal are not supported.
940  if ((inner_col_real_ti.is_decimal() || outer_col_ti.is_decimal()) &&
941  (lhs_cast || rhs_cast)) {
942  throw HashJoinFail("Cannot use hash join for given expression (cast from decimal)");
943  }
944  if (is_overlaps_join) {
945  if (!inner_col_real_ti.is_array()) {
946  throw HashJoinFail(
947  "Overlaps join only supported for inner columns with array type");
948  }
949  auto is_bounds_array = [](const auto ti) {
950  return ti.is_fixlen_array() && ti.get_size() == 32;
951  };
952  if (!is_bounds_array(inner_col_real_ti)) {
953  throw HashJoinFail(
954  "Overlaps join only supported for 4-element double fixed length arrays");
955  }
956  if (!(outer_col_ti.get_type() == kPOINT || is_bounds_array(outer_col_ti) ||
957  is_constructed_point(outer_expr))) {
958  throw HashJoinFail(
959  "Overlaps join only supported for geometry outer columns of type point, "
960  "geometry columns with bounds or constructed points");
961  }
962  } else {
963  if (!(inner_col_real_ti.is_integer() || inner_col_real_ti.is_time() ||
964  inner_col_real_ti.is_decimal() ||
965  (inner_col_real_ti.is_string() &&
966  inner_col_real_ti.get_compression() == kENCODING_DICT))) {
967  throw HashJoinFail(
968  "Can only apply hash join to integer-like types and dictionary encoded "
969  "strings");
970  }
971  }
972 
973  auto normalized_inner_col = inner_col;
974  auto normalized_outer_col = outer_col ? outer_col : outer_expr;
975 
976  const auto& normalized_inner_ti = normalized_inner_col->get_type_info();
977  const auto& normalized_outer_ti = normalized_outer_col->get_type_info();
978 
979  if (normalized_inner_ti.is_string() != normalized_outer_ti.is_string()) {
980  throw HashJoinFail(std::string("Could not build hash tables for incompatible types " +
981  normalized_inner_ti.get_type_name() + " and " +
982  normalized_outer_ti.get_type_name()));
983  }
984  return std::make_pair(std::make_pair(normalized_inner_col, normalized_outer_col),
985  std::make_pair(inner_string_op_infos, outer_string_op_infos));
986 }
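// For a qualifier such as "R.x = S.y", where S sits at the higher range table
// index, the pair returned above is {inner = S.y, outer = R.x}: the deeper
// column becomes the build (inner) side of the hash table and the shallower
// expression becomes the probe side. The string-op-info pair stays empty unless
// one side wrapped its column in string operators.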
987 
988 std::pair<std::vector<InnerOuter>, std::vector<InnerOuterStringOpInfos>>
989 HashJoin::normalizeColumnPairs(const Analyzer::BinOper* condition,
990  const Catalog_Namespace::Catalog& cat,
991  const TemporaryTables* temporary_tables) {
992  std::pair<std::vector<InnerOuter>, std::vector<InnerOuterStringOpInfos>> result;
993  const auto lhs_tuple_expr =
994  dynamic_cast<const Analyzer::ExpressionTuple*>(condition->get_left_operand());
995  const auto rhs_tuple_expr =
996  dynamic_cast<const Analyzer::ExpressionTuple*>(condition->get_right_operand());
997 
998  CHECK_EQ(static_cast<bool>(lhs_tuple_expr), static_cast<bool>(rhs_tuple_expr));
999  if (lhs_tuple_expr) {
1000  const auto& lhs_tuple = lhs_tuple_expr->getTuple();
1001  const auto& rhs_tuple = rhs_tuple_expr->getTuple();
1002  CHECK_EQ(lhs_tuple.size(), rhs_tuple.size());
1003  for (size_t i = 0; i < lhs_tuple.size(); ++i) {
1004  const auto col_pair = normalizeColumnPair(lhs_tuple[i].get(),
1005  rhs_tuple[i].get(),
1006  cat,
1007  temporary_tables,
1008  condition->is_overlaps_oper());
1009  result.first.emplace_back(col_pair.first);
1010  result.second.emplace_back(col_pair.second);
1011  }
1012  } else {
1013  CHECK(!lhs_tuple_expr && !rhs_tuple_expr);
1014  const auto col_pair = normalizeColumnPair(condition->get_left_operand(),
1015  condition->get_right_operand(),
1016  cat,
1017  temporary_tables,
1018  condition->is_overlaps_oper());
1019  result.first.emplace_back(col_pair.first);
1020  result.second.emplace_back(col_pair.second);
1021  }
1022 
1023  return result;
1024 }
1025 
1026 bool HashJoin::canAccessHashTable(bool allow_hash_table_recycling,
1027  bool invalid_cache_key,
1028  JoinType join_type) {
1029  return g_enable_data_recycler && g_use_hashtable_cache && !invalid_cache_key &&
1030  allow_hash_table_recycling && join_type != JoinType::INVALID;
1031 }
1032 
1033 namespace {
1034 
1035 InnerOuter get_cols(const Analyzer::BinOper* qual_bin_oper,
1036  const Catalog_Namespace::Catalog& cat,
1037  const TemporaryTables* temporary_tables) {
1038  const auto lhs = qual_bin_oper->get_left_operand();
1039  const auto rhs = qual_bin_oper->get_right_operand();
1040  return HashJoin::normalizeColumnPair(lhs, rhs, cat, temporary_tables).first;
1041 }
1042 
1043 } // namespace
1044 
1045 size_t get_shard_count(const Analyzer::BinOper* join_condition,
1046  const Executor* executor) {
1047  const Analyzer::ColumnVar* inner_col{nullptr};
1048  const Analyzer::Expr* outer_col{nullptr};
1049  std::shared_ptr<Analyzer::BinOper> redirected_bin_oper;
1050  try {
1051  std::tie(inner_col, outer_col) =
1052  get_cols(join_condition, *executor->getCatalog(), executor->getTemporaryTables());
1053  } catch (...) {
1054  return 0;
1055  }
1056  if (!inner_col || !outer_col) {
1057  return 0;
1058  }
1059  return get_shard_count({inner_col, outer_col}, executor);
1060 }