OmniSciDB  085a039ca4
HashtableRecycler.cpp
/*
 * Copyright 2021 OmniSci, Inc.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#include "HashtableRecycler.h"

extern bool g_is_test_env;

bool HashtableRecycler::hasItemInCache(
    QueryPlanHash key,
    CacheItemType item_type,
    DeviceIdentifier device_identifier,
    std::lock_guard<std::mutex>& lock,
    std::optional<HashtableCacheMetaInfo> meta_info) const {
  if (!g_enable_data_recycler || !g_use_hashtable_cache ||
      key == EMPTY_HASHED_PLAN_DAG_KEY) {
    return false;
  }
  auto hashtable_cache = getCachedItemContainer(item_type, device_identifier);
  // the hashtable cache of *any* device type should be properly initialized
  CHECK(hashtable_cache);
  auto candidate_ht_it = std::find_if(
      hashtable_cache->begin(), hashtable_cache->end(), [&key](const auto& cached_item) {
        return cached_item.key == key;
      });
  if (candidate_ht_it != hashtable_cache->end()) {
    if (item_type == OVERLAPS_HT) {
      CHECK(candidate_ht_it->meta_info && candidate_ht_it->meta_info->overlaps_meta_info);
      CHECK(meta_info && meta_info->overlaps_meta_info);
      if (checkOverlapsHashtableBucketCompatability(
              *candidate_ht_it->meta_info->overlaps_meta_info,
              *meta_info->overlaps_meta_info)) {
        return true;
      }
    } else {
      return true;
    }
  }
  return false;
}

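// Note: the std::lock_guard<std::mutex>& parameter carries no data; it exists so that
// this method can only be invoked while the caller already holds the cache lock (see
// putItemToCache below, which constructs the guard and passes it through). It is a
// compile-time discipline rather than a runtime check.
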
std::shared_ptr<HashTable> HashtableRecycler::getItemFromCache(
    QueryPlanHash key,
    CacheItemType item_type,
    DeviceIdentifier device_identifier,
    std::optional<HashtableCacheMetaInfo> meta_info) {
  if (!g_enable_data_recycler || !g_use_hashtable_cache ||
      key == EMPTY_HASHED_PLAN_DAG_KEY) {
    return nullptr;
  }
  std::lock_guard<std::mutex> lock(getCacheLock());
  auto hashtable_cache = getCachedItemContainer(item_type, device_identifier);
  auto candidate_ht = getCachedItemWithoutConsideringMetaInfo(
      key, item_type, device_identifier, *hashtable_cache, lock);
  if (candidate_ht) {
    bool can_return_cached_item = false;
    if (item_type == OVERLAPS_HT) {
      // we have to check the hashtable's meta info for an overlaps join hashtable
      CHECK(candidate_ht->meta_info && candidate_ht->meta_info->overlaps_meta_info);
      CHECK(meta_info && meta_info->overlaps_meta_info);
      if (checkOverlapsHashtableBucketCompatability(
              *candidate_ht->meta_info->overlaps_meta_info,
              *meta_info->overlaps_meta_info)) {
        can_return_cached_item = true;
      }
    } else {
      can_return_cached_item = true;
    }
    if (can_return_cached_item) {
      CHECK(!candidate_ht->isDirty());
      candidate_ht->item_metric->incRefCount();
      VLOG(1) << "[" << item_type << ", "
              << DataRecyclerUtil::getDeviceIdentifierString(device_identifier)
              << "] Recycle item in a cache (key: " << key << ")";
      return candidate_ht->cached_item;
    }
  }
  return nullptr;
}

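// A minimal lookup sketch (illustrative, not from this file; `recycler` and
// `cache_key` are assumed to come from the surrounding join-hashtable build path,
// and PERFECT_HT is another CacheItemType enumerator):
//
//   std::shared_ptr<HashTable> cached_ht = recycler->getItemFromCache(
//       cache_key, PERFECT_HT, DataRecyclerUtil::CPU_DEVICE_IDENTIFIER);
//   if (!cached_ht) {
//     // cache miss: build the hashtable, then register it via putItemToCache
//   }
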
void HashtableRecycler::putItemToCache(QueryPlanHash key,
                                       std::shared_ptr<HashTable> item_ptr,
                                       CacheItemType item_type,
                                       DeviceIdentifier device_identifier,
                                       size_t item_size,
                                       size_t compute_time,
                                       std::optional<HashtableCacheMetaInfo> meta_info) {
  if (!g_enable_data_recycler || !g_use_hashtable_cache ||
      key == EMPTY_HASHED_PLAN_DAG_KEY) {
    return;
  }
  std::lock_guard<std::mutex> lock(getCacheLock());
  auto has_cached_ht = hasItemInCache(key, item_type, device_identifier, lock, meta_info);
  if (has_cached_ht) {
    // check whether the cached one is in a dirty status
    auto hashtable_cache = getCachedItemContainer(item_type, device_identifier);
    auto candidate_it =
        std::find_if(hashtable_cache->begin(),
                     hashtable_cache->end(),
                     [&key](const auto& cached_item) { return cached_item.key == key; });
    bool found_candidate = false;
    if (candidate_it != hashtable_cache->end()) {
      if (item_type == OVERLAPS_HT) {
        // we have to check the hashtable's meta info for an overlaps join hashtable
        CHECK(candidate_it->meta_info && candidate_it->meta_info->overlaps_meta_info);
        CHECK(meta_info && meta_info->overlaps_meta_info);
        if (checkOverlapsHashtableBucketCompatability(
                *candidate_it->meta_info->overlaps_meta_info,
                *meta_info->overlaps_meta_info)) {
          found_candidate = true;
        }
      } else {
        found_candidate = true;
      }
      if (found_candidate && candidate_it->isDirty()) {
        // remove the dirty item from the cache to make room for the new one
        removeItemFromCache(
            key, item_type, device_identifier, lock, candidate_it->meta_info);
        has_cached_ht = false;
      }
    }
  }

  if (!has_cached_ht) {
    // check the cache's space availability
    auto& metric_tracker = getMetricTracker(item_type);
    auto cache_status = metric_tracker.canAddItem(device_identifier, item_size);
    if (cache_status == CacheAvailability::UNAVAILABLE) {
      // hashtable is too large
      return;
    } else if (cache_status == CacheAvailability::AVAILABLE_AFTER_CLEANUP) {
      // we need to remove some cached hashtables to make room for this one;
      // we cache the new hashtable anyway since we do not yet know its importance,
      // and if it turns out to be rarely reused it will be evicted in the near future
      auto required_size = metric_tracker.calculateRequiredSpaceForItemAddition(
          device_identifier, item_size);
      cleanupCacheForInsertion(item_type, device_identifier, required_size, lock);
    }
    // register the hashtable's metric with the metric tracker
    auto new_cache_metric_ptr = metric_tracker.putNewCacheItemMetric(
        key, device_identifier, item_size, compute_time);
    CHECK_EQ(item_size, new_cache_metric_ptr->getMemSize());
    // put the hashtable into the cache
    VLOG(1) << "[" << item_type << ", "
            << DataRecyclerUtil::getDeviceIdentifierString(device_identifier)
            << "] Put item to cache (key: " << key << ")";
    auto hashtable_cache = getCachedItemContainer(item_type, device_identifier);
    hashtable_cache->emplace_back(key, item_ptr, new_cache_metric_ptr, meta_info);
  }
  // otherwise, we already have a clean cached hashtable for this key
  return;
}

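// The matching cache-miss path, sketched under assumed names (`build_hashtable`,
// `ht_size_bytes`, and `build_time_ms` are hypothetical):
//
//   auto new_ht = build_hashtable();
//   recycler->putItemToCache(cache_key,
//                            new_ht,
//                            PERFECT_HT,
//                            DataRecyclerUtil::CPU_DEVICE_IDENTIFIER,
//                            ht_size_bytes,   // item_size
//                            build_time_ms);  // compute_time
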
void HashtableRecycler::removeItemFromCache(
    QueryPlanHash key,
    CacheItemType item_type,
    DeviceIdentifier device_identifier,
    std::lock_guard<std::mutex>& lock,
    std::optional<HashtableCacheMetaInfo> meta_info) {
  if (!g_enable_data_recycler || !g_use_hashtable_cache ||
      key == EMPTY_HASHED_PLAN_DAG_KEY) {
    return;
  }
  auto& cache_metrics = getMetricTracker(item_type);
  // remove the cached item from the cache
  auto cache_metric = cache_metrics.getCacheItemMetric(key, device_identifier);
  CHECK(cache_metric);
  auto hashtable_size = cache_metric->getMemSize();
  auto hashtable_container = getCachedItemContainer(item_type, device_identifier);
  auto filter = [key](auto const& item) { return item.key == key; };
  auto itr =
      std::find_if(hashtable_container->cbegin(), hashtable_container->cend(), filter);
  if (itr == hashtable_container->cend()) {
    return;
  } else {
    VLOG(1) << "[" << item_type << ", "
            << DataRecyclerUtil::getDeviceIdentifierString(device_identifier)
            << "] remove cached item from cache (key: " << key << ")";
    hashtable_container->erase(itr);
  }
  // remove the cache metric
  cache_metrics.removeCacheItemMetric(key, device_identifier);
  // update the current cache size
  cache_metrics.updateCurrentCacheSize(
      device_identifier, CacheUpdateAction::REMOVE, hashtable_size);
  return;
}

void HashtableRecycler::cleanupCacheForInsertion(
    CacheItemType item_type,
    DeviceIdentifier device_identifier,
    size_t required_size,
    std::lock_guard<std::mutex>& lock,
    std::optional<HashtableCacheMetaInfo> meta_info) {
  // sort the vector based on the importance of the cached items (by # references, size,
  // and compute time) and then remove the unimportant ones
  int elimination_target_offset = 0;
  size_t removed_size = 0;
  auto& metric_tracker = getMetricTracker(item_type);
  auto actual_space_to_free = metric_tracker.getTotalCacheSize() / 2;
  if (!g_is_test_env && required_size < actual_space_to_free) {
    // remove enough items to avoid overly frequent cache cleanups;
    // we do not apply this to test code since test scenarios are designed around
    // specific sizes of items and their caches
    required_size = actual_space_to_free;
  }
  metric_tracker.sortCacheInfoByQueryMetric(device_identifier);
  auto cached_item_metrics = metric_tracker.getCacheItemMetrics(device_identifier);
  sortCacheContainerByQueryMetric(item_type, device_identifier);

  // collect targets to eliminate
  for (auto& metric : cached_item_metrics) {
    auto target_size = metric->getMemSize();
    ++elimination_target_offset;
    removed_size += target_size;
    if (removed_size > required_size) {
      break;
    }
  }

  // eliminate targets in 1) the cache container and 2) their metrics
  removeCachedItemFromBeginning(item_type, device_identifier, elimination_target_offset);
  metric_tracker.removeMetricFromBeginning(device_identifier, elimination_target_offset);

  // update the current cache size after this cleanup
  metric_tracker.updateCurrentCacheSize(
      device_identifier, CacheUpdateAction::REMOVE, removed_size);
}

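// Worked example of the cleanup policy above (illustrative numbers): with a total
// cache size of 512 MB and a new hashtable that needs, say, 8 MB freed,
// `actual_space_to_free` = 512 MB / 2 = 256 MB > 8 MB, so outside of test
// environments the recycler evicts at least 256 MB worth of the least important
// items in one pass rather than freeing 8 MB on every insertion.
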
void HashtableRecycler::clearCache() {
  std::lock_guard<std::mutex> lock(getCacheLock());
  for (auto& item_type : getCacheItemType()) {
    getMetricTracker(item_type).clearCacheMetricTracker();
    auto item_cache = getItemCache().find(item_type)->second;
    for (auto& kv : *item_cache) {
      VLOG(1) << "[" << item_type << ", "
              << DataRecyclerUtil::getDeviceIdentifierString(kv.first)
              << "] clear cache (# items: " << kv.second->size() << ")";
      kv.second->clear();
    }
  }
  table_key_to_query_plan_dag_map_.clear();
}

void HashtableRecycler::markCachedItemAsDirty(size_t table_key,
                                              std::unordered_set<QueryPlanHash>& key_set,
                                              CacheItemType item_type,
                                              DeviceIdentifier device_identifier) {
  if (!g_enable_data_recycler || !g_use_hashtable_cache || key_set.empty()) {
    return;
  }
  std::lock_guard<std::mutex> lock(getCacheLock());
  auto hashtable_cache = getCachedItemContainer(item_type, device_identifier);
  for (auto key : key_set) {
    markCachedItemAsDirtyImpl(key, *hashtable_cache);
  }
  // after marking all cached hashtables that take the given "table_key" as one of
  // their inputs, we remove the mapping between the table_key and the
  // hashed_query_plan_dag since we no longer need to care about the already-marked
  // items in the cache
  removeTableKeyInfoFromQueryPlanDagMap(table_key);
}

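// Invalidation sketch (hypothetical; `updated_table_key` is assumed to identify the
// modified table): when a table changes, every cached hashtable whose query plan DAG
// reads that table gets marked dirty, e.g.:
//
//   if (auto dag_keys =
//           recycler->getMappedQueryPlanDagsWithTableKey(updated_table_key)) {
//     recycler->markCachedItemAsDirty(updated_table_key,
//                                     *dag_keys,
//                                     PERFECT_HT,
//                                     DataRecyclerUtil::CPU_DEVICE_IDENTIFIER);
//   }
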
std::string HashtableRecycler::toString() const {
  std::ostringstream oss;
  oss << "A current status of the Hashtable Recycler:\n";
  for (auto& item_type : getCacheItemType()) {
    oss << "\t" << item_type;
    auto& metric_tracker = getMetricTracker(item_type);
    oss << "\n\t# cached hashtables:\n";
    auto item_cache = getItemCache().find(item_type)->second;
    for (auto& cache_container : *item_cache) {
      oss << "\t\tDevice"
          << DataRecyclerUtil::getDeviceIdentifierString(cache_container.first)
          << ", # hashtables: " << cache_container.second->size() << "\n";
      for (auto& ht : *cache_container.second) {
        oss << "\t\t\tHT] " << ht.item_metric->toString() << "\n";
      }
    }
    oss << "\t" << metric_tracker.toString() << "\n";
  }
  return oss.str();
}

bool HashtableRecycler::checkOverlapsHashtableBucketCompatability(
    const OverlapsHashTableMetaInfo& candidate,
    const OverlapsHashTableMetaInfo& target) const {
  if (candidate.bucket_sizes.size() != target.bucket_sizes.size()) {
    return false;
  }
  for (size_t i = 0; i < candidate.bucket_sizes.size(); i++) {
    if (std::abs(target.bucket_sizes[i] - candidate.bucket_sizes[i]) > 1e-4) {
      return false;
    }
  }
  auto threshold_check =
      candidate.overlaps_bucket_threshold == target.overlaps_bucket_threshold;
  auto hashtable_size_check =
      candidate.overlaps_max_table_size_bytes == target.overlaps_max_table_size_bytes;
  return threshold_check && hashtable_size_check;
}

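// Compatibility sketch (hypothetical values; assumes the OverlapsHashTableMetaInfo
// members referenced above): two overlaps hashtables are interchangeable only when
// their bucket geometry matches within 1e-4 per dimension and their tuning caps are
// identical, e.g.:
//
//   OverlapsHashTableMetaInfo cached, incoming;
//   cached.bucket_sizes = {0.5, 0.5};
//   incoming.bucket_sizes = {0.50001, 0.5};  // |diff| <= 1e-4 per dimension: ok
//   cached.overlaps_bucket_threshold = incoming.overlaps_bucket_threshold = 0.1;
//   cached.overlaps_max_table_size_bytes = incoming.overlaps_max_table_size_bytes =
//       2097152;
//   // -> checkOverlapsHashtableBucketCompatability(cached, incoming) returns true
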
size_t HashtableRecycler::getJoinColumnInfoHash(
    std::vector<const Analyzer::ColumnVar*>& inner_cols,
    std::vector<const Analyzer::ColumnVar*>& outer_cols,
    Executor* executor) {
  auto hashed_join_col_info = EMPTY_HASHED_PLAN_DAG_KEY;
  boost::hash_combine(
      hashed_join_col_info,
      executor->getQueryPlanDagCache().translateColVarsToInfoHash(inner_cols, false));
  boost::hash_combine(
      hashed_join_col_info,
      executor->getQueryPlanDagCache().translateColVarsToInfoHash(outer_cols, false));
  return hashed_join_col_info;
}

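// boost::hash_combine folds each value into a running seed, so the resulting key is
// order-sensitive: hashing (inner_cols, outer_cols) differs from (outer_cols,
// inner_cols). A self-contained illustration:
//
//   #include <boost/functional/hash.hpp>
//   size_t seed = 0;
//   boost::hash_combine(seed, 1);  // seed now depends on 1
//   boost::hash_combine(seed, 2);  // ...then on (1, 2); swapping the two calls
//                                  // yields a different seed
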
bool HashtableRecycler::isSafeToCacheHashtable(
    const TableIdToNodeMap& table_id_to_node_map,
    bool need_dict_translation,
    const std::vector<InnerOuterStringOpInfos>& inner_outer_string_op_info_pairs,
    const int table_id) {
  // if the hashtable is built from a subquery's resultset we need to check
  // 1) whether the resultset rows can be inconsistent, e.g., rows can be randomly
  // permuted per execution, and 2) whether it needs dictionary translation for
  // hashtable building, in order to recycle the hashtable safely
  auto getNodeByTableId =
      [&table_id_to_node_map](const int table_id) -> const RelAlgNode* {
    auto it = table_id_to_node_map.find(table_id);
    if (it != table_id_to_node_map.end()) {
      return it->second;
    }
    return nullptr;
  };
  bool found_sort_node = false;
  bool found_project_node = false;
  if (table_id < 0) {
    auto origin_table_id = table_id * -1;
    auto inner_node = getNodeByTableId(origin_table_id);
    if (!inner_node) {
      // we cannot retrieve the node info of the temporary resultset,
      // so in this case it is not safe to recycle the hashtable
      return false;
    }
    // it is not safe to recycle the hashtable when this resultset may have
    // ordering inconsistency and/or needs dictionary translation for
    // hashtable building
    auto sort_node = dynamic_cast<const RelSort*>(inner_node);
    if (sort_node) {
      found_sort_node = true;
    } else {
      auto project_node = dynamic_cast<const RelProject*>(inner_node);
      if (project_node) {
        found_project_node = true;
      }
    }
  }
  return !(found_sort_node || (found_project_node && need_dict_translation));
}

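// Example of the negative-id convention above: a hashtable built directly on a
// physical table (table_id > 0) is always considered safe here, whereas e.g.
// table_id = -42 denotes the temporary resultset produced by node 42, so the node
// map is consulted; a RelSort producer (row order may differ per execution) or a
// RelProject producer that requires dictionary translation makes recycling unsafe.
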
bool HashtableRecycler::isInvalidHashTableCacheKey(
    const std::vector<QueryPlanHash>& cache_keys) {
  return cache_keys.empty() ||
         std::any_of(cache_keys.cbegin(), cache_keys.cend(), [](QueryPlanHash key) {
           return key == EMPTY_HASHED_PLAN_DAG_KEY;
         });
}

HashtableAccessPathInfo HashtableRecycler::getHashtableAccessPathInfo(
    const std::vector<InnerOuter>& inner_outer_pairs,
    const std::vector<InnerOuterStringOpInfos>& inner_outer_string_op_infos_pairs,
    const SQLOps op_type,
    const JoinType join_type,
    const HashTableBuildDagMap& hashtable_build_dag_map,
    int device_count,
    int shard_count,
    const std::vector<std::vector<Fragmenter_Namespace::FragmentInfo>>& frags_for_device,
    Executor* executor) {
  CHECK_GT(device_count, (int)0);
  CHECK_GE(shard_count, (int)0);
  std::vector<const Analyzer::ColumnVar*> inner_cols_vec, outer_cols_vec;
  size_t join_qual_info = EMPTY_HASHED_PLAN_DAG_KEY;
  for (auto& join_col_pair : inner_outer_pairs) {
    inner_cols_vec.push_back(join_col_pair.first);
    // extract the inner join col's id, because when the inner col comes from a
    // subquery's resultset, its table id / rte_index can differ even if the
    // subquery has the same semantics, i.e., project col A from table T
    boost::hash_combine(join_qual_info,
                        executor->getQueryPlanDagCache().getJoinColumnsInfoHash(
                            join_col_pair.first, JoinColumnSide::kDirect, true));
    boost::hash_combine(join_qual_info, op_type);
    boost::hash_combine(join_qual_info, join_type);
    auto outer_col_var = dynamic_cast<const Analyzer::ColumnVar*>(join_col_pair.second);
    boost::hash_combine(join_qual_info, join_col_pair.first->get_type_info().toString());
    if (outer_col_var) {
      outer_cols_vec.push_back(outer_col_var);
      if (join_col_pair.first->get_type_info().is_dict_encoded_string()) {
        // add the comp param for the dict-encoded string
        boost::hash_combine(join_qual_info,
                            executor->getQueryPlanDagCache().getJoinColumnsInfoHash(
                                outer_col_var, JoinColumnSide::kDirect, true));
        boost::hash_combine(join_qual_info, outer_col_var->get_type_info().toString());
      }
    }
  }

  if (inner_outer_string_op_infos_pairs.size()) {
    boost::hash_combine(join_qual_info, ::toString(inner_outer_string_op_infos_pairs));
  }

  auto join_cols_info = getJoinColumnInfoHash(inner_cols_vec, outer_cols_vec, executor);
  HashtableAccessPathInfo access_path_info(device_count);
  auto it = hashtable_build_dag_map.find(join_cols_info);
  if (it != hashtable_build_dag_map.end()) {
    size_t hashtable_access_path = EMPTY_HASHED_PLAN_DAG_KEY;
    boost::hash_combine(hashtable_access_path, it->second.inner_cols_access_path);
    boost::hash_combine(hashtable_access_path, join_qual_info);
    if (inner_cols_vec.front()->get_type_info().is_dict_encoded_string()) {
      boost::hash_combine(hashtable_access_path, it->second.outer_cols_access_path);
    }
    boost::hash_combine(hashtable_access_path, shard_count);

    if (!shard_count) {
      const auto frag_list = HashJoin::collectFragmentIds(frags_for_device[0]);
      auto cache_key_for_device = hashtable_access_path;
      // no sharding, so all devices have the same fragments
      boost::hash_combine(cache_key_for_device, frag_list);
      for (int i = 0; i < device_count; ++i) {
        access_path_info.hashed_query_plan_dag[i] = cache_key_for_device;
      }
    } else {
      // we need to retrieve the specific fragments assigned to each device
      // and incorporate them into that device's cache key
      for (int i = 0; i < device_count; ++i) {
        const auto frag_list_for_device =
            HashJoin::collectFragmentIds(frags_for_device[i]);
        auto cache_key_for_device = hashtable_access_path;
        boost::hash_combine(cache_key_for_device, frag_list_for_device);
        access_path_info.hashed_query_plan_dag[i] = cache_key_for_device;
      }
    }
    access_path_info.table_keys = it->second.inputTableKeys;
  }
  return access_path_info;
}

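// Key-derivation sketch: conceptually, the per-device cache key computed above is
//
//   hash(inner_cols_access_path, join_qual_info[, outer_cols_access_path],
//        shard_count, fragment_ids_of_device)
//
// so two executions over the same query plan DAG but different fragment sets (e.g.,
// after data ingestion) produce different keys and do not falsely share hashtables.
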
std::tuple<QueryPlanHash,
           std::shared_ptr<HashTable>,
           std::optional<HashtableCacheMetaInfo>>
HashtableRecycler::getCachedHashtableWithoutCacheKey(std::set<size_t>& visited,
                                                     CacheItemType hash_table_type,
                                                     DeviceIdentifier device_identifier) {
  std::lock_guard<std::mutex> lock(getCacheLock());
  auto hashtable_cache = getCachedItemContainer(hash_table_type, device_identifier);
  for (auto& ht : *hashtable_cache) {
    if (!visited.count(ht.key)) {
      return std::make_tuple(ht.key, ht.cached_item, ht.meta_info);
    }
  }
  return std::make_tuple(EMPTY_HASHED_PLAN_DAG_KEY, nullptr, std::nullopt);
}

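// Iteration sketch: a caller that wants to walk every cached hashtable without
// knowing the keys can call this repeatedly, accumulating returned keys in `visited`
// until the sentinel comes back (hypothetical loop):
//
//   std::set<size_t> visited;
//   while (true) {
//     auto [key, ht, meta] = recycler->getCachedHashtableWithoutCacheKey(
//         visited, BASELINE_HT, DataRecyclerUtil::CPU_DEVICE_IDENTIFIER);
//     if (!ht) break;  // EMPTY_HASHED_PLAN_DAG_KEY / nullptr marks exhaustion
//     visited.insert(key);
//     // ... inspect ht ...
//   }
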
void HashtableRecycler::addQueryPlanDagForTableKeys(
    size_t hashed_query_plan_dag,
    const std::unordered_set<size_t>& table_keys) {
  std::lock_guard<std::mutex> lock(getCacheLock());
  for (auto table_key : table_keys) {
    auto itr = table_key_to_query_plan_dag_map_.try_emplace(table_key).first;
    itr->second.insert(hashed_query_plan_dag);
  }
}

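// Bookkeeping sketch (hypothetical keys): after caching a hashtable whose DAG hash
// is `dag_hash` and whose inputs are tables t1 and t2, both directions of reuse are
// recorded:
//
//   recycler->addQueryPlanDagForTableKeys(dag_hash, {t1_key, t2_key});
//   // later, an update to t1 finds `dag_hash` via
//   // getMappedQueryPlanDagsWithTableKey(t1_key) and dirties the cached item
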
std::optional<std::unordered_set<size_t>>
HashtableRecycler::getMappedQueryPlanDagsWithTableKey(size_t table_key) const {
  std::lock_guard<std::mutex> lock(getCacheLock());
  auto it = table_key_to_query_plan_dag_map_.find(table_key);
  return it != table_key_to_query_plan_dag_map_.end() ? std::make_optional(it->second)
                                                      : std::nullopt;
}

void HashtableRecycler::removeTableKeyInfoFromQueryPlanDagMap(size_t table_key) {
  // this function is called when marking the cached items for the given table_key as
  // dirty; at that point we have already acquired the cache lock, so we skip locking
  // in this function
  table_key_to_query_plan_dag_map_.erase(table_key);
}