OmniSciDB  c1a53651b2
HashtableRecycler.cpp
/*
 * Copyright 2022 HEAVY.AI, Inc.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#include "HashtableRecycler.h"

extern bool g_is_test_env;

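// Checks whether a hashtable built for the given query plan hash is already
// cached for the given device. For overlaps join hashtables a key match alone
// is not sufficient: the cached bucket metadata must also be compatible with
// the requested one (see checkOverlapsHashtableBucketCompatability below).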
bool HashtableRecycler::hasItemInCache(
    QueryPlanHash key,
    CacheItemType item_type,
    DeviceIdentifier device_identifier,
    std::lock_guard<std::mutex>& lock,
    std::optional<HashtableCacheMetaInfo> meta_info) const {
  if (!g_enable_data_recycler || !g_use_hashtable_cache ||
      key == EMPTY_HASHED_PLAN_DAG_KEY) {
    return false;
  }
  auto hashtable_cache = getCachedItemContainer(item_type, device_identifier);
  // a hashtable cache for any device type should be properly initialized
  CHECK(hashtable_cache);
  auto candidate_ht_it = std::find_if(
      hashtable_cache->begin(), hashtable_cache->end(), [&key](const auto& cached_item) {
        return cached_item.key == key;
      });
  if (candidate_ht_it != hashtable_cache->end()) {
    if (item_type == OVERLAPS_HT) {
      CHECK(candidate_ht_it->meta_info && candidate_ht_it->meta_info->overlaps_meta_info);
      CHECK(meta_info && meta_info->overlaps_meta_info);
      if (checkOverlapsHashtableBucketCompatability(
              *candidate_ht_it->meta_info->overlaps_meta_info,
              *meta_info->overlaps_meta_info)) {
        return true;
      }
    } else {
      return true;
    }
  }
  return false;
}

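// Returns the cached hashtable for the given key, or nullptr on a miss.
// A hit bumps the item's reference count, which feeds the query metric used
// to rank items during cleanupCacheForInsertion.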
std::shared_ptr<HashTable> HashtableRecycler::getItemFromCache(
    QueryPlanHash key,
    CacheItemType item_type,
    DeviceIdentifier device_identifier,
    std::optional<HashtableCacheMetaInfo> meta_info) {
  if (!g_enable_data_recycler || !g_use_hashtable_cache ||
      key == EMPTY_HASHED_PLAN_DAG_KEY) {
    return nullptr;
  }
  std::lock_guard<std::mutex> lock(getCacheLock());
  auto hashtable_cache = getCachedItemContainer(item_type, device_identifier);
  auto candidate_ht = getCachedItemWithoutConsideringMetaInfo(
      key, item_type, device_identifier, *hashtable_cache, lock);
  if (candidate_ht) {
    bool can_return_cached_item = false;
    if (item_type == OVERLAPS_HT) {
      // we have to check the hashtable metainfo for an overlaps join hashtable
      CHECK(candidate_ht->meta_info && candidate_ht->meta_info->overlaps_meta_info);
      CHECK(meta_info && meta_info->overlaps_meta_info);
      if (checkOverlapsHashtableBucketCompatability(
              *candidate_ht->meta_info->overlaps_meta_info,
              *meta_info->overlaps_meta_info)) {
        can_return_cached_item = true;
      }
    } else {
      can_return_cached_item = true;
    }
    if (can_return_cached_item) {
      CHECK(!candidate_ht->isDirty());
      candidate_ht->item_metric->incRefCount();
      VLOG(1) << "[" << item_type << ", "
              << DataRecyclerUtil::getDeviceIdentifierString(device_identifier)
              << "] Recycle item in a cache (key: " << key << ")";
      return candidate_ht->cached_item;
    }
  }
  return nullptr;
}

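// Inserts a newly built hashtable into the cache. A dirty entry with the same
// key is evicted first; if the cache lacks space, less important items are
// evicted via cleanupCacheForInsertion before the insertion.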
void HashtableRecycler::putItemToCache(
    QueryPlanHash key,
    std::shared_ptr<HashTable> item_ptr,
    CacheItemType item_type,
    DeviceIdentifier device_identifier,
    size_t item_size,
    size_t compute_time,
    std::optional<HashtableCacheMetaInfo> meta_info) {
  if (!g_enable_data_recycler || !g_use_hashtable_cache ||
      key == EMPTY_HASHED_PLAN_DAG_KEY) {
    return;
  }
  std::lock_guard<std::mutex> lock(getCacheLock());
  auto has_cached_ht = hasItemInCache(key, item_type, device_identifier, lock, meta_info);
  if (has_cached_ht) {
    // check whether the cached one is dirty
    auto hashtable_cache = getCachedItemContainer(item_type, device_identifier);
    auto candidate_it =
        std::find_if(hashtable_cache->begin(),
                     hashtable_cache->end(),
                     [&key](const auto& cached_item) { return cached_item.key == key; });
    bool found_candidate = false;
    if (candidate_it != hashtable_cache->end()) {
      if (item_type == OVERLAPS_HT) {
        // we have to check the hashtable metainfo for an overlaps join hashtable
        CHECK(candidate_it->meta_info && candidate_it->meta_info->overlaps_meta_info);
        CHECK(meta_info && meta_info->overlaps_meta_info);
        if (checkOverlapsHashtableBucketCompatability(
                *candidate_it->meta_info->overlaps_meta_info,
                *meta_info->overlaps_meta_info)) {
          found_candidate = true;
        }
      } else {
        found_candidate = true;
      }
      if (found_candidate && candidate_it->isDirty()) {
        // remove the dirty item from the cache to make room for the new one
        removeItemFromCache(
            key, item_type, device_identifier, lock, candidate_it->meta_info);
        has_cached_ht = false;
      }
    }
  }

  if (!has_cached_ht) {
    // check the cache's space availability
    auto& metric_tracker = getMetricTracker(item_type);
    auto cache_status = metric_tracker.canAddItem(device_identifier, item_size);
    if (cache_status == CacheAvailability::UNAVAILABLE) {
      // the hashtable is too large to fit in the cache
      return;
    } else if (cache_status == CacheAvailability::AVAILABLE_AFTER_CLEANUP) {
      // we need to clean up some cached hashtables to make room for this one;
      // we cache the new hashtable anyway since we do not yet know its
      // importance, and if it is not reused frequently it will be evicted
      // in the near future
      auto required_size = metric_tracker.calculateRequiredSpaceForItemAddition(
          device_identifier, item_size);
      cleanupCacheForInsertion(item_type, device_identifier, required_size, lock);
    }
    // register the hashtable's metric with the metric tracker
    auto new_cache_metric_ptr = metric_tracker.putNewCacheItemMetric(
        key, device_identifier, item_size, compute_time);
    CHECK_EQ(item_size, new_cache_metric_ptr->getMemSize());
    // put the hashtable into the cache
    VLOG(1) << "[" << item_type << ", "
            << DataRecyclerUtil::getDeviceIdentifierString(device_identifier)
            << "] Put item to cache (key: " << key << ")";
    auto hashtable_cache = getCachedItemContainer(item_type, device_identifier);
    hashtable_cache->emplace_back(key, item_ptr, new_cache_metric_ptr, meta_info);
  }
  // otherwise, we already have a clean cached hashtable for this key
  return;
}

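// Removes a single cached hashtable together with its metric and shrinks the
// tracked cache size. The lock_guard parameter documents that the caller must
// already hold the cache lock when calling this function.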
void HashtableRecycler::removeItemFromCache(
    QueryPlanHash key,
    CacheItemType item_type,
    DeviceIdentifier device_identifier,
    std::lock_guard<std::mutex>& lock,
    std::optional<HashtableCacheMetaInfo> meta_info) {
  if (!g_enable_data_recycler || !g_use_hashtable_cache ||
      key == EMPTY_HASHED_PLAN_DAG_KEY) {
    return;
  }
  auto& cache_metrics = getMetricTracker(item_type);
  // remove the cached item from the cache
  auto cache_metric = cache_metrics.getCacheItemMetric(key, device_identifier);
  CHECK(cache_metric);
  auto hashtable_size = cache_metric->getMemSize();
  auto hashtable_container = getCachedItemContainer(item_type, device_identifier);
  auto filter = [key](auto const& item) { return item.key == key; };
  auto itr =
      std::find_if(hashtable_container->cbegin(), hashtable_container->cend(), filter);
  if (itr == hashtable_container->cend()) {
    return;
  } else {
    VLOG(1) << "[" << item_type << ", "
            << DataRecyclerUtil::getDeviceIdentifierString(device_identifier)
            << "] remove cached item from cache (key: " << key << ")";
    hashtable_container->erase(itr);
  }
  // remove the cache metric
  cache_metrics.removeCacheItemMetric(key, device_identifier);
  // update the current cache size
  cache_metrics.updateCurrentCacheSize(
      device_identifier, CacheUpdateAction::REMOVE, hashtable_size);
  return;
}

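// Evicts the least important cached items until at least `required_size`
// bytes are freed. Outside of the test environment we free at least half of
// the total cache size per cleanup to amortize eviction costs: e.g., with a
// 512MB cache budget, inserting a 10MB hashtable frees ~256MB in one pass
// instead of triggering a small cleanup on every subsequent insertion.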
void HashtableRecycler::cleanupCacheForInsertion(
    CacheItemType item_type,
    DeviceIdentifier device_identifier,
    size_t required_size,
    std::lock_guard<std::mutex>& lock,
    std::optional<HashtableCacheMetaInfo> meta_info) {
  // sort the container by the importance of the cached items (reference count,
  // size, and compute time) and then remove the least important ones
  int elimination_target_offset = 0;
  size_t removed_size = 0;
  auto& metric_tracker = getMetricTracker(item_type);
  auto actual_space_to_free = metric_tracker.getTotalCacheSize() / 2;
  if (!g_is_test_env && required_size < actual_space_to_free) {
    // remove enough items to avoid overly frequent cache cleanups;
    // we do not apply this to the test environment since test scenarios are
    // designed around specific item and cache sizes
    required_size = actual_space_to_free;
  }
  metric_tracker.sortCacheInfoByQueryMetric(device_identifier);
  auto cached_item_metrics = metric_tracker.getCacheItemMetrics(device_identifier);
  sortCacheContainerByQueryMetric(item_type, device_identifier);

  // collect targets to eliminate
  for (auto& metric : cached_item_metrics) {
    auto target_size = metric->getMemSize();
    ++elimination_target_offset;
    removed_size += target_size;
    if (removed_size > required_size) {
      break;
    }
  }

  // eliminate the targets from 1) the cache container and 2) their metrics
  removeCachedItemFromBeginning(item_type, device_identifier, elimination_target_offset);
  metric_tracker.removeMetricFromBeginning(device_identifier, elimination_target_offset);

  // update the current cache size after this cleanup
  metric_tracker.updateCurrentCacheSize(
      device_identifier, CacheUpdateAction::REMOVE, removed_size);
}

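// Drops every cached hashtable for every registered item type and device,
// resets the associated cache metrics, and clears the table-key-to-DAG
// bookkeeping map.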
void HashtableRecycler::clearCache() {
  std::lock_guard<std::mutex> lock(getCacheLock());
  for (auto& item_type : getCacheItemType()) {
    getMetricTracker(item_type).clearCacheMetricTracker();
    auto item_cache = getItemCache().find(item_type)->second;
    for (auto& kv : *item_cache) {
      if (!kv.second->empty()) {
        VLOG(1) << "[" << item_type << ", "
                << DataRecyclerUtil::getDeviceIdentifierString(kv.first)
                << "] clear cache (# items: " << kv.second->size() << ")";
        kv.second->clear();
      }
    }
  }
  table_key_to_query_plan_dag_map_.clear();
}

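// Marks every cached hashtable whose query plan DAG hash appears in `key_set`
// as dirty, typically after the underlying table has been updated; a dirty
// entry is evicted lazily when putItemToCache later sees the same key.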
void HashtableRecycler::markCachedItemAsDirty(
    size_t table_key,
    std::unordered_set<QueryPlanHash>& key_set,
    CacheItemType item_type,
    DeviceIdentifier device_identifier) {
  if (!g_enable_data_recycler || !g_use_hashtable_cache || key_set.empty()) {
    return;
  }
  std::lock_guard<std::mutex> lock(getCacheLock());
  auto hashtable_cache = getCachedItemContainer(item_type, device_identifier);
  for (auto key : key_set) {
    markCachedItemAsDirtyImpl(key, *hashtable_cache);
  }
  // after marking all cached hashtables that have the given "table_key" as one
  // of their inputs, we remove the table_key -> hashed_query_plan_dag mapping,
  // since we no longer need to track already-marked items in the cache
  removeTableKeyInfoFromQueryPlanDagMap(table_key);

  // hash tables built from synthetically generated tables would otherwise
  // never be cleared, since we assign them the table key {-1, -1} and no
  // invalidation request is ever issued with that key; thus, we manually
  // invalidate them here to reclaim cache space, on the assumption that
  // synthetically generated tables are used less frequently than typical
  // tables
  removeCachedHashtableBuiltFromSyntheticTable(item_type, device_identifier, lock);
}

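// Eagerly drops hashtables keyed to the unitary (synthetic) table key, since
// no table-driven invalidation request will ever target that key.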
void HashtableRecycler::removeCachedHashtableBuiltFromSyntheticTable(
    CacheItemType item_type,
    DeviceIdentifier device_identifier,
    std::lock_guard<std::mutex>& lock) {
  auto hashtable_cache = getCachedItemContainer(item_type, device_identifier);
  CHECK(hashtable_cache);
  auto unitary_table_key = DataRecyclerUtil::getUnitaryTableKey();
  auto key_set_it = table_key_to_query_plan_dag_map_.find(unitary_table_key);
  if (key_set_it != table_key_to_query_plan_dag_map_.end()) {
    auto& key_set = key_set_it->second;
    for (auto key : key_set) {
      removeItemFromCache(key, item_type, device_identifier, lock);
    }
    // after removing all cached hashtables that have the given "table_key" as
    // one of their inputs, we remove the table_key -> hashed_query_plan_dag
    // mapping, since we no longer need to track already-removed items
    removeTableKeyInfoFromQueryPlanDagMap(unitary_table_key);
  }
}

std::string HashtableRecycler::toString() const {
  std::ostringstream oss;
  oss << "A current status of the Hashtable Recycler:\n";
  for (auto& item_type : getCacheItemType()) {
    oss << "\t" << item_type;
    auto& metric_tracker = getMetricTracker(item_type);
    oss << "\n\t# cached hashtables:\n";
    auto item_cache = getItemCache().find(item_type)->second;
    for (auto& cache_container : *item_cache) {
      oss << "\t\tDevice"
          << DataRecyclerUtil::getDeviceIdentifierString(cache_container.first)
          << ", # hashtables: " << cache_container.second->size() << "\n";
      for (auto& ht : *cache_container.second) {
        oss << "\t\t\tHT] " << ht.item_metric->toString() << "\n";
      }
    }
    oss << "\t" << metric_tracker.toString() << "\n";
  }
  return oss.str();
}

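// Two overlaps hashtables are interchangeable only if their bucket dimensions
// match (within a 1e-4 tolerance per dimension) and their tuning parameters
// agree; recycling a table with different bucketing could otherwise return
// incorrect probe results.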
bool HashtableRecycler::checkOverlapsHashtableBucketCompatability(
    const OverlapsHashTableMetaInfo& candidate,
    const OverlapsHashTableMetaInfo& target) const {
  if (candidate.bucket_sizes.size() != target.bucket_sizes.size()) {
    return false;
  }
  for (size_t i = 0; i < candidate.bucket_sizes.size(); i++) {
    if (std::abs(target.bucket_sizes[i] - candidate.bucket_sizes[i]) > 1e-4) {
      return false;
    }
  }
  auto threshold_check =
      candidate.overlaps_bucket_threshold == target.overlaps_bucket_threshold;
  auto hashtable_size_check =
      candidate.overlaps_max_table_size_bytes == target.overlaps_max_table_size_bytes;
  return threshold_check && hashtable_size_check;
}

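// Folds the inner and outer join column info hashes into a single key via
// boost::hash_combine. Note that hash_combine is order-sensitive, so
// (inner, outer) and (outer, inner) produce different keys.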
size_t HashtableRecycler::getJoinColumnInfoHash(
    std::vector<const Analyzer::ColumnVar*>& inner_cols,
    std::vector<const Analyzer::ColumnVar*>& outer_cols,
    Executor* executor) {
  auto hashed_join_col_info = EMPTY_HASHED_PLAN_DAG_KEY;
  boost::hash_combine(
      hashed_join_col_info,
      executor->getQueryPlanDagCache().translateColVarsToInfoHash(inner_cols, false));
  boost::hash_combine(
      hashed_join_col_info,
      executor->getQueryPlanDagCache().translateColVarsToInfoHash(outer_cols, false));
  return hashed_join_col_info;
}

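// A hashtable built over a temporary resultset (negative table id) is only
// recyclable when the resultset is stable across executions: a RelSort input
// (row order may vary) or a RelProject input that requires dictionary
// translation makes recycling unsafe.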
bool HashtableRecycler::isSafeToCacheHashtable(
    const TableIdToNodeMap& table_id_to_node_map,
    bool need_dict_translation,
    const std::vector<InnerOuterStringOpInfos>& inner_outer_string_op_info_pairs,
    const shared::TableKey& table_key) {
  // if the hashtable is built from a subquery's resultset we need to check
  // 1) whether the resultset's rows can be inconsistent across executions,
  // e.g., randomly permuted per execution, and 2) whether hashtable building
  // requires dictionary translation, in order to recycle the hashtable safely
  auto getNodeByTableId =
      [&table_id_to_node_map](
          const shared::TableKey& table_key_param) -> const RelAlgNode* {
    auto it = table_id_to_node_map.find(table_key_param);
    if (it != table_id_to_node_map.end()) {
      return it->second;
    }
    return nullptr;
  };
  bool found_sort_node = false;
  bool found_project_node = false;
  if (table_key.table_id < 0) {
    const auto origin_table_id = table_key.table_id * -1;
    const auto inner_node = getNodeByTableId({table_key.db_id, origin_table_id});
    if (!inner_node) {
      // we could not find the node info of the temporary resultset,
      // so it is not safe to recycle the hashtable in this case
      return false;
    }
    // it is not safe to recycle the hashtable when this resultset may have
    // ordering inconsistency and/or needs dictionary translation for
    // hashtable building
    auto sort_node = dynamic_cast<const RelSort*>(inner_node);
    if (sort_node) {
      found_sort_node = true;
    } else {
      auto project_node = dynamic_cast<const RelProject*>(inner_node);
      if (project_node) {
        found_project_node = true;
      }
    }
  }
  return !(found_sort_node || (found_project_node && need_dict_translation));
}

bool HashtableRecycler::isInvalidHashTableCacheKey(
    const std::vector<QueryPlanHash>& cache_keys) {
  return cache_keys.empty() ||
         std::any_of(cache_keys.cbegin(), cache_keys.cend(), [](QueryPlanHash key) {
           return key == EMPTY_HASHED_PLAN_DAG_KEY;
         });
}

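// Computes the per-device cache keys for a prospective join hashtable. Each
// key folds together the join column DAG info, the join qualifier (op type,
// join type, column types, and dict-encoding parameters), the shard count,
// and the fragment ids each device actually scans, so any change in these
// inputs yields a different cache key.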
HashtableAccessPathInfo HashtableRecycler::getHashtableAccessPathInfo(
    const std::vector<InnerOuter>& inner_outer_pairs,
    const std::vector<InnerOuterStringOpInfos>& inner_outer_string_op_infos_pairs,
    const SQLOps op_type,
    const JoinType join_type,
    const HashTableBuildDagMap& hashtable_build_dag_map,
    int device_count,
    int shard_count,
    const std::vector<std::vector<Fragmenter_Namespace::FragmentInfo>>& frags_for_device,
    Executor* executor) {
  CHECK_GT(device_count, (int)0);
  CHECK_GE(shard_count, (int)0);
  std::vector<const Analyzer::ColumnVar*> inner_cols_vec, outer_cols_vec;
  size_t join_qual_info = EMPTY_HASHED_PLAN_DAG_KEY;
  for (auto& join_col_pair : inner_outer_pairs) {
    inner_cols_vec.push_back(join_col_pair.first);
    // extract the inner join col's info, because when the inner col comes from
    // a subquery's resultset, its table id / rte_index can differ even for the
    // same subquery semantics, i.e., project col A from table T
    boost::hash_combine(join_qual_info,
                        executor->getQueryPlanDagCache().getJoinColumnsInfoHash(
                            join_col_pair.first, JoinColumnSide::kDirect, true));
    boost::hash_combine(join_qual_info, op_type);
    boost::hash_combine(join_qual_info, join_type);
    auto outer_col_var = dynamic_cast<const Analyzer::ColumnVar*>(join_col_pair.second);
    boost::hash_combine(join_qual_info, join_col_pair.first->get_type_info().toString());
    if (outer_col_var) {
      outer_cols_vec.push_back(outer_col_var);
      if (join_col_pair.first->get_type_info().is_dict_encoded_string()) {
        // add the comp param for the dict-encoded string
        boost::hash_combine(join_qual_info,
                            executor->getQueryPlanDagCache().getJoinColumnsInfoHash(
                                outer_col_var, JoinColumnSide::kDirect, true));
        boost::hash_combine(join_qual_info, outer_col_var->get_type_info().toString());
      }
    }
  }

  if (inner_outer_string_op_infos_pairs.size()) {
    boost::hash_combine(join_qual_info, ::toString(inner_outer_string_op_infos_pairs));
  }

  auto join_cols_info = getJoinColumnInfoHash(inner_cols_vec, outer_cols_vec, executor);
  HashtableAccessPathInfo access_path_info(device_count);
  auto it = hashtable_build_dag_map.find(join_cols_info);
  if (it != hashtable_build_dag_map.end()) {
    size_t hashtable_access_path = EMPTY_HASHED_PLAN_DAG_KEY;
    boost::hash_combine(hashtable_access_path, it->second.inner_cols_access_path);
    boost::hash_combine(hashtable_access_path, join_qual_info);
    if (inner_cols_vec.front()->get_type_info().is_dict_encoded_string()) {
      boost::hash_combine(hashtable_access_path, it->second.outer_cols_access_path);
    }
    boost::hash_combine(hashtable_access_path, shard_count);

    if (!shard_count) {
      // no sharding, so all devices see the same fragments
      const auto frag_list = HashJoin::collectFragmentIds(frags_for_device[0]);
      auto cache_key_for_device = hashtable_access_path;
      boost::hash_combine(cache_key_for_device, frag_list);
      for (int i = 0; i < device_count; ++i) {
        access_path_info.hashed_query_plan_dag[i] = cache_key_for_device;
      }
    } else {
      // with sharding, we retrieve the specific fragments assigned to each
      // device and fold them into that device's cache key
      for (int i = 0; i < device_count; ++i) {
        const auto frag_list_for_device =
            HashJoin::collectFragmentIds(frags_for_device[i]);
        auto cache_key_for_device = hashtable_access_path;
        boost::hash_combine(cache_key_for_device, frag_list_for_device);
        access_path_info.hashed_query_plan_dag[i] = cache_key_for_device;
      }
    }
    access_path_info.table_keys = it->second.inputTableKeys;
  }
  return access_path_info;
}

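// Returns some cached hashtable whose key is not in `visited`, allowing a
// caller to scan cached entries without knowing their concrete keys; yields
// EMPTY_HASHED_PLAN_DAG_KEY once every entry has been visited.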
std::tuple<QueryPlanHash,
           std::shared_ptr<HashTable>,
           std::optional<HashtableCacheMetaInfo>>
HashtableRecycler::getCachedHashtableWithoutCacheKey(std::set<size_t>& visited,
                                                     CacheItemType hash_table_type,
                                                     DeviceIdentifier device_identifier) {
  std::lock_guard<std::mutex> lock(getCacheLock());
  auto hashtable_cache = getCachedItemContainer(hash_table_type, device_identifier);
  for (auto& ht : *hashtable_cache) {
    if (!visited.count(ht.key)) {
      return std::make_tuple(ht.key, ht.cached_item, ht.meta_info);
    }
  }
  return std::make_tuple(EMPTY_HASHED_PLAN_DAG_KEY, nullptr, std::nullopt);
}

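// Records that the hashed query plan DAG depends on each table in
// `table_keys`, so a later update to any of those tables can locate and
// invalidate the hashtables built from them (see markCachedItemAsDirty).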
void HashtableRecycler::addQueryPlanDagForTableKeys(
    size_t hashed_query_plan_dag,
    const std::unordered_set<size_t>& table_keys) {
  std::lock_guard<std::mutex> lock(getCacheLock());
  for (auto table_key : table_keys) {
    auto itr = table_key_to_query_plan_dag_map_.try_emplace(table_key).first;
    itr->second.insert(hashed_query_plan_dag);
  }
}

std::optional<std::unordered_set<size_t>>
HashtableRecycler::getMappedQueryPlanDagsWithTableKey(size_t table_key) const {
  std::lock_guard<std::mutex> lock(getCacheLock());
  auto it = table_key_to_query_plan_dag_map_.find(table_key);
  return it != table_key_to_query_plan_dag_map_.end() ? std::make_optional(it->second)
                                                      : std::nullopt;
}

void HashtableRecycler::removeTableKeyInfoFromQueryPlanDagMap(size_t table_key) {
  // this function is called while marking the cached items for the given
  // table_key as dirty; the cache lock is already held at that point, so we
  // do not acquire it again here
  table_key_to_query_plan_dag_map_.erase(table_key);
}