ExecutionKernel.cpp
/*
 * Copyright 2020 OmniSci, Inc.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#include "QueryEngine/ExecutionKernel.h"

#include <mutex>
#include <vector>

#include "QueryEngine/ColumnFetcher.h"
#include "QueryEngine/Descriptors/QueryFragmentDescriptor.h"
#include "QueryEngine/DynamicWatchdog.h"
#include "QueryEngine/Execute.h"
#include "QueryEngine/ExternalExecutor.h"
#include "QueryEngine/SerializeToSql.h"
namespace {

bool needs_skip_result(const ResultSetPtr& res) {
  return !res || res->definitelyHasNoRows();
}

inline bool query_has_inner_join(const RelAlgExecutionUnit& ra_exe_unit) {
  return (std::count_if(ra_exe_unit.join_quals.begin(),
                        ra_exe_unit.join_quals.end(),
                        [](const auto& join_condition) {
                          return join_condition.type == JoinType::INNER;
                        }) > 0);
}
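
// Note: the count_if above is used purely as an existence test, so it is
// equivalent to std::any_of over ra_exe_unit.join_quals.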

// If the column is part of the target expressions, result set iteration needs it alive.
bool need_to_hold_chunk(const Chunk_NS::Chunk* chunk,
                        const RelAlgExecutionUnit& ra_exe_unit,
                        const std::vector<ColumnLazyFetchInfo>& lazy_fetch_info,
                        const ExecutorDeviceType device_type) {
  CHECK(chunk->getColumnDesc());
  const auto& chunk_ti = chunk->getColumnDesc()->columnType;
  if (device_type == ExecutorDeviceType::CPU &&
      (chunk_ti.is_array() ||
       (chunk_ti.is_string() && chunk_ti.get_compression() == kENCODING_NONE))) {
    for (const auto target_expr : ra_exe_unit.target_exprs) {
      const auto col_var = dynamic_cast<const Analyzer::ColumnVar*>(target_expr);
      if (col_var && col_var->get_column_id() == chunk->getColumnDesc()->columnId &&
          col_var->get_table_id() == chunk->getColumnDesc()->tableId) {
        return true;
      }
    }
  }
  if (lazy_fetch_info.empty()) {
    return false;
  }
  CHECK_EQ(lazy_fetch_info.size(), ra_exe_unit.target_exprs.size());
  for (size_t i = 0; i < ra_exe_unit.target_exprs.size(); i++) {
    const auto target_expr = ra_exe_unit.target_exprs[i];
    const auto& col_lazy_fetch = lazy_fetch_info[i];
    const auto col_var = dynamic_cast<const Analyzer::ColumnVar*>(target_expr);
    if (col_var && col_var->get_column_id() == chunk->getColumnDesc()->columnId &&
        col_var->get_table_id() == chunk->getColumnDesc()->tableId) {
      if (col_lazy_fetch.is_lazily_fetched) {
        // hold lazy fetched inputs for later iteration
        return true;
      }
    }
  }
  return false;
}

bool need_to_hold_chunk(const std::list<std::shared_ptr<Chunk_NS::Chunk>>& chunks,
                        const RelAlgExecutionUnit& ra_exe_unit,
                        const std::vector<ColumnLazyFetchInfo>& lazy_fetch_info,
                        const ExecutorDeviceType device_type) {
  for (const auto& chunk : chunks) {
    if (need_to_hold_chunk(chunk.get(), ra_exe_unit, lazy_fetch_info, device_type)) {
      return true;
    }
  }

  return false;
}
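
// Why holding matters: on CPU, array and none-encoded string targets are returned
// as pointers into the chunk buffers rather than as copies, and lazily fetched
// columns are only materialized when the ResultSet is iterated. In both cases the
// ResultSet keeps the chunk alive (see holdChunks() further down) so those pointers
// remain valid after the kernel finishes.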

}  // namespace

const std::vector<uint64_t>& SharedKernelContext::getFragOffsets() {
  std::lock_guard<std::mutex> lock(all_frag_row_offsets_mutex_);
  if (all_frag_row_offsets_.empty()) {
    all_frag_row_offsets_.resize(query_infos_.front().info.fragments.size() + 1);
    for (size_t i = 1; i <= query_infos_.front().info.fragments.size(); ++i) {
      all_frag_row_offsets_[i] =
          all_frag_row_offsets_[i - 1] +
          query_infos_.front().info.fragments[i - 1].getNumTuples();
    }
  }
  return all_frag_row_offsets_;
}
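
// A minimal illustrative sketch (hypothetical helper, not used by the engine) of the
// prefix sum computed above: offsets[i] is the global row id of the first row of
// fragment i, so fragment i spans [offsets[i], offsets[i + 1]).
[[maybe_unused]] static std::vector<uint64_t> make_frag_offsets_sketch(
    const std::vector<uint64_t>& frag_sizes) {
  std::vector<uint64_t> offsets(frag_sizes.size() + 1, 0);
  for (size_t i = 1; i <= frag_sizes.size(); ++i) {
    offsets[i] = offsets[i - 1] + frag_sizes[i - 1];
  }
  return offsets;  // e.g. sizes {100, 50, 25} -> offsets {0, 100, 150, 175}
}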

void SharedKernelContext::addDeviceResults(ResultSetPtr&& device_results,
                                           std::vector<size_t> outer_table_fragment_ids) {
  std::lock_guard<std::mutex> lock(reduce_mutex_);
  if (!needs_skip_result(device_results)) {
    all_fragment_results_.emplace_back(std::move(device_results),
                                       outer_table_fragment_ids);
  }
}

std::vector<std::pair<ResultSetPtr, std::vector<size_t>>>&
SharedKernelContext::getFragmentResults() {
  return all_fragment_results_;
}

void ExecutionKernel::run(Executor* executor,
                          const size_t thread_idx,
                          SharedKernelContext& shared_context) {
  DEBUG_TIMER("ExecutionKernel::run");
  INJECT_TIMER(kernel_run);
  std::optional<logger::QidScopeGuard> qid_scope_guard;
  if (ra_exe_unit_.query_state) {
    qid_scope_guard.emplace(ra_exe_unit_.query_state->setThreadLocalQueryId());
  }
  try {
    runImpl(executor, thread_idx, shared_context);
  } catch (const OutOfHostMemory& e) {
    throw QueryExecutionError(Executor::ERR_OUT_OF_CPU_MEM, e.what());
  } catch (const std::bad_alloc& e) {
    throw QueryExecutionError(Executor::ERR_OUT_OF_CPU_MEM, e.what());
  } catch (const OutOfRenderMemory& e) {
    throw QueryExecutionError(Executor::ERR_OUT_OF_RENDER_MEM, e.what());
  } catch (const OutOfMemory& e) {
    throw QueryExecutionError(
        Executor::ERR_OUT_OF_GPU_MEM,
        e.what(),
        QueryExecutionProperties{
            query_mem_desc.getQueryDescriptionType(),
            kernel_dispatch_mode == ExecutorDispatchMode::MultifragmentKernel});
  } catch (const ColumnarConversionNotSupported& e) {
    throw QueryExecutionError(Executor::ERR_COLUMNAR_CONVERSION_NOT_SUPPORTED, e.what());
  } catch (const TooManyLiterals& e) {
    throw QueryExecutionError(Executor::ERR_TOO_MANY_LITERALS, e.what());
  } catch (const StringConstInResultSet& e) {
    throw QueryExecutionError(Executor::ERR_STRING_CONST_IN_RESULTSET, e.what());
  } catch (const QueryExecutionError& e) {
    throw e;
  }
}
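
// Note: run() is the kernel's exception boundary. Engine exceptions are translated
// into stable QueryExecutionError codes (e.g. OutOfHostMemory and std::bad_alloc both
// map to Executor::ERR_OUT_OF_CPU_MEM) so the dispatching executor can uniformly
// decide whether to retry on another device or surface the error.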

void ExecutionKernel::runImpl(Executor* executor,
                              const size_t thread_idx,
                              SharedKernelContext& shared_context) {
  CHECK(executor);
  const auto memory_level = chosen_device_type == ExecutorDeviceType::GPU
                                ? Data_Namespace::GPU_LEVEL
                                : Data_Namespace::CPU_LEVEL;
  CHECK_GE(frag_list.size(), size_t(1));
  // frag_list[0].table_id is how we tell which query we are running for UNION ALL.
  const int outer_table_id = ra_exe_unit_.union_all
                                 ? frag_list[0].table_id
                                 : ra_exe_unit_.input_descs[0].getTableId();
  CHECK_EQ(frag_list[0].table_id, outer_table_id);
  const auto& outer_tab_frag_ids = frag_list[0].fragment_ids;

  CHECK_GE(chosen_device_id, 0);
  CHECK_LT(chosen_device_id, Executor::max_gpu_count);

  auto catalog = executor->getCatalog();
  CHECK(catalog);

  auto data_mgr = executor->getDataMgr();

  // need to own them while query executes
  auto chunk_iterators_ptr = std::make_shared<std::list<ChunkIter>>();
  std::list<std::shared_ptr<Chunk_NS::Chunk>> chunks;
  std::unique_ptr<std::lock_guard<std::mutex>> gpu_lock;
  std::unique_ptr<CudaAllocator> device_allocator;
  if (chosen_device_type == ExecutorDeviceType::GPU) {
    gpu_lock.reset(
        new std::lock_guard<std::mutex>(executor->gpu_exec_mutex_[chosen_device_id]));
    device_allocator = std::make_unique<CudaAllocator>(data_mgr, chosen_device_id);
  }
  std::shared_ptr<FetchResult> fetch_result(new FetchResult);
  try {
    std::map<int, const TableFragments*> all_tables_fragments;
    QueryFragmentDescriptor::computeAllTablesFragments(
        all_tables_fragments, ra_exe_unit_, shared_context.getQueryInfos());

    *fetch_result = ra_exe_unit_.union_all
                        ? executor->fetchUnionChunks(column_fetcher,
                                                     ra_exe_unit_,
                                                     chosen_device_id,
                                                     memory_level,
                                                     all_tables_fragments,
                                                     frag_list,
                                                     *catalog,
                                                     *chunk_iterators_ptr,
                                                     chunks,
                                                     device_allocator.get(),
                                                     thread_idx,
                                                     eo.allow_runtime_query_interrupt)
                        : executor->fetchChunks(column_fetcher,
                                                ra_exe_unit_,
                                                chosen_device_id,
                                                memory_level,
                                                all_tables_fragments,
                                                frag_list,
                                                *catalog,
                                                *chunk_iterators_ptr,
                                                chunks,
                                                device_allocator.get(),
                                                thread_idx,
                                                eo.allow_runtime_query_interrupt);
    if (fetch_result->num_rows.empty()) {
      return;
    }
    if (eo.with_dynamic_watchdog &&
        !shared_context.dynamic_watchdog_set.test_and_set(std::memory_order_acquire)) {
      CHECK_GT(eo.dynamic_watchdog_time_limit, 0u);
      auto cycle_budget = dynamic_watchdog_init(eo.dynamic_watchdog_time_limit);
      LOG(INFO) << "Dynamic Watchdog budget: CPU: "
                << std::to_string(eo.dynamic_watchdog_time_limit) << "ms, "
                << std::to_string(cycle_budget) << " cycles";
    }
  } catch (const OutOfMemory&) {
    throw QueryExecutionError(
        memory_level == Data_Namespace::GPU_LEVEL ? Executor::ERR_OUT_OF_GPU_MEM
                                                  : Executor::ERR_OUT_OF_CPU_MEM,
        QueryExecutionProperties{
            query_mem_desc.getQueryDescriptionType(),
            kernel_dispatch_mode == ExecutorDispatchMode::MultifragmentKernel});
  }

  if (eo.executor_type == ExecutorType::Extern) {
    if (ra_exe_unit_.input_descs.size() > 1) {
      throw std::runtime_error("Joins not supported through external execution");
    }
    const auto query = serialize_to_sql(&ra_exe_unit_, catalog);
    GroupByAndAggregate group_by_and_aggregate(executor,
                                               ExecutorDeviceType::CPU,
                                               ra_exe_unit_,
                                               shared_context.getQueryInfos(),
                                               executor->row_set_mem_owner_,
                                               std::nullopt);
    const auto query_mem_desc =
        group_by_and_aggregate.initQueryMemoryDescriptor(false, 0, 8, nullptr, false);
    device_results_ = run_query_external(
        query,
        *fetch_result,
        executor->plan_state_.get(),
        ExternalQueryOutputSpec{
            *query_mem_desc,
            target_exprs_to_infos(ra_exe_unit_.target_exprs, *query_mem_desc),
            executor});
    shared_context.addDeviceResults(std::move(device_results_), outer_tab_frag_ids);
    return;
  }
  const CompilationResult& compilation_result = query_comp_desc.getCompilationResult();
  std::unique_ptr<QueryExecutionContext> query_exe_context_owned;
  const bool do_render = render_info_ && render_info_->isPotentialInSituRender();

  int64_t total_num_input_rows{-1};
  if (kernel_dispatch_mode == ExecutorDispatchMode::KernelPerFragment &&
      query_mem_desc.getQueryDescriptionType() == QueryDescriptionType::Projection) {
    total_num_input_rows = 0;
    std::for_each(fetch_result->num_rows.begin(),
                  fetch_result->num_rows.end(),
                  [&total_num_input_rows](const std::vector<int64_t>& frag_row_count) {
                    total_num_input_rows = std::accumulate(frag_row_count.begin(),
                                                           frag_row_count.end(),
                                                           total_num_input_rows);
                  });
    VLOG(2) << "total_num_input_rows=" << total_num_input_rows;
    // TODO(adb): we may want to take this early out for all queries, but we are most
    // likely to see this query pattern on the kernel per fragment path (e.g. with
    // HAVING 0=1)
    if (total_num_input_rows == 0) {
      return;
    }

    if (query_has_inner_join(ra_exe_unit_)) {
      total_num_input_rows *= ra_exe_unit_.input_descs.size();
    }
  }

  uint32_t start_rowid{0};
  if (rowid_lookup_key >= 0) {
    if (!frag_list.empty()) {
      const auto& all_frag_row_offsets = shared_context.getFragOffsets();
      start_rowid = rowid_lookup_key -
                    all_frag_row_offsets[frag_list.begin()->fragment_ids.front()];
    }
  }
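
  // Worked example (illustrative numbers): with fragment offsets {0, 100, 250} from
  // getFragOffsets() and rowid_lookup_key == 120 falling into fragment 1, start_rowid
  // becomes 120 - 100 = 20, i.e. the kernel starts at row 20 within that fragment.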

#ifdef HAVE_TBB
  bool can_run_subkernels = shared_context.getThreadPool() != nullptr;

  // Sub-tasks are supported for groupby queries and estimators only for now.
  bool is_groupby =
      (ra_exe_unit_.groupby_exprs.size() > 1) ||
      (ra_exe_unit_.groupby_exprs.size() == 1 && ra_exe_unit_.groupby_exprs.front());
  can_run_subkernels = can_run_subkernels && (is_groupby || ra_exe_unit_.estimator);

  // In case some column is lazily fetched, we cannot mix different fragments in a
  // single ResultSet.
  can_run_subkernels =
      can_run_subkernels && !executor->hasLazyFetchColumns(ra_exe_unit_.target_exprs);

  // TODO: Use another structure to hold chunks. Currently, ResultSet holds them, but
  // with sub-tasks a chunk can be referenced by many ResultSets. So, some outer
  // structure to hold all ResultSets and all chunks is required.
  can_run_subkernels =
      can_run_subkernels &&
      !need_to_hold_chunk(
          chunks, ra_exe_unit_, std::vector<ColumnLazyFetchInfo>(), chosen_device_type);

  // TODO: check for literals? We serialize literals before execution and hold them in
  // result sets. Can we simply do it once and hold them in an outer structure?
  if (can_run_subkernels) {
    size_t total_rows = fetch_result->num_rows[0][0];
    size_t sub_size = g_cpu_sub_task_size;
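
    // The loop below splits the row range [start_rowid, total_rows) into consecutive
    // sub-ranges of at most g_cpu_sub_task_size rows, clamping the final sub-task.
    // E.g. (illustrative numbers) total_rows = 10000 and sub_size = 4096 yield
    // sub-tasks over [0, 4096), [4096, 8192), and [8192, 10000).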
    for (size_t sub_start = start_rowid; sub_start < total_rows; sub_start += sub_size) {
      sub_size = (sub_start + sub_size > total_rows) ? total_rows - sub_start : sub_size;
      auto subtask = std::make_shared<KernelSubtask>(*this,
                                                     shared_context,
                                                     fetch_result,
                                                     chunk_iterators_ptr,
                                                     total_num_input_rows,
                                                     sub_start,
                                                     sub_size,
                                                     thread_idx);
      shared_context.getThreadPool()->run(
          [subtask, executor] { subtask->run(executor); });
    }

    return;
  }
#endif  // HAVE_TBB

  if (eo.executor_type == ExecutorType::Native) {
    try {
      query_exe_context_owned =
          query_mem_desc.getQueryExecutionContext(ra_exe_unit_,
                                                  executor,
                                                  chosen_device_type,
                                                  kernel_dispatch_mode,
                                                  chosen_device_id,
                                                  total_num_input_rows,
                                                  fetch_result->col_buffers,
                                                  fetch_result->frag_offsets,
                                                  executor->getRowSetMemoryOwner(),
                                                  compilation_result.output_columnar,
                                                  query_mem_desc.sortOnGpu(),
                                                  thread_idx,
                                                  do_render ? render_info_ : nullptr);
    } catch (const OutOfHostMemory& e) {
      throw QueryExecutionError(Executor::ERR_OUT_OF_CPU_MEM);
    }
  }
  QueryExecutionContext* query_exe_context{query_exe_context_owned.get()};
  CHECK(query_exe_context);
  int32_t err{0};

  if (ra_exe_unit_.groupby_exprs.empty()) {
    err = executor->executePlanWithoutGroupBy(ra_exe_unit_,
                                              compilation_result,
                                              query_comp_desc.hoistLiterals(),
                                              &device_results_,
                                              ra_exe_unit_.target_exprs,
                                              chosen_device_type,
                                              fetch_result->col_buffers,
                                              query_exe_context,
                                              fetch_result->num_rows,
                                              fetch_result->frag_offsets,
                                              data_mgr,
                                              chosen_device_id,
                                              start_rowid,
                                              ra_exe_unit_.input_descs.size(),
                                              eo.allow_runtime_query_interrupt,
                                              do_render ? render_info_ : nullptr);
  } else {
    if (ra_exe_unit_.union_all) {
      VLOG(1) << "outer_table_id=" << outer_table_id
              << " ra_exe_unit_.scan_limit=" << ra_exe_unit_.scan_limit;
    }
    err = executor->executePlanWithGroupBy(ra_exe_unit_,
                                           compilation_result,
                                           query_comp_desc.hoistLiterals(),
                                           &device_results_,
                                           chosen_device_type,
                                           fetch_result->col_buffers,
                                           outer_tab_frag_ids,
                                           query_exe_context,
                                           fetch_result->num_rows,
                                           fetch_result->frag_offsets,
                                           data_mgr,
                                           chosen_device_id,
                                           outer_table_id,
                                           ra_exe_unit_.scan_limit,
                                           start_rowid,
                                           ra_exe_unit_.input_descs.size(),
                                           eo.allow_runtime_query_interrupt,
                                           do_render ? render_info_ : nullptr);
  }
  if (device_results_) {
    std::list<std::shared_ptr<Chunk_NS::Chunk>> chunks_to_hold;
    for (const auto& chunk : chunks) {
      if (need_to_hold_chunk(chunk.get(),
                             ra_exe_unit_,
                             device_results_->getLazyFetchInfo(),
                             chosen_device_type)) {
        chunks_to_hold.push_back(chunk);
      }
    }
    device_results_->holdChunks(chunks_to_hold);
    device_results_->holdChunkIterators(chunk_iterators_ptr);
  } else {
    VLOG(1) << "null device_results.";
  }
  if (err) {
    throw QueryExecutionError(err);
  }
  shared_context.addDeviceResults(std::move(device_results_), outer_tab_frag_ids);
}

#ifdef HAVE_TBB

void KernelSubtask::run(Executor* executor) {
  try {
    runImpl(executor);
  } catch (const OutOfHostMemory& e) {
    throw QueryExecutionError(Executor::ERR_OUT_OF_CPU_MEM, e.what());
  } catch (const std::bad_alloc& e) {
    throw QueryExecutionError(Executor::ERR_OUT_OF_CPU_MEM, e.what());
  } catch (const OutOfRenderMemory& e) {
    throw QueryExecutionError(Executor::ERR_OUT_OF_RENDER_MEM, e.what());
  } catch (const OutOfMemory& e) {
    throw QueryExecutionError(
        Executor::ERR_OUT_OF_GPU_MEM,
        e.what(),
        QueryExecutionProperties{
            kernel_.query_mem_desc.getQueryDescriptionType(),
            kernel_.kernel_dispatch_mode == ExecutorDispatchMode::MultifragmentKernel});
  } catch (const ColumnarConversionNotSupported& e) {
    throw QueryExecutionError(Executor::ERR_COLUMNAR_CONVERSION_NOT_SUPPORTED, e.what());
  } catch (const TooManyLiterals& e) {
    throw QueryExecutionError(Executor::ERR_TOO_MANY_LITERALS, e.what());
  } catch (const StringConstInResultSet& e) {
    throw QueryExecutionError(Executor::ERR_STRING_CONST_IN_RESULTSET, e.what());
  } catch (const QueryExecutionError& e) {
    throw e;
  }
}

void KernelSubtask::runImpl(Executor* executor) {
  auto& query_exe_context_owned = shared_context_.getTlsExecutionContext().local();
  const bool do_render =
      kernel_.render_info_ && kernel_.render_info_->isPotentialInSituRender();
  const CompilationResult& compilation_result =
      kernel_.query_comp_desc.getCompilationResult();

  if (!query_exe_context_owned) {
    try {
      // We pass fake col_buffers and frag_offsets. They are not actually used for
      // subtasks, but we shouldn't pass empty structures, to avoid empty results.
      std::vector<std::vector<const int8_t*>> col_buffers(
          fetch_result_->col_buffers.size(),
          std::vector<const int8_t*>(fetch_result_->col_buffers[0].size()));
      std::vector<std::vector<uint64_t>> frag_offsets(
          fetch_result_->frag_offsets.size(),
          std::vector<uint64_t>(fetch_result_->frag_offsets[0].size()));
      query_exe_context_owned = kernel_.query_mem_desc.getQueryExecutionContext(
          kernel_.ra_exe_unit_,
          executor,
          kernel_.chosen_device_type,
          kernel_.kernel_dispatch_mode,
          kernel_.chosen_device_id,
          total_num_input_rows_,
          col_buffers,
          frag_offsets,
          executor->getRowSetMemoryOwner(),
          compilation_result.output_columnar,
          kernel_.query_mem_desc.sortOnGpu(),
          // TODO: use TBB thread id to choose allocator
          thread_idx_,
          do_render ? kernel_.render_info_ : nullptr);
    } catch (const OutOfHostMemory& e) {
      throw QueryExecutionError(Executor::ERR_OUT_OF_CPU_MEM);
    }
  }

  const int outer_table_id = kernel_.ra_exe_unit_.union_all
                                 ? kernel_.frag_list[0].table_id
                                 : kernel_.ra_exe_unit_.input_descs[0].getTableId();
  const auto& outer_tab_frag_ids = kernel_.frag_list[0].fragment_ids;
  auto catalog = executor->getCatalog();
  CHECK(catalog);
  QueryExecutionContext* query_exe_context{query_exe_context_owned.get()};
  CHECK(query_exe_context);
  int32_t err{0};

  if (kernel_.ra_exe_unit_.groupby_exprs.empty()) {
    err = executor->executePlanWithoutGroupBy(kernel_.ra_exe_unit_,
                                              compilation_result,
                                              kernel_.query_comp_desc.hoistLiterals(),
                                              nullptr,
                                              kernel_.ra_exe_unit_.target_exprs,
                                              kernel_.chosen_device_type,
                                              fetch_result_->col_buffers,
                                              query_exe_context,
                                              fetch_result_->num_rows,
                                              fetch_result_->frag_offsets,
                                              &catalog->getDataMgr(),
                                              kernel_.chosen_device_id,
                                              start_rowid_,
                                              kernel_.ra_exe_unit_.input_descs.size(),
                                              kernel_.eo.allow_runtime_query_interrupt,
                                              do_render ? kernel_.render_info_ : nullptr,
                                              start_rowid_ + num_rows_to_process_);
  } else {
    err = executor->executePlanWithGroupBy(kernel_.ra_exe_unit_,
                                           compilation_result,
                                           kernel_.query_comp_desc.hoistLiterals(),
                                           nullptr,
                                           kernel_.chosen_device_type,
                                           fetch_result_->col_buffers,
                                           outer_tab_frag_ids,
                                           query_exe_context,
                                           fetch_result_->num_rows,
                                           fetch_result_->frag_offsets,
                                           &catalog->getDataMgr(),
                                           kernel_.chosen_device_id,
                                           outer_table_id,
                                           kernel_.ra_exe_unit_.scan_limit,
                                           start_rowid_,
                                           kernel_.ra_exe_unit_.input_descs.size(),
                                           kernel_.eo.allow_runtime_query_interrupt,
                                           do_render ? kernel_.render_info_ : nullptr,
                                           start_rowid_ + num_rows_to_process_);
  }

  if (err) {
    throw QueryExecutionError(err);
  }
}

#endif  // HAVE_TBB