ExecutionKernel.cpp
/*
 * Copyright 2022 HEAVY.AI, Inc.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#include "QueryEngine/ExecutionKernel.h"

#include <mutex>
#include <vector>

#include "QueryEngine/ColumnFetcher.h"
#include "QueryEngine/Descriptors/QueryFragmentDescriptor.h"
#include "QueryEngine/DynamicWatchdog.h"
#include "QueryEngine/ErrorHandling.h"
#include "QueryEngine/Execute.h"
#include "QueryEngine/ExternalExecutor.h"
#include "QueryEngine/SerializeToSql.h"

namespace {

bool needs_skip_result(const ResultSetPtr& res) {
  return !res || res->definitelyHasNoRows();
}

inline bool query_has_inner_join(const RelAlgExecutionUnit& ra_exe_unit) {
  return (std::count_if(ra_exe_unit.join_quals.begin(),
                        ra_exe_unit.join_quals.end(),
                        [](const auto& join_condition) {
                          return join_condition.type == JoinType::INNER;
                        }) > 0);
}

// The column is part of the target expressions; result set iteration needs it kept alive.
bool need_to_hold_chunk(const Chunk_NS::Chunk* chunk,
                        const RelAlgExecutionUnit& ra_exe_unit,
                        const std::vector<ColumnLazyFetchInfo>& lazy_fetch_info,
                        const ExecutorDeviceType device_type) {
  CHECK(chunk->getColumnDesc());
  const auto& chunk_ti = chunk->getColumnDesc()->columnType;
  if (device_type == ExecutorDeviceType::CPU &&
      (chunk_ti.is_array() ||
       (chunk_ti.is_string() && chunk_ti.get_compression() == kENCODING_NONE))) {
    for (const auto target_expr : ra_exe_unit.target_exprs) {
      const auto col_var = dynamic_cast<const Analyzer::ColumnVar*>(target_expr);
      if (col_var && col_var->get_column_id() == chunk->getColumnDesc()->columnId &&
          col_var->get_table_id() == chunk->getColumnDesc()->tableId) {
        return true;
      }
    }
  }
  if (lazy_fetch_info.empty()) {
    return false;
  }
  CHECK_EQ(lazy_fetch_info.size(), ra_exe_unit.target_exprs.size());
  for (size_t i = 0; i < ra_exe_unit.target_exprs.size(); i++) {
    const auto target_expr = ra_exe_unit.target_exprs[i];
    const auto& col_lazy_fetch = lazy_fetch_info[i];
    const auto col_var = dynamic_cast<const Analyzer::ColumnVar*>(target_expr);
    if (col_var && col_var->get_column_id() == chunk->getColumnDesc()->columnId &&
        col_var->get_table_id() == chunk->getColumnDesc()->tableId) {
      if (col_lazy_fetch.is_lazily_fetched) {
        // hold lazily fetched inputs for later iteration
        return true;
      }
    }
  }
  return false;
}

bool need_to_hold_chunk(const std::list<std::shared_ptr<Chunk_NS::Chunk>>& chunks,
                        const RelAlgExecutionUnit& ra_exe_unit,
                        const std::vector<ColumnLazyFetchInfo>& lazy_fetch_info,
                        const ExecutorDeviceType device_type) {
  for (const auto& chunk : chunks) {
    if (need_to_hold_chunk(chunk.get(), ra_exe_unit, lazy_fetch_info, device_type)) {
      return true;
    }
  }

  return false;
}

}  // namespace
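
// SharedKernelContext::getFragOffsets() lazily builds a prefix sum over the outer
// table's fragment row counts, so entry i is the global row offset of the first row
// in fragment i. For example, fragments with 100, 50, and 25 rows yield offsets
// {0, 100, 150, 175}.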
const std::vector<uint64_t>& SharedKernelContext::getFragOffsets() {
  std::lock_guard<std::mutex> lock(all_frag_row_offsets_mutex_);
  if (all_frag_row_offsets_.empty()) {
    all_frag_row_offsets_.resize(query_infos_.front().info.fragments.size() + 1);
    for (size_t i = 1; i <= query_infos_.front().info.fragments.size(); ++i) {
      all_frag_row_offsets_[i] =
          all_frag_row_offsets_[i - 1] +
          query_infos_.front().info.fragments[i - 1].getNumTuples();
    }
  }
  return all_frag_row_offsets_;
}

void SharedKernelContext::addDeviceResults(
    ResultSetPtr&& device_results,
    std::vector<size_t> outer_table_fragment_ids) {
  std::lock_guard<std::mutex> lock(reduce_mutex_);
  if (!needs_skip_result(device_results)) {
    all_fragment_results_.emplace_back(std::move(device_results),
                                       outer_table_fragment_ids);
  }
}

std::vector<std::pair<ResultSetPtr, std::vector<size_t>>>&
SharedKernelContext::getFragmentResults() {
  return all_fragment_results_;
}
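
// run() wraps runImpl() and maps allocator and engine exceptions onto the executor's
// error codes (ERR_OUT_OF_CPU_MEM, ERR_OUT_OF_GPU_MEM, ...), so the dispatching
// Executor sees a single QueryExecutionError type regardless of what failed inside
// the kernel.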
void ExecutionKernel::run(Executor* executor,
                          const size_t thread_idx,
                          SharedKernelContext& shared_context) {
  DEBUG_TIMER("ExecutionKernel::run");
  INJECT_TIMER(kernel_run);
  try {
    runImpl(executor, thread_idx, shared_context);
  } catch (const OutOfHostMemory& e) {
    throw QueryExecutionError(Executor::ERR_OUT_OF_CPU_MEM, e.what());
  } catch (const std::bad_alloc& e) {
    throw QueryExecutionError(Executor::ERR_OUT_OF_CPU_MEM, e.what());
  } catch (const OutOfRenderMemory& e) {
    throw QueryExecutionError(Executor::ERR_OUT_OF_RENDER_MEM, e.what());
  } catch (const OutOfMemory& e) {
    throw QueryExecutionError(
        Executor::ERR_OUT_OF_GPU_MEM,
        e.what(),
        QueryExecutionProperties{
            query_mem_desc.getQueryDescriptionType(),
            kernel_dispatch_mode == ExecutorDispatchMode::MultifragmentKernel});
  } catch (const ColumnarConversionNotSupported& e) {
    throw QueryExecutionError(Executor::ERR_COLUMNAR_CONVERSION_NOT_SUPPORTED, e.what());
  } catch (const TooManyLiterals& e) {
    throw QueryExecutionError(Executor::ERR_TOO_MANY_LITERALS, e.what());
  } catch (const StringConstInResultSet& e) {
    throw QueryExecutionError(Executor::ERR_STRING_CONST_IN_RESULTSET, e.what());
  } catch (const QueryExecutionError& e) {
    throw e;
  }
}
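
// runImpl() is the body of one execution kernel: it resolves the memory level and
// outer table for this fragment list, takes the per-GPU execution lock if needed,
// fetches the input chunks, optionally splits the work into TBB sub-tasks, builds a
// QueryExecutionContext, runs the compiled plan with or without group by, and finally
// hands the per-device results (plus any chunks that must stay alive) back to the
// SharedKernelContext.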
void ExecutionKernel::runImpl(Executor* executor,
                              const size_t thread_idx,
                              SharedKernelContext& shared_context) {
  CHECK(executor);
  const auto memory_level = chosen_device_type == ExecutorDeviceType::GPU
                                ? Data_Namespace::GPU_LEVEL
                                : Data_Namespace::CPU_LEVEL;
  CHECK_GE(frag_list.size(), size_t(1));
  // frag_list[0].table_id is how we tell which query we are running for UNION ALL.
  const int outer_table_id = ra_exe_unit_.union_all
                                 ? frag_list[0].table_id
                                 : ra_exe_unit_.input_descs[0].getTableId();
  CHECK_EQ(frag_list[0].table_id, outer_table_id);
  const auto& outer_tab_frag_ids = frag_list[0].fragment_ids;

  CHECK_GE(chosen_device_id, 0);
  CHECK_LT(chosen_device_id, Executor::max_gpu_count);

  auto catalog = executor->getCatalog();
  CHECK(catalog);

  auto data_mgr = executor->getDataMgr();

  // need to own them while query executes
  auto chunk_iterators_ptr = std::make_shared<std::list<ChunkIter>>();
  std::list<std::shared_ptr<Chunk_NS::Chunk>> chunks;
  std::unique_ptr<std::lock_guard<std::mutex>> gpu_lock;
  std::unique_ptr<CudaAllocator> device_allocator;
  if (chosen_device_type == ExecutorDeviceType::GPU) {
    gpu_lock.reset(
        new std::lock_guard<std::mutex>(executor->gpu_exec_mutex_[chosen_device_id]));
    device_allocator = std::make_unique<CudaAllocator>(
        data_mgr, chosen_device_id, getQueryEngineCudaStreamForDevice(chosen_device_id));
  }
  std::shared_ptr<FetchResult> fetch_result(new FetchResult);
  try {
    std::map<int, const TableFragments*> all_tables_fragments;
    QueryFragmentDescriptor::computeAllTablesFragments(
        all_tables_fragments, ra_exe_unit_, shared_context.getQueryInfos());

    *fetch_result = ra_exe_unit_.union_all
                        ? executor->fetchUnionChunks(column_fetcher,
                                                     ra_exe_unit_,
                                                     chosen_device_id,
                                                     memory_level,
                                                     all_tables_fragments,
                                                     frag_list,
                                                     *catalog,
                                                     *chunk_iterators_ptr,
                                                     chunks,
                                                     device_allocator.get(),
                                                     thread_idx,
                                                     eo.allow_runtime_query_interrupt)
                        : executor->fetchChunks(column_fetcher,
                                                ra_exe_unit_,
                                                chosen_device_id,
                                                memory_level,
                                                all_tables_fragments,
                                                frag_list,
                                                *catalog,
                                                *chunk_iterators_ptr,
                                                chunks,
                                                device_allocator.get(),
                                                thread_idx,
                                                eo.allow_runtime_query_interrupt);
    if (fetch_result->num_rows.empty()) {
      return;
    }
    if (eo.with_dynamic_watchdog &&
        !shared_context.dynamic_watchdog_set.test_and_set(std::memory_order_acquire)) {
      CHECK_GT(eo.dynamic_watchdog_time_limit, 0u);
      auto cycle_budget = dynamic_watchdog_init(eo.dynamic_watchdog_time_limit);
      LOG(INFO) << "Dynamic Watchdog budget: CPU: "
                << std::to_string(eo.dynamic_watchdog_time_limit) << "ms, "
                << std::to_string(cycle_budget) << " cycles";
    }
  } catch (const OutOfMemory&) {
    throw QueryExecutionError(
        memory_level == Data_Namespace::GPU_LEVEL ? Executor::ERR_OUT_OF_GPU_MEM
                                                  : Executor::ERR_OUT_OF_CPU_MEM,
        QueryExecutionProperties{
            query_mem_desc.getQueryDescriptionType(),
            kernel_dispatch_mode == ExecutorDispatchMode::MultifragmentKernel});
    return;
  }
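
  // ExecutorType::Extern: skip code generation entirely, turn the execution unit back
  // into SQL with serialize_to_sql() and run it through run_query_external(), then
  // register the results with the shared context like any other kernel.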
  if (eo.executor_type == ExecutorType::Extern) {
    if (ra_exe_unit_.input_descs.size() > 1) {
      throw std::runtime_error("Joins not supported through external execution");
    }
    const auto query = serialize_to_sql(&ra_exe_unit_, catalog);
    GroupByAndAggregate group_by_and_aggregate(executor,
                                               ExecutorDeviceType::CPU,
                                               ra_exe_unit_,
                                               shared_context.getQueryInfos(),
                                               executor->row_set_mem_owner_,
                                               std::nullopt);
    const auto query_mem_desc =
        group_by_and_aggregate.initQueryMemoryDescriptor(false, 0, 8, nullptr, false);
    device_results_ = run_query_external(
        query,
        *fetch_result,
        executor->plan_state_.get(),
        ExternalQueryOutputSpec{*query_mem_desc,
                                target_exprs_to_infos(ra_exe_unit_.target_exprs,
                                                      *query_mem_desc),
                                executor});
    shared_context.addDeviceResults(std::move(device_results_), outer_tab_frag_ids);
    return;
  }
  const CompilationResult& compilation_result = query_comp_desc.getCompilationResult();
  std::unique_ptr<QueryExecutionContext> query_exe_context_owned;
  const bool do_render = render_info_ && render_info_->isInSitu();

  int64_t total_num_input_rows{-1};
  if (kernel_dispatch_mode == ExecutorDispatchMode::KernelPerFragment &&
      query_mem_desc.getQueryDescriptionType() == QueryDescriptionType::Projection) {
    total_num_input_rows = 0;
    std::for_each(fetch_result->num_rows.begin(),
                  fetch_result->num_rows.end(),
                  [&total_num_input_rows](const std::vector<int64_t>& frag_row_count) {
                    total_num_input_rows = std::accumulate(frag_row_count.begin(),
                                                           frag_row_count.end(),
                                                           total_num_input_rows);
                  });
    VLOG(2) << "total_num_input_rows=" << total_num_input_rows;
    // TODO(adb): we may want to take this early out for all queries, but we are most
    // likely to see this query pattern on the kernel per fragment path (e.g. with
    // HAVING 0=1)
    if (total_num_input_rows == 0) {
      return;
    }

    if (query_has_inner_join(ra_exe_unit_)) {
      total_num_input_rows *= ra_exe_unit_.input_descs.size();
    }
  }
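
  // For rowid point lookups (rowid_lookup_key >= 0), rebase the global rowid into this
  // kernel's fragment-local row space using the shared fragment offsets, so execution
  // can start at the matching row.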
  uint32_t start_rowid{0};
  if (rowid_lookup_key >= 0) {
    if (!frag_list.empty()) {
      const auto& all_frag_row_offsets = shared_context.getFragOffsets();
      start_rowid = rowid_lookup_key -
                    all_frag_row_offsets[frag_list.begin()->fragment_ids.front()];
    }
  }

#ifdef HAVE_TBB
  bool can_run_subkernels = shared_context.getThreadPool() != nullptr;

  // Sub-tasks are supported for groupby queries and estimators only for now.
  bool is_groupby =
      (ra_exe_unit_.groupby_exprs.size() > 1) ||
      (ra_exe_unit_.groupby_exprs.size() == 1 && ra_exe_unit_.groupby_exprs.front());
  can_run_subkernels = can_run_subkernels && (is_groupby || ra_exe_unit_.estimator);

  // In case some column is lazily fetched, we cannot mix different fragments in a
  // single ResultSet.
  can_run_subkernels =
      can_run_subkernels && !executor->hasLazyFetchColumns(ra_exe_unit_.target_exprs);

  // TODO: Use another structure to hold chunks. Currently, ResultSet holds them, but
  // with sub-tasks a chunk can be referenced by many ResultSets, so some outer
  // structure to hold all ResultSets and all chunks is required.
  can_run_subkernels =
      can_run_subkernels &&
      !need_to_hold_chunk(
          chunks, ra_exe_unit_, std::vector<ColumnLazyFetchInfo>(), chosen_device_type);

  // TODO: check for literals? We serialize literals before execution and hold them in
  // result sets. Can we simply do it once and hold them in an outer structure?
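
  // The loop below carves the fragment's rows into fixed-size sub-tasks. For example,
  // with total_rows = 1000 and g_cpu_sub_task_size = 300, the sub-tasks cover rows
  // [0, 300), [300, 600), [600, 900) and [900, 1000), each scheduled on the shared
  // thread pool.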
  if (can_run_subkernels) {
    size_t total_rows = fetch_result->num_rows[0][0];
    size_t sub_size = g_cpu_sub_task_size;

    for (size_t sub_start = start_rowid; sub_start < total_rows; sub_start += sub_size) {
      sub_size = (sub_start + sub_size > total_rows) ? total_rows - sub_start : sub_size;
      auto subtask = std::make_shared<KernelSubtask>(*this,
                                                     shared_context,
                                                     fetch_result,
                                                     chunk_iterators_ptr,
                                                     total_num_input_rows,
                                                     sub_start,
                                                     sub_size,
                                                     thread_idx);
      shared_context.getThreadPool()->run(
          [subtask, executor] { subtask->run(executor); });
    }

    return;
  }
#endif  // HAVE_TBB

  if (eo.executor_type == ExecutorType::Native) {
    try {
      // std::unique_ptr<QueryExecutionContext> query_exe_context_owned
      // has std::unique_ptr<QueryMemoryInitializer> query_buffers_
      // has std::vector<std::unique_ptr<ResultSet>> result_sets_
      // has std::unique_ptr<ResultSetStorage> storage_
      // which are initialized and possibly allocated here.
      query_exe_context_owned =
          query_mem_desc.getQueryExecutionContext(ra_exe_unit_,
                                                  executor,
                                                  chosen_device_type,
                                                  kernel_dispatch_mode,
                                                  chosen_device_id,
                                                  outer_table_id,
                                                  total_num_input_rows,
                                                  fetch_result->col_buffers,
                                                  fetch_result->frag_offsets,
                                                  executor->getRowSetMemoryOwner(),
                                                  compilation_result.output_columnar,
                                                  query_mem_desc.sortOnGpu(),
                                                  thread_idx,
                                                  do_render ? render_info_ : nullptr);
    } catch (const OutOfHostMemory& e) {
      throw QueryExecutionError(Executor::ERR_OUT_OF_CPU_MEM);
    }
  }
  QueryExecutionContext* query_exe_context{query_exe_context_owned.get()};
  CHECK(query_exe_context);
  int32_t err{0};
  bool optimize_cuda_block_and_grid_sizes =
      chosen_device_type == ExecutorDeviceType::GPU &&
      eo.optimize_cuda_block_and_grid_sizes;

  if (ra_exe_unit_.groupby_exprs.empty()) {
    err = executor->executePlanWithoutGroupBy(ra_exe_unit_,
                                              compilation_result,
                                              query_comp_desc.hoistLiterals(),
                                              device_results_,
                                              ra_exe_unit_.target_exprs,
                                              chosen_device_type,
                                              fetch_result->col_buffers,
                                              query_exe_context,
                                              fetch_result->num_rows,
                                              fetch_result->frag_offsets,
                                              data_mgr,
                                              chosen_device_id,
                                              start_rowid,
                                              ra_exe_unit_.input_descs.size(),
                                              eo.allow_runtime_query_interrupt,
                                              do_render ? render_info_ : nullptr,
                                              optimize_cuda_block_and_grid_sizes);
  } else {
    if (ra_exe_unit_.union_all) {
      VLOG(1) << "outer_table_id=" << outer_table_id
              << " ra_exe_unit_.scan_limit=" << ra_exe_unit_.scan_limit;
    }
    err = executor->executePlanWithGroupBy(ra_exe_unit_,
                                           compilation_result,
                                           query_comp_desc.hoistLiterals(),
                                           device_results_,
                                           chosen_device_type,
                                           fetch_result->col_buffers,
                                           outer_tab_frag_ids,
                                           query_exe_context,
                                           fetch_result->num_rows,
                                           fetch_result->frag_offsets,
                                           data_mgr,
                                           chosen_device_id,
                                           outer_table_id,
                                           ra_exe_unit_.scan_limit,
                                           start_rowid,
                                           ra_exe_unit_.input_descs.size(),
                                           eo.allow_runtime_query_interrupt,
                                           do_render ? render_info_ : nullptr,
                                           optimize_cuda_block_and_grid_sizes);
  }
  if (device_results_) {
    std::list<std::shared_ptr<Chunk_NS::Chunk>> chunks_to_hold;
    for (const auto& chunk : chunks) {
      if (need_to_hold_chunk(chunk.get(),
                             ra_exe_unit_,
                             device_results_->getLazyFetchInfo(),
                             chosen_device_type)) {
        chunks_to_hold.push_back(chunk);
      }
    }
    device_results_->holdChunks(chunks_to_hold);
    device_results_->holdChunkIterators(chunk_iterators_ptr);
  } else {
    VLOG(1) << "null device_results.";
  }
  if (err) {
    throw QueryExecutionError(err);
  }
  shared_context.addDeviceResults(std::move(device_results_), outer_tab_frag_ids);
}

#ifdef HAVE_TBB

void KernelSubtask::run(Executor* executor) {
  try {
    runImpl(executor);
  } catch (const OutOfHostMemory& e) {
    throw QueryExecutionError(Executor::ERR_OUT_OF_CPU_MEM, e.what());
  } catch (const std::bad_alloc& e) {
    throw QueryExecutionError(Executor::ERR_OUT_OF_CPU_MEM, e.what());
  } catch (const OutOfRenderMemory& e) {
    throw QueryExecutionError(Executor::ERR_OUT_OF_RENDER_MEM, e.what());
  } catch (const OutOfMemory& e) {
    throw QueryExecutionError(
        Executor::ERR_OUT_OF_GPU_MEM,
        e.what(),
        QueryExecutionProperties{
            kernel_.query_mem_desc.getQueryDescriptionType(),
            kernel_.kernel_dispatch_mode == ExecutorDispatchMode::MultifragmentKernel});
  } catch (const ColumnarConversionNotSupported& e) {
    throw QueryExecutionError(Executor::ERR_COLUMNAR_CONVERSION_NOT_SUPPORTED, e.what());
  } catch (const TooManyLiterals& e) {
    throw QueryExecutionError(Executor::ERR_TOO_MANY_LITERALS, e.what());
  } catch (const StringConstInResultSet& e) {
    throw QueryExecutionError(Executor::ERR_STRING_CONST_IN_RESULTSET, e.what());
  } catch (const QueryExecutionError& e) {
    throw e;
  }
}
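
// Sub-tasks reuse a thread-local QueryExecutionContext cached on the shared kernel
// context, so consecutive sub-tasks scheduled on the same TBB worker thread don't
// re-initialize their output buffers.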
void KernelSubtask::runImpl(Executor* executor) {
  auto& query_exe_context_owned = shared_context_.getTlsExecutionContext().local();
  const bool do_render = kernel_.render_info_ && kernel_.render_info_->isInSitu();
  const CompilationResult& compilation_result =
      kernel_.query_comp_desc.getCompilationResult();
  const int outer_table_id = kernel_.ra_exe_unit_.union_all
                                 ? kernel_.frag_list[0].table_id
                                 : kernel_.ra_exe_unit_.input_descs[0].getTableId();

  if (!query_exe_context_owned) {
    try {
      // We pass fake col_buffers and frag_offsets. They are not actually used for
      // sub-tasks, but passing empty structures would lead to empty results.
      std::vector<std::vector<const int8_t*>> col_buffers(
          fetch_result_->col_buffers.size(),
          std::vector<const int8_t*>(fetch_result_->col_buffers[0].size()));
      std::vector<std::vector<uint64_t>> frag_offsets(
          fetch_result_->frag_offsets.size(),
          std::vector<uint64_t>(fetch_result_->frag_offsets[0].size()));
      query_exe_context_owned = kernel_.query_mem_desc.getQueryExecutionContext(
          kernel_.ra_exe_unit_,
          executor,
          kernel_.chosen_device_type,
          kernel_.kernel_dispatch_mode,
          kernel_.chosen_device_id,
          outer_table_id,
          total_num_input_rows_,
          col_buffers,
          frag_offsets,
          executor->getRowSetMemoryOwner(),
          compilation_result.output_columnar,
          kernel_.query_mem_desc.sortOnGpu(),
          // TODO: use TBB thread id to choose allocator
          thread_idx_,
          do_render ? kernel_.render_info_ : nullptr);
    } catch (const OutOfHostMemory& e) {
      throw QueryExecutionError(Executor::ERR_OUT_OF_CPU_MEM);
    }
  }

  const auto& outer_tab_frag_ids = kernel_.frag_list[0].fragment_ids;
  auto catalog = executor->getCatalog();
  CHECK(catalog);
  QueryExecutionContext* query_exe_context{query_exe_context_owned.get()};
  CHECK(query_exe_context);
  int32_t err{0};
  bool optimize_cuda_block_and_grid_sizes =
      kernel_.chosen_device_type == ExecutorDeviceType::GPU &&
      kernel_.eo.optimize_cuda_block_and_grid_sizes;
  if (kernel_.ra_exe_unit_.groupby_exprs.empty()) {
    err = executor->executePlanWithoutGroupBy(kernel_.ra_exe_unit_,
                                              compilation_result,
                                              kernel_.query_comp_desc.hoistLiterals(),
                                              nullptr,
                                              kernel_.ra_exe_unit_.target_exprs,
                                              kernel_.chosen_device_type,
                                              fetch_result_->col_buffers,
                                              query_exe_context,
                                              fetch_result_->num_rows,
                                              fetch_result_->frag_offsets,
                                              &catalog->getDataMgr(),
                                              kernel_.chosen_device_id,
                                              start_rowid_,
                                              kernel_.ra_exe_unit_.input_descs.size(),
                                              kernel_.eo.allow_runtime_query_interrupt,
                                              do_render ? kernel_.render_info_ : nullptr,
                                              optimize_cuda_block_and_grid_sizes,
                                              start_rowid_ + num_rows_to_process_);
  } else {
    err = executor->executePlanWithGroupBy(kernel_.ra_exe_unit_,
                                           compilation_result,
                                           kernel_.query_comp_desc.hoistLiterals(),
                                           nullptr,
                                           kernel_.chosen_device_type,
                                           fetch_result_->col_buffers,
                                           outer_tab_frag_ids,
                                           query_exe_context,
                                           fetch_result_->num_rows,
                                           fetch_result_->frag_offsets,
                                           &catalog->getDataMgr(),
                                           kernel_.chosen_device_id,
                                           outer_table_id,
                                           kernel_.ra_exe_unit_.scan_limit,
                                           start_rowid_,
                                           kernel_.ra_exe_unit_.input_descs.size(),
                                           kernel_.eo.allow_runtime_query_interrupt,
                                           do_render ? kernel_.render_info_ : nullptr,
                                           optimize_cuda_block_and_grid_sizes,
                                           start_rowid_ + num_rows_to_process_);
  }

  if (err) {
    throw QueryExecutionError(err);
  }
}

#endif  // HAVE_TBB