ExecutionKernel.cpp
/*
 * Copyright 2022 HEAVY.AI, Inc.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#include "QueryEngine/ExecutionKernel.h"

#include <mutex>
#include <vector>

#include "QueryEngine/Execute.h"
namespace {

bool needs_skip_result(const ResultSetPtr& res) {
  return !res || res->definitelyHasNoRows();
}

inline bool query_has_inner_join(const RelAlgExecutionUnit& ra_exe_unit) {
  return (std::count_if(ra_exe_unit.join_quals.begin(),
                        ra_exe_unit.join_quals.end(),
                        [](const auto& join_condition) {
                          return join_condition.type == JoinType::INNER;
                        }) > 0);
}

// If the column is part of the target expressions, result set iteration needs it alive.
bool need_to_hold_chunk(const Chunk_NS::Chunk* chunk,
                        const RelAlgExecutionUnit& ra_exe_unit,
                        const std::vector<ColumnLazyFetchInfo>& lazy_fetch_info,
                        const ExecutorDeviceType device_type) {
  CHECK(chunk->getColumnDesc());
  const auto& chunk_ti = chunk->getColumnDesc()->columnType;
  if (device_type == ExecutorDeviceType::CPU &&
      (chunk_ti.is_array() ||
       (chunk_ti.is_string() && chunk_ti.get_compression() == kENCODING_NONE))) {
    for (const auto target_expr : ra_exe_unit.target_exprs) {
      const auto col_var = dynamic_cast<const Analyzer::ColumnVar*>(target_expr);
      if (col_var) {
        const auto& column_key = col_var->getColumnKey();
        return column_key.column_id == chunk->getColumnDesc()->columnId &&
               column_key.table_id == chunk->getColumnDesc()->tableId &&
               column_key.db_id == chunk->getColumnDesc()->db_id;
      }
    }
  }
  if (lazy_fetch_info.empty()) {
    return false;
  }
  CHECK_EQ(lazy_fetch_info.size(), ra_exe_unit.target_exprs.size());
  for (size_t i = 0; i < ra_exe_unit.target_exprs.size(); i++) {
    const auto target_expr = ra_exe_unit.target_exprs[i];
    const auto& col_lazy_fetch = lazy_fetch_info[i];
    const auto col_var = dynamic_cast<const Analyzer::ColumnVar*>(target_expr);
    if (col_var) {
      const auto& column_key = col_var->getColumnKey();
      if (column_key.column_id == chunk->getColumnDesc()->columnId &&
          column_key.table_id == chunk->getColumnDesc()->tableId &&
          column_key.db_id == chunk->getColumnDesc()->db_id) {
        if (col_lazy_fetch.is_lazily_fetched) {
          // hold lazily fetched inputs for later iteration
          return true;
        }
      }
    }
  }
  return false;
}

bool need_to_hold_chunk(const std::list<std::shared_ptr<Chunk_NS::Chunk>>& chunks,
                        const RelAlgExecutionUnit& ra_exe_unit,
                        const std::vector<ColumnLazyFetchInfo>& lazy_fetch_info,
                        const ExecutorDeviceType device_type) {
  for (const auto& chunk : chunks) {
    if (need_to_hold_chunk(chunk.get(), ra_exe_unit, lazy_fetch_info, device_type)) {
      return true;
    }
  }

  return false;
}

}  // namespace

const std::vector<uint64_t>& SharedKernelContext::getFragOffsets() {
  std::lock_guard<std::mutex> lock(all_frag_row_offsets_mutex_);
  if (all_frag_row_offsets_.empty()) {
    all_frag_row_offsets_.resize(query_infos_.front().info.fragments.size() + 1);
    for (size_t i = 1; i <= query_infos_.front().info.fragments.size(); ++i) {
      all_frag_row_offsets_[i] =
          all_frag_row_offsets_[i - 1] +
          query_infos_.front().info.fragments[i - 1].getNumTuples();
    }
  }
  return all_frag_row_offsets_;
}
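
// Illustrative sketch, not part of the original source: getFragOffsets() builds a
// prefix sum over the outer table's fragment sizes, so entry i is the global row id
// of the first row of fragment i. For three fragments with 100, 250, and 50 tuples
// (hypothetical numbers), the computed offsets are {0, 100, 350, 400}:
//
//   std::vector<uint64_t> num_tuples{100, 250, 50};
//   std::vector<uint64_t> offsets(num_tuples.size() + 1, 0);
//   for (size_t i = 1; i <= num_tuples.size(); ++i) {
//     offsets[i] = offsets[i - 1] + num_tuples[i - 1];
//   }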

void SharedKernelContext::addDeviceResults(ResultSetPtr&& device_results,
                                           std::vector<size_t> outer_table_fragment_ids) {
  std::lock_guard<std::mutex> lock(reduce_mutex_);
  if (!needs_skip_result(device_results)) {
    all_fragment_results_.emplace_back(std::move(device_results),
                                       outer_table_fragment_ids);
  }
}

std::vector<std::pair<ResultSetPtr, std::vector<size_t>>>&
SharedKernelContext::getFragmentResults() {
  return all_fragment_results_;
}

void ExecutionKernel::run(Executor* executor,
                          const size_t thread_idx,
                          SharedKernelContext& shared_context) {
  DEBUG_TIMER("ExecutionKernel::run");
  INJECT_TIMER(kernel_run);
  try {
    runImpl(executor, thread_idx, shared_context);
  } catch (const OutOfHostMemory& e) {
    throw QueryExecutionError(Executor::ERR_OUT_OF_CPU_MEM, e.what());
  } catch (const std::bad_alloc& e) {
    throw QueryExecutionError(Executor::ERR_OUT_OF_CPU_MEM, e.what());
  } catch (const OutOfRenderMemory& e) {
    throw QueryExecutionError(Executor::ERR_OUT_OF_RENDER_MEM, e.what());
  } catch (const OutOfMemory& e) {
    throw QueryExecutionError(
        Executor::ERR_OUT_OF_GPU_MEM,
        e.what(),
        QueryExecutionProperties{
            query_mem_desc.getQueryDescriptionType(),
            kernel_dispatch_mode == ExecutorDispatchMode::MultifragmentKernel});
  } catch (const ColumnarConversionNotSupported& e) {
    throw QueryExecutionError(Executor::ERR_COLUMNAR_CONVERSION_NOT_SUPPORTED, e.what());
  } catch (const TooManyLiterals& e) {
    throw QueryExecutionError(Executor::ERR_TOO_MANY_LITERALS, e.what());
  } catch (const StringConstInResultSet& e) {
    throw QueryExecutionError(Executor::ERR_STRING_CONST_IN_RESULTSET, e.what());
  } catch (const QueryExecutionError& e) {
    throw e;
  }
}
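
// Illustrative sketch, not part of the original source: the executor typically builds
// one ExecutionKernel per outer-table fragment list and runs each against a shared
// SharedKernelContext, then reduces the collected per-kernel ResultSets. Names such
// as `kernels`, `query_infos`, and `thread_pool` below are assumptions.
//
//   SharedKernelContext shared_context(query_infos);
//   for (size_t i = 0; i < kernels.size(); ++i) {
//     thread_pool.run([&, i] { kernels[i]->run(executor, i, shared_context); });
//   }
//   thread_pool.join();
//   auto& per_fragment_results = shared_context.getFragmentResults();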

void ExecutionKernel::runImpl(Executor* executor,
                              const size_t thread_idx,
                              SharedKernelContext& shared_context) {
  CHECK(executor);
  const auto memory_level = chosen_device_type == ExecutorDeviceType::GPU
                                ? Data_Namespace::GPU_LEVEL
                                : Data_Namespace::CPU_LEVEL;
  CHECK_GE(frag_list.size(), size_t(1));
  // frag_list[0].table_key is how we tell which query we are running for UNION ALL.
  const auto& outer_table_key = ra_exe_unit_.union_all
                                    ? frag_list[0].table_key
                                    : ra_exe_unit_.input_descs[0].getTableKey();
  CHECK_EQ(frag_list[0].table_key, outer_table_key);
  const auto& outer_tab_frag_ids = frag_list[0].fragment_ids;

  CHECK_GE(chosen_device_id, 0);
  CHECK_LT(chosen_device_id, Executor::max_gpu_count);

  auto data_mgr = executor->getDataMgr();

  // need to own them while the query executes
  auto chunk_iterators_ptr = std::make_shared<std::list<ChunkIter>>();
  std::list<std::shared_ptr<Chunk_NS::Chunk>> chunks;
  std::unique_ptr<std::lock_guard<std::mutex>> gpu_lock;
  std::unique_ptr<CudaAllocator> device_allocator;
  if (chosen_device_type == ExecutorDeviceType::GPU) {
    gpu_lock.reset(
        new std::lock_guard<std::mutex>(executor->gpu_exec_mutex_[chosen_device_id]));
    device_allocator = std::make_unique<CudaAllocator>(
        data_mgr, chosen_device_id, getQueryEngineCudaStreamForDevice(chosen_device_id));
  }
  std::shared_ptr<FetchResult> fetch_result(new FetchResult);
  try {
    std::map<shared::TableKey, const TableFragments*> all_tables_fragments;
    QueryFragmentDescriptor::computeAllTablesFragments(
        all_tables_fragments, ra_exe_unit_, shared_context.getQueryInfos());

    *fetch_result = ra_exe_unit_.union_all
                        ? executor->fetchUnionChunks(column_fetcher,
                                                     ra_exe_unit_,
                                                     chosen_device_id,
                                                     memory_level,
                                                     all_tables_fragments,
                                                     frag_list,
                                                     *chunk_iterators_ptr,
                                                     chunks,
                                                     device_allocator.get(),
                                                     thread_idx,
                                                     eo.allow_runtime_query_interrupt)
                        : executor->fetchChunks(column_fetcher,
                                                ra_exe_unit_,
                                                chosen_device_id,
                                                memory_level,
                                                all_tables_fragments,
                                                frag_list,
                                                *chunk_iterators_ptr,
                                                chunks,
                                                device_allocator.get(),
                                                thread_idx,
                                                eo.allow_runtime_query_interrupt);
    if (fetch_result->num_rows.empty()) {
      return;
    }
    if (eo.with_dynamic_watchdog &&
        !shared_context.dynamic_watchdog_set.test_and_set(std::memory_order_acquire)) {
      CHECK_GT(eo.dynamic_watchdog_time_limit, 0u);
      auto cycle_budget = dynamic_watchdog_init(eo.dynamic_watchdog_time_limit);
      LOG(INFO) << "Dynamic Watchdog budget: CPU: "
                << std::to_string(eo.dynamic_watchdog_time_limit) << "ms, "
                << std::to_string(cycle_budget) << " cycles";
    }
  } catch (const OutOfMemory&) {
    throw QueryExecutionError(
        memory_level == Data_Namespace::GPU_LEVEL ? Executor::ERR_OUT_OF_GPU_MEM
                                                  : Executor::ERR_OUT_OF_CPU_MEM,
        QueryExecutionProperties{
            query_mem_desc.getQueryDescriptionType(),
            kernel_dispatch_mode == ExecutorDispatchMode::MultifragmentKernel});
    return;
  }

  if (eo.executor_type == ExecutorType::Extern) {
    if (ra_exe_unit_.input_descs.size() > 1) {
      throw std::runtime_error("Joins not supported through external execution");
    }
    const auto query = serialize_to_sql(&ra_exe_unit_);
    GroupByAndAggregate group_by_and_aggregate(executor,
                                               ExecutorDeviceType::CPU,
                                               ra_exe_unit_,
                                               shared_context.getQueryInfos(),
                                               executor->row_set_mem_owner_,
                                               std::nullopt);
    const auto query_mem_desc =
        group_by_and_aggregate.initQueryMemoryDescriptor(false, 0, 8, nullptr, false);
    device_results_ = run_query_external(
        query,
        *fetch_result,
        executor->plan_state_.get(),
        ExternalQueryOutputSpec{*query_mem_desc,
                                target_exprs_to_infos(ra_exe_unit_.target_exprs,
                                                      *query_mem_desc),
                                executor});
    shared_context.addDeviceResults(std::move(device_results_), outer_tab_frag_ids);
    return;
  }
  const CompilationResult& compilation_result = query_comp_desc.getCompilationResult();
  std::unique_ptr<QueryExecutionContext> query_exe_context_owned;
  const bool do_render = render_info_ && render_info_->isInSitu();

  int64_t total_num_input_rows{-1};
  if (kernel_dispatch_mode == ExecutorDispatchMode::KernelPerFragment &&
      query_mem_desc.getQueryDescriptionType() == QueryDescriptionType::Projection) {
    total_num_input_rows = 0;
    std::for_each(fetch_result->num_rows.begin(),
                  fetch_result->num_rows.end(),
                  [&total_num_input_rows](const std::vector<int64_t>& frag_row_count) {
                    total_num_input_rows = std::accumulate(frag_row_count.begin(),
                                                           frag_row_count.end(),
                                                           total_num_input_rows);
                  });
    VLOG(2) << "total_num_input_rows=" << total_num_input_rows;
    // TODO(adb): we may want to take this early out for all queries, but we are most
    // likely to see this query pattern on the kernel-per-fragment path (e.g. with
    // HAVING 0=1)
    if (total_num_input_rows == 0) {
      return;
    }

    if (query_has_inner_join(ra_exe_unit_)) {
      total_num_input_rows *= ra_exe_unit_.input_descs.size();
    }
  }

  uint32_t start_rowid{0};
  if (rowid_lookup_key >= 0) {
    if (!frag_list.empty()) {
      const auto& all_frag_row_offsets = shared_context.getFragOffsets();
      start_rowid = rowid_lookup_key -
                    all_frag_row_offsets[frag_list.begin()->fragment_ids.front()];
    }
  }

#ifdef HAVE_TBB
  bool can_run_subkernels = shared_context.getThreadPool() != nullptr;

  // Sub-tasks are supported for groupby queries and estimators only for now.
  bool is_groupby =
      (ra_exe_unit_.groupby_exprs.size() > 1) ||
      (ra_exe_unit_.groupby_exprs.size() == 1 && ra_exe_unit_.groupby_exprs.front());
  can_run_subkernels = can_run_subkernels && (is_groupby || ra_exe_unit_.estimator);

  // In case some column is lazily fetched, we cannot mix different fragments in a single
  // ResultSet.
  can_run_subkernels =
      can_run_subkernels && !executor->hasLazyFetchColumns(ra_exe_unit_.target_exprs);

  // TODO: Use another structure to hold chunks. Currently, ResultSet holds them, but
  // with sub-tasks a chunk can be referenced by many ResultSets. So, some outer
  // structure to hold all ResultSets and all chunks is required.
  can_run_subkernels =
      can_run_subkernels &&
      !need_to_hold_chunk(
          chunks, ra_exe_unit_, std::vector<ColumnLazyFetchInfo>(), chosen_device_type);

  // TODO: check for literals? We serialize literals before execution and hold them in
  // result sets. Can we simply do it once and hold them in an outer structure?
  if (can_run_subkernels) {
    size_t total_rows = fetch_result->num_rows[0][0];
    size_t sub_size = g_cpu_sub_task_size;

    for (size_t sub_start = start_rowid; sub_start < total_rows; sub_start += sub_size) {
      sub_size = (sub_start + sub_size > total_rows) ? total_rows - sub_start : sub_size;
      auto subtask = std::make_shared<KernelSubtask>(*this,
                                                     shared_context,
                                                     fetch_result,
                                                     chunk_iterators_ptr,
                                                     total_num_input_rows,
                                                     sub_start,
                                                     sub_size,
                                                     thread_idx);
      shared_context.getThreadPool()->run(
          [subtask, executor] { subtask->run(executor); });
    }

    return;
  }
#endif  // HAVE_TBB
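
// Worked example, not part of the original source: with total_rows = 10,
// start_rowid = 0, and g_cpu_sub_task_size = 4, the loop above creates sub-tasks
// over the row ranges [0, 4), [4, 8), and [8, 10); the last range is clipped so
// that sub_start + sub_size never exceeds total_rows.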

  if (eo.executor_type == ExecutorType::Native) {
    try {
      // std::unique_ptr<QueryExecutionContext> query_exe_context_owned
      // has std::unique_ptr<QueryMemoryInitializer> query_buffers_
      // has std::vector<std::unique_ptr<ResultSet>> result_sets_
      // has std::unique_ptr<ResultSetStorage> storage_
      // which are initialized and possibly allocated here.
      query_exe_context_owned =
          query_mem_desc.getQueryExecutionContext(ra_exe_unit_,
                                                  executor,
                                                  chosen_device_type,
                                                  kernel_dispatch_mode,
                                                  chosen_device_id,
                                                  outer_table_key,
                                                  total_num_input_rows,
                                                  fetch_result->col_buffers,
                                                  fetch_result->frag_offsets,
                                                  executor->getRowSetMemoryOwner(),
                                                  compilation_result.output_columnar,
                                                  query_mem_desc.sortOnGpu(),
                                                  thread_idx,
                                                  do_render ? render_info_ : nullptr);
    } catch (const OutOfHostMemory& e) {
      throw QueryExecutionError(Executor::ERR_OUT_OF_CPU_MEM);
    }
  }
  QueryExecutionContext* query_exe_context{query_exe_context_owned.get()};
  CHECK(query_exe_context);
  int32_t err{0};
  bool optimize_cuda_block_and_grid_sizes =
      chosen_device_type == ExecutorDeviceType::GPU &&
      eo.optimize_cuda_block_and_grid_sizes;

  if (ra_exe_unit_.groupby_exprs.empty()) {
    err = executor->executePlanWithoutGroupBy(ra_exe_unit_,
                                              compilation_result,
                                              query_comp_desc.hoistLiterals(),
                                              &device_results_,
                                              ra_exe_unit_.target_exprs,
                                              chosen_device_type,
                                              fetch_result->col_buffers,
                                              query_exe_context,
                                              fetch_result->num_rows,
                                              fetch_result->frag_offsets,
                                              data_mgr,
                                              chosen_device_id,
                                              start_rowid,
                                              ra_exe_unit_.input_descs.size(),
                                              eo.allow_runtime_query_interrupt,
                                              do_render ? render_info_ : nullptr,
                                              optimize_cuda_block_and_grid_sizes);
  } else {
    if (ra_exe_unit_.union_all) {
      VLOG(1) << "outer_table_key=" << outer_table_key
              << " ra_exe_unit_.scan_limit=" << ra_exe_unit_.scan_limit;
    }
    err = executor->executePlanWithGroupBy(ra_exe_unit_,
                                           compilation_result,
                                           query_comp_desc.hoistLiterals(),
                                           &device_results_,
                                           chosen_device_type,
                                           fetch_result->col_buffers,
                                           outer_tab_frag_ids,
                                           query_exe_context,
                                           fetch_result->num_rows,
                                           fetch_result->frag_offsets,
                                           data_mgr,
                                           chosen_device_id,
                                           outer_table_key,
                                           ra_exe_unit_.scan_limit,
                                           start_rowid,
                                           ra_exe_unit_.input_descs.size(),
                                           eo.allow_runtime_query_interrupt,
                                           do_render ? render_info_ : nullptr,
                                           optimize_cuda_block_and_grid_sizes);
  }
  if (device_results_) {
    std::list<std::shared_ptr<Chunk_NS::Chunk>> chunks_to_hold;
    for (const auto& chunk : chunks) {
      if (need_to_hold_chunk(chunk.get(),
                             ra_exe_unit_,
                             device_results_->getLazyFetchInfo(),
                             chosen_device_type)) {
        chunks_to_hold.push_back(chunk);
      }
    }
    device_results_->holdChunks(chunks_to_hold);
    device_results_->holdChunkIterators(chunk_iterators_ptr);
  } else {
    VLOG(1) << "null device_results.";
  }
  if (err) {
    throw QueryExecutionError(err);
  }
  shared_context.addDeviceResults(std::move(device_results_), outer_tab_frag_ids);
}

#ifdef HAVE_TBB

void KernelSubtask::run(Executor* executor) {
  try {
    runImpl(executor);
  } catch (const OutOfHostMemory& e) {
    throw QueryExecutionError(Executor::ERR_OUT_OF_CPU_MEM, e.what());
  } catch (const std::bad_alloc& e) {
    throw QueryExecutionError(Executor::ERR_OUT_OF_CPU_MEM, e.what());
  } catch (const OutOfRenderMemory& e) {
    throw QueryExecutionError(Executor::ERR_OUT_OF_RENDER_MEM, e.what());
  } catch (const OutOfMemory& e) {
    throw QueryExecutionError(
        Executor::ERR_OUT_OF_GPU_MEM,
        e.what(),
        QueryExecutionProperties{
            kernel_.query_mem_desc.getQueryDescriptionType(),
            kernel_.kernel_dispatch_mode == ExecutorDispatchMode::MultifragmentKernel});
  } catch (const ColumnarConversionNotSupported& e) {
    throw QueryExecutionError(Executor::ERR_COLUMNAR_CONVERSION_NOT_SUPPORTED, e.what());
  } catch (const TooManyLiterals& e) {
    throw QueryExecutionError(Executor::ERR_TOO_MANY_LITERALS, e.what());
  } catch (const StringConstInResultSet& e) {
    throw QueryExecutionError(Executor::ERR_STRING_CONST_IN_RESULTSET, e.what());
  } catch (const QueryExecutionError& e) {
    throw e;
  }
}

void KernelSubtask::runImpl(Executor* executor) {
  auto& query_exe_context_owned = shared_context_.getTlsExecutionContext().local();
  const bool do_render = kernel_.render_info_ && kernel_.render_info_->isInSitu();
  const CompilationResult& compilation_result =
      kernel_.query_comp_desc.getCompilationResult();
  const shared::TableKey& outer_table_key =
      kernel_.ra_exe_unit_.union_all ? kernel_.frag_list[0].table_key
                                     : kernel_.ra_exe_unit_.input_descs[0].getTableKey();

  if (!query_exe_context_owned) {
    try {
      // We pass fake col_buffers and frag_offsets. They are not actually used by
      // sub-tasks, but we shouldn't pass empty structures to avoid empty results.
      std::vector<std::vector<const int8_t*>> col_buffers(
          fetch_result_->col_buffers.size(),
          std::vector<const int8_t*>(fetch_result_->col_buffers[0].size()));
      std::vector<std::vector<uint64_t>> frag_offsets(
          fetch_result_->frag_offsets.size(),
          std::vector<uint64_t>(fetch_result_->frag_offsets[0].size()));
      query_exe_context_owned = kernel_.query_mem_desc.getQueryExecutionContext(
          kernel_.ra_exe_unit_,
          executor,
          kernel_.chosen_device_type,
          kernel_.kernel_dispatch_mode,
          kernel_.chosen_device_id,
          outer_table_key,
          total_num_input_rows_,
          col_buffers,
          frag_offsets,
          executor->getRowSetMemoryOwner(),
          compilation_result.output_columnar,
          kernel_.query_mem_desc.sortOnGpu(),
          // TODO: use TBB thread id to choose allocator
          thread_idx_,
          do_render ? kernel_.render_info_ : nullptr);
    } catch (const OutOfHostMemory& e) {
      throw QueryExecutionError(Executor::ERR_OUT_OF_CPU_MEM);
    }
  }

  const auto& outer_tab_frag_ids = kernel_.frag_list[0].fragment_ids;
  QueryExecutionContext* query_exe_context{query_exe_context_owned.get()};
  CHECK(query_exe_context);
  int32_t err{0};
  bool optimize_cuda_block_and_grid_sizes =
      kernel_.chosen_device_type == ExecutorDeviceType::GPU &&
      kernel_.eo.optimize_cuda_block_and_grid_sizes;
  if (kernel_.ra_exe_unit_.groupby_exprs.empty()) {
    err = executor->executePlanWithoutGroupBy(kernel_.ra_exe_unit_,
                                              compilation_result,
                                              kernel_.query_comp_desc.hoistLiterals(),
                                              nullptr,
                                              kernel_.ra_exe_unit_.target_exprs,
                                              kernel_.chosen_device_type,
                                              fetch_result_->col_buffers,
                                              query_exe_context,
                                              fetch_result_->num_rows,
                                              fetch_result_->frag_offsets,
                                              executor->getDataMgr(),
                                              kernel_.chosen_device_id,
                                              start_rowid_,
                                              kernel_.ra_exe_unit_.input_descs.size(),
                                              kernel_.eo.allow_runtime_query_interrupt,
                                              do_render ? kernel_.render_info_ : nullptr,
                                              optimize_cuda_block_and_grid_sizes,
                                              start_rowid_ + num_rows_to_process_);
  } else {
    err = executor->executePlanWithGroupBy(kernel_.ra_exe_unit_,
                                           compilation_result,
                                           kernel_.query_comp_desc.hoistLiterals(),
                                           nullptr,
                                           kernel_.chosen_device_type,
                                           fetch_result_->col_buffers,
                                           outer_tab_frag_ids,
                                           query_exe_context,
                                           fetch_result_->num_rows,
                                           fetch_result_->frag_offsets,
                                           executor->getDataMgr(),
                                           kernel_.chosen_device_id,
                                           outer_table_key,
                                           kernel_.ra_exe_unit_.scan_limit,
                                           start_rowid_,
                                           kernel_.ra_exe_unit_.input_descs.size(),
                                           kernel_.eo.allow_runtime_query_interrupt,
                                           do_render ? kernel_.render_info_ : nullptr,
                                           optimize_cuda_block_and_grid_sizes,
                                           start_rowid_ + num_rows_to_process_);
  }

  if (err) {
    throw QueryExecutionError(err);
  }
}

#endif  // HAVE_TBB