OmniSciDB 085a039ca4
ExecutionKernel.cpp
/*
 * Copyright 2020 OmniSci, Inc.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#include "QueryEngine/ExecutionKernel.h"

#include <mutex>
#include <vector>

#include "QueryEngine/ColumnFetcher.h"
#include "QueryEngine/DynamicWatchdog.h"
#include "QueryEngine/Execute.h"
#include "QueryEngine/ExternalExecutor.h"
#include "QueryEngine/SerializeToSql.h"

namespace {

bool needs_skip_result(const ResultSetPtr& res) {
  return !res || res->definitelyHasNoRows();
}

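// True iff any join nesting level is an INNER join; used below when scaling
// the input-row estimate, since inner joins can emit rows across input tables.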
inline bool query_has_inner_join(const RelAlgExecutionUnit& ra_exe_unit) {
  return (std::count_if(ra_exe_unit.join_quals.begin(),
                        ra_exe_unit.join_quals.end(),
                        [](const auto& join_condition) {
                          return join_condition.type == JoinType::INNER;
                        }) > 0);
}

// Column is part of the target expressions; result set iteration needs it alive.
bool need_to_hold_chunk(const Chunk_NS::Chunk* chunk,
                        const RelAlgExecutionUnit& ra_exe_unit,
                        const std::vector<ColumnLazyFetchInfo>& lazy_fetch_info,
                        const ExecutorDeviceType device_type) {
  CHECK(chunk->getColumnDesc());
  const auto& chunk_ti = chunk->getColumnDesc()->columnType;
  if (device_type == ExecutorDeviceType::CPU &&
      (chunk_ti.is_array() ||
       (chunk_ti.is_string() && chunk_ti.get_compression() == kENCODING_NONE))) {
    for (const auto target_expr : ra_exe_unit.target_exprs) {
      const auto col_var = dynamic_cast<const Analyzer::ColumnVar*>(target_expr);
      if (col_var && col_var->get_column_id() == chunk->getColumnDesc()->columnId &&
          col_var->get_table_id() == chunk->getColumnDesc()->tableId) {
        return true;
      }
    }
  }
  if (lazy_fetch_info.empty()) {
    return false;
  }
  CHECK_EQ(lazy_fetch_info.size(), ra_exe_unit.target_exprs.size());
  for (size_t i = 0; i < ra_exe_unit.target_exprs.size(); i++) {
    const auto target_expr = ra_exe_unit.target_exprs[i];
    const auto& col_lazy_fetch = lazy_fetch_info[i];
    const auto col_var = dynamic_cast<const Analyzer::ColumnVar*>(target_expr);
    if (col_var && col_var->get_column_id() == chunk->getColumnDesc()->columnId &&
        col_var->get_table_id() == chunk->getColumnDesc()->tableId) {
      if (col_lazy_fetch.is_lazily_fetched) {
        // hold lazy fetched inputs for later iteration
        return true;
      }
    }
  }
  return false;
}

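// Convenience overload: true if any chunk in the list must stay alive for
// result set iteration.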
bool need_to_hold_chunk(const std::list<std::shared_ptr<Chunk_NS::Chunk>>& chunks,
                        const RelAlgExecutionUnit& ra_exe_unit,
                        const std::vector<ColumnLazyFetchInfo>& lazy_fetch_info,
                        const ExecutorDeviceType device_type) {
  for (const auto& chunk : chunks) {
    if (need_to_hold_chunk(chunk.get(), ra_exe_unit, lazy_fetch_info, device_type)) {
      return true;
    }
  }

  return false;
}

}  // namespace

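// Lazily builds the prefix sums of per-fragment row counts for the outer
// table, under a mutex so concurrent kernels initialize it only once. E.g.
// fragments with 3, 5, and 2 tuples yield offsets [0, 3, 8, 10]: entry i is
// the global row id of the first row of fragment i.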
const std::vector<uint64_t>& SharedKernelContext::getFragOffsets() {
  std::lock_guard<std::mutex> lock(all_frag_row_offsets_mutex_);
  if (all_frag_row_offsets_.empty()) {
    all_frag_row_offsets_.resize(query_infos_.front().info.fragments.size() + 1);
    for (size_t i = 1; i <= query_infos_.front().info.fragments.size(); ++i) {
      all_frag_row_offsets_[i] =
          all_frag_row_offsets_[i - 1] +
          query_infos_.front().info.fragments[i - 1].getNumTuples();
    }
  }
  return all_frag_row_offsets_;
}

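// Collect one kernel's results; result sets known to be empty are dropped so
// the reduction step does not have to visit them.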
void SharedKernelContext::addDeviceResults(ResultSetPtr&& device_results,
                                           std::vector<size_t> outer_table_fragment_ids) {
  std::lock_guard<std::mutex> lock(reduce_mutex_);
  if (!needs_skip_result(device_results)) {
    all_fragment_results_.emplace_back(std::move(device_results),
                                       outer_table_fragment_ids);
  }
}

std::vector<std::pair<ResultSetPtr, std::vector<size_t>>>&
SharedKernelContext::getFragmentResults() {
  return all_fragment_results_;
}

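// Entry point for one kernel. Wraps runImpl and translates low-level failures
// (allocation, columnar conversion, literal limits) into QueryExecutionError
// codes the dispatching layer understands, e.g. so a GPU out-of-memory error
// can trigger a retry on CPU.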
void ExecutionKernel::run(Executor* executor,
                          const size_t thread_idx,
                          SharedKernelContext& shared_context) {
  DEBUG_TIMER("ExecutionKernel::run");
  INJECT_TIMER(kernel_run);
  std::optional<logger::QidScopeGuard> qid_scope_guard;
  if (ra_exe_unit_.query_state) {
    qid_scope_guard.emplace(ra_exe_unit_.query_state->setThreadLocalQueryId());
  }
  try {
    runImpl(executor, thread_idx, shared_context);
  } catch (const OutOfHostMemory& e) {
    throw QueryExecutionError(Executor::ERR_OUT_OF_CPU_MEM, e.what());
  } catch (const std::bad_alloc& e) {
    throw QueryExecutionError(Executor::ERR_OUT_OF_CPU_MEM, e.what());
  } catch (const OutOfRenderMemory& e) {
    throw QueryExecutionError(Executor::ERR_OUT_OF_RENDER_MEM, e.what());
  } catch (const OutOfMemory& e) {
    throw QueryExecutionError(
        Executor::ERR_OUT_OF_GPU_MEM,
        e.what(),
        QueryExecutionProperties{
            query_mem_desc.getQueryDescriptionType(),
            kernel_dispatch_mode == ExecutorDispatchMode::MultifragmentKernel});
  } catch (const ColumnarConversionNotSupported& e) {
    throw QueryExecutionError(Executor::ERR_COLUMNAR_CONVERSION_NOT_SUPPORTED, e.what());
  } catch (const TooManyLiterals& e) {
    throw QueryExecutionError(Executor::ERR_TOO_MANY_LITERALS, e.what());
  } catch (const StringConstInResultSet& e) {
    throw QueryExecutionError(Executor::ERR_STRING_CONST_IN_RESULTSET, e.what());
  } catch (const QueryExecutionError& e) {
    throw e;
  }
}

void ExecutionKernel::runImpl(Executor* executor,
                              const size_t thread_idx,
                              SharedKernelContext& shared_context) {
  CHECK(executor);
  const auto memory_level = chosen_device_type == ExecutorDeviceType::GPU
                                ? Data_Namespace::GPU_LEVEL
                                : Data_Namespace::CPU_LEVEL;
  CHECK_GE(frag_list.size(), size_t(1));
  // frag_list[0].table_id is how we tell which query we are running for UNION ALL.
  const int outer_table_id = ra_exe_unit_.union_all
                                 ? frag_list[0].table_id
                                 : ra_exe_unit_.input_descs[0].getTableId();
  CHECK_EQ(frag_list[0].table_id, outer_table_id);
  const auto& outer_tab_frag_ids = frag_list[0].fragment_ids;

  CHECK_GE(chosen_device_id, 0);
  CHECK_LT(chosen_device_id, Executor::max_gpu_count);

  auto catalog = executor->getCatalog();
  CHECK(catalog);

  auto data_mgr = executor->getDataMgr();

  // need to own them while query executes
  auto chunk_iterators_ptr = std::make_shared<std::list<ChunkIter>>();
  std::list<std::shared_ptr<Chunk_NS::Chunk>> chunks;
  std::unique_ptr<std::lock_guard<std::mutex>> gpu_lock;
  std::unique_ptr<CudaAllocator> device_allocator;
  if (chosen_device_type == ExecutorDeviceType::GPU) {
    gpu_lock.reset(
        new std::lock_guard<std::mutex>(executor->gpu_exec_mutex_[chosen_device_id]));
    device_allocator = std::make_unique<CudaAllocator>(
        data_mgr, chosen_device_id, getQueryEngineCudaStreamForDevice(chosen_device_id));
  }
  std::shared_ptr<FetchResult> fetch_result(new FetchResult);
  try {
    std::map<int, const TableFragments*> all_tables_fragments;
    QueryFragmentDescriptor::computeAllTablesFragments(
        all_tables_fragments, ra_exe_unit_, shared_context.getQueryInfos());

    *fetch_result = ra_exe_unit_.union_all
                        ? executor->fetchUnionChunks(column_fetcher,
                                                     ra_exe_unit_,
                                                     chosen_device_id,
                                                     memory_level,
                                                     all_tables_fragments,
                                                     frag_list,
                                                     *catalog,
                                                     *chunk_iterators_ptr,
                                                     chunks,
                                                     device_allocator.get(),
                                                     thread_idx,
                                                     eo.allow_runtime_query_interrupt)
                        : executor->fetchChunks(column_fetcher,
                                                ra_exe_unit_,
                                                chosen_device_id,
                                                memory_level,
                                                all_tables_fragments,
                                                frag_list,
                                                *catalog,
                                                *chunk_iterators_ptr,
                                                chunks,
                                                device_allocator.get(),
                                                thread_idx,
                                                eo.allow_runtime_query_interrupt);
    if (fetch_result->num_rows.empty()) {
      return;
    }
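    // Arm the dynamic watchdog exactly once per query: the first kernel to
    // reach this point (test_and_set) converts the millisecond limit into a
    // cycle budget shared by all kernels.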
    if (eo.with_dynamic_watchdog &&
        !shared_context.dynamic_watchdog_set.test_and_set(std::memory_order_acquire)) {
      CHECK_GT(eo.dynamic_watchdog_time_limit, 0u);
      auto cycle_budget = dynamic_watchdog_init(eo.dynamic_watchdog_time_limit);
      LOG(INFO) << "Dynamic Watchdog budget: CPU: "
                << std::to_string(eo.dynamic_watchdog_time_limit) << "ms, "
                << std::to_string(cycle_budget) << " cycles";
    }
  } catch (const OutOfMemory&) {
    throw QueryExecutionError(
        memory_level == Data_Namespace::GPU_LEVEL ? Executor::ERR_OUT_OF_GPU_MEM
                                                  : Executor::ERR_OUT_OF_CPU_MEM,
        QueryExecutionProperties{
            query_mem_desc.getQueryDescriptionType(),
            kernel_dispatch_mode == ExecutorDispatchMode::MultifragmentKernel});
    return;
  }

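  // External execution path: the unit is serialized back to SQL and handed to
  // an external engine; joins are not supported here.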
  if (eo.executor_type == ExecutorType::Extern) {
    if (ra_exe_unit_.input_descs.size() > 1) {
      throw std::runtime_error("Joins not supported through external execution");
    }
    const auto query = serialize_to_sql(&ra_exe_unit_, catalog);
    GroupByAndAggregate group_by_and_aggregate(executor,
                                               ExecutorDeviceType::CPU,
                                               ra_exe_unit_,
                                               shared_context.getQueryInfos(),
                                               executor->row_set_mem_owner_,
                                               std::nullopt);
    const auto query_mem_desc =
        group_by_and_aggregate.initQueryMemoryDescriptor(false, 0, 8, nullptr, false);
    device_results_ = run_query_external(
        query,
        *fetch_result,
        executor->plan_state_.get(),
        ExternalQueryOutputSpec{*query_mem_desc,
                                target_exprs_to_infos(ra_exe_unit_.target_exprs,
                                                      *query_mem_desc),
                                executor});
    shared_context.addDeviceResults(std::move(device_results_), outer_tab_frag_ids);
    return;
  }
  const CompilationResult& compilation_result = query_comp_desc.getCompilationResult();
  std::unique_ptr<QueryExecutionContext> query_exe_context_owned;
  const bool do_render = render_info_ && render_info_->isPotentialInSituRender();

  int64_t total_num_input_rows{-1};
  if (kernel_dispatch_mode == ExecutorDispatchMode::KernelPerFragment &&
      query_mem_desc.getQueryDescriptionType() == QueryDescriptionType::Projection) {
    total_num_input_rows = 0;
    std::for_each(fetch_result->num_rows.begin(),
                  fetch_result->num_rows.end(),
                  [&total_num_input_rows](const std::vector<int64_t>& frag_row_count) {
                    total_num_input_rows = std::accumulate(frag_row_count.begin(),
                                                           frag_row_count.end(),
                                                           total_num_input_rows);
                  });
    VLOG(2) << "total_num_input_rows=" << total_num_input_rows;
    // TODO(adb): we may want to take this early out for all queries, but we are most
    // likely to see this query pattern on the kernel per fragment path (e.g. with HAVING
    // 0=1)
    if (total_num_input_rows == 0) {
      return;
    }

    if (query_has_inner_join(ra_exe_unit_)) {
      total_num_input_rows *= ra_exe_unit_.input_descs.size();
    }
  }

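  // For rowid point lookups, translate the global rowid into an offset local
  // to this kernel's first fragment. E.g. with fragment offsets [0, 3, 8, 10],
  // a lookup key of 5 against fragment 1 starts scanning at local row 2.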
  uint32_t start_rowid{0};
  if (rowid_lookup_key >= 0) {
    if (!frag_list.empty()) {
      const auto& all_frag_row_offsets = shared_context.getFragOffsets();
      start_rowid = rowid_lookup_key -
                    all_frag_row_offsets[frag_list.begin()->fragment_ids.front()];
    }
  }

#ifdef HAVE_TBB
  bool can_run_subkernels = shared_context.getThreadPool() != nullptr;

  // Sub-tasks are supported for groupby queries and estimators only for now.
  bool is_groupby =
      (ra_exe_unit_.groupby_exprs.size() > 1) ||
      (ra_exe_unit_.groupby_exprs.size() == 1 && ra_exe_unit_.groupby_exprs.front());
  can_run_subkernels = can_run_subkernels && (is_groupby || ra_exe_unit_.estimator);

  // In case some column is lazily fetched, we cannot mix different fragments in a single
  // ResultSet.
  can_run_subkernels =
      can_run_subkernels && !executor->hasLazyFetchColumns(ra_exe_unit_.target_exprs);

  // TODO: Use another structure to hold chunks. Currently, ResultSet holds them, but
  // with sub-tasks a chunk can be referenced by many ResultSets. So, some outer
  // structure to hold all ResultSets and all chunks is required.
  can_run_subkernels =
      can_run_subkernels &&
      !need_to_hold_chunk(
          chunks, ra_exe_unit_, std::vector<ColumnLazyFetchInfo>(), chosen_device_type);

  // TODO: check for literals? We serialize literals before execution and hold them in
  // result sets. Can we simply do it once and hold it in an outer structure?
  if (can_run_subkernels) {
    size_t total_rows = fetch_result->num_rows[0][0];
    size_t sub_size = g_cpu_sub_task_size;

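    // Carve [start_rowid, total_rows) into sub-tasks of at most
    // g_cpu_sub_task_size rows each; e.g. 10000 rows with a sub-task size of
    // 4096 yields the ranges [0, 4096), [4096, 8192), [8192, 10000).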
    for (size_t sub_start = start_rowid; sub_start < total_rows; sub_start += sub_size) {
      sub_size = (sub_start + sub_size > total_rows) ? total_rows - sub_start : sub_size;
      auto subtask = std::make_shared<KernelSubtask>(*this,
                                                     shared_context,
                                                     fetch_result,
                                                     chunk_iterators_ptr,
                                                     total_num_input_rows,
                                                     sub_start,
                                                     sub_size,
                                                     thread_idx);
      shared_context.getThreadPool()->run(
          [subtask, executor] { subtask->run(executor); });
    }

    return;
  }
#endif  // HAVE_TBB

  if (eo.executor_type == ExecutorType::Native) {
    try {
      // std::unique_ptr<QueryExecutionContext> query_exe_context_owned
      // has std::unique_ptr<QueryMemoryInitializer> query_buffers_
      // has std::vector<std::unique_ptr<ResultSet>> result_sets_
      // has std::unique_ptr<ResultSetStorage> storage_
      // which are initialized and possibly allocated here.
      query_exe_context_owned =
          query_mem_desc.getQueryExecutionContext(ra_exe_unit_,
                                                  executor,
                                                  chosen_device_type,
                                                  kernel_dispatch_mode,
                                                  chosen_device_id,
                                                  outer_table_id,
                                                  total_num_input_rows,
                                                  fetch_result->col_buffers,
                                                  fetch_result->frag_offsets,
                                                  executor->getRowSetMemoryOwner(),
                                                  compilation_result.output_columnar,
                                                  query_mem_desc.sortOnGpu(),
                                                  thread_idx,
                                                  do_render ? render_info_ : nullptr);
    } catch (const OutOfHostMemory& e) {
      throw QueryExecutionError(Executor::ERR_OUT_OF_CPU_MEM);
    }
  }
  QueryExecutionContext* query_exe_context{query_exe_context_owned.get()};
  CHECK(query_exe_context);
  int32_t err{0};

  if (ra_exe_unit_.groupby_exprs.empty()) {
    err = executor->executePlanWithoutGroupBy(ra_exe_unit_,
                                              compilation_result,
                                              query_comp_desc.hoistLiterals(),
                                              &device_results_,
                                              ra_exe_unit_.target_exprs,
                                              chosen_device_type,
                                              fetch_result->col_buffers,
                                              query_exe_context,
                                              fetch_result->num_rows,
                                              fetch_result->frag_offsets,
                                              data_mgr,
                                              chosen_device_id,
                                              start_rowid,
                                              ra_exe_unit_.input_descs.size(),
                                              eo.allow_runtime_query_interrupt,
                                              do_render ? render_info_ : nullptr);
  } else {
    if (ra_exe_unit_.union_all) {
      VLOG(1) << "outer_table_id=" << outer_table_id
              << " ra_exe_unit_.scan_limit=" << ra_exe_unit_.scan_limit;
    }
    err = executor->executePlanWithGroupBy(ra_exe_unit_,
                                           compilation_result,
                                           query_comp_desc.hoistLiterals(),
                                           &device_results_,
                                           chosen_device_type,
                                           fetch_result->col_buffers,
                                           outer_tab_frag_ids,
                                           query_exe_context,
                                           fetch_result->num_rows,
                                           fetch_result->frag_offsets,
                                           data_mgr,
                                           chosen_device_id,
                                           outer_table_id,
                                           ra_exe_unit_.scan_limit,
                                           start_rowid,
                                           ra_exe_unit_.input_descs.size(),
                                           eo.allow_runtime_query_interrupt,
                                           do_render ? render_info_ : nullptr);
  }
  if (device_results_) {
    std::list<std::shared_ptr<Chunk_NS::Chunk>> chunks_to_hold;
    for (const auto& chunk : chunks) {
      if (need_to_hold_chunk(chunk.get(),
                             ra_exe_unit_,
                             device_results_->getLazyFetchInfo(),
                             chosen_device_type)) {
        chunks_to_hold.push_back(chunk);
      }
    }
    device_results_->holdChunks(chunks_to_hold);
    device_results_->holdChunkIterators(chunk_iterators_ptr);
  } else {
    VLOG(1) << "null device_results.";
  }
  if (err) {
    throw QueryExecutionError(err);
  }
  shared_context.addDeviceResults(std::move(device_results_), outer_tab_frag_ids);
}

#ifdef HAVE_TBB

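// Mirrors ExecutionKernel::run: executes one row range of the parent kernel,
// translating the same set of failures into QueryExecutionError codes.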
void KernelSubtask::run(Executor* executor) {
  try {
    runImpl(executor);
  } catch (const OutOfHostMemory& e) {
    throw QueryExecutionError(Executor::ERR_OUT_OF_CPU_MEM, e.what());
  } catch (const std::bad_alloc& e) {
    throw QueryExecutionError(Executor::ERR_OUT_OF_CPU_MEM, e.what());
  } catch (const OutOfRenderMemory& e) {
    throw QueryExecutionError(Executor::ERR_OUT_OF_RENDER_MEM, e.what());
  } catch (const OutOfMemory& e) {
    throw QueryExecutionError(
        Executor::ERR_OUT_OF_GPU_MEM,
        e.what(),
        QueryExecutionProperties{
            kernel_.query_mem_desc.getQueryDescriptionType(),
            kernel_.kernel_dispatch_mode == ExecutorDispatchMode::MultifragmentKernel});
  } catch (const ColumnarConversionNotSupported& e) {
    throw QueryExecutionError(Executor::ERR_COLUMNAR_CONVERSION_NOT_SUPPORTED, e.what());
  } catch (const TooManyLiterals& e) {
    throw QueryExecutionError(Executor::ERR_TOO_MANY_LITERALS, e.what());
  } catch (const StringConstInResultSet& e) {
    throw QueryExecutionError(Executor::ERR_STRING_CONST_IN_RESULTSET, e.what());
  } catch (const QueryExecutionError& e) {
    throw e;
  }
}

void KernelSubtask::runImpl(Executor* executor) {
  auto& query_exe_context_owned = shared_context_.getTlsExecutionContext().local();
  const bool do_render =
      kernel_.render_info_ && kernel_.render_info_->isPotentialInSituRender();
  const CompilationResult& compilation_result =
      kernel_.query_comp_desc.getCompilationResult();
  const int outer_table_id = kernel_.ra_exe_unit_.union_all
                                 ? kernel_.frag_list[0].table_id
                                 : kernel_.ra_exe_unit_.input_descs[0].getTableId();

  if (!query_exe_context_owned) {
    try {
      // We pass fake col_buffers and frag_offsets. These are not actually used
      // for subtasks, but we shouldn't pass empty structures, to avoid empty results.
      std::vector<std::vector<const int8_t*>> col_buffers(
          fetch_result_->col_buffers.size(),
          std::vector<const int8_t*>(fetch_result_->col_buffers[0].size()));
      std::vector<std::vector<uint64_t>> frag_offsets(
          fetch_result_->frag_offsets.size(),
          std::vector<uint64_t>(fetch_result_->frag_offsets[0].size()));
      query_exe_context_owned = kernel_.query_mem_desc.getQueryExecutionContext(
          kernel_.ra_exe_unit_,
          executor,
          kernel_.chosen_device_type,
          kernel_.kernel_dispatch_mode,
          kernel_.chosen_device_id,
          outer_table_id,
          total_num_input_rows_,
          col_buffers,
          frag_offsets,
          executor->getRowSetMemoryOwner(),
          compilation_result.output_columnar,
          kernel_.query_mem_desc.sortOnGpu(),
          // TODO: use TBB thread id to choose allocator
          thread_idx_,
          do_render ? kernel_.render_info_ : nullptr);
    } catch (const OutOfHostMemory& e) {
      throw QueryExecutionError(Executor::ERR_OUT_OF_CPU_MEM);
    }
  }

  const auto& outer_tab_frag_ids = kernel_.frag_list[0].fragment_ids;
  auto catalog = executor->getCatalog();
  CHECK(catalog);
  QueryExecutionContext* query_exe_context{query_exe_context_owned.get()};
  CHECK(query_exe_context);
  int32_t err{0};

  if (kernel_.ra_exe_unit_.groupby_exprs.empty()) {
    err = executor->executePlanWithoutGroupBy(kernel_.ra_exe_unit_,
                                              compilation_result,
                                              kernel_.query_comp_desc.hoistLiterals(),
                                              nullptr,
                                              kernel_.ra_exe_unit_.target_exprs,
                                              kernel_.chosen_device_type,
                                              fetch_result_->col_buffers,
                                              query_exe_context,
                                              fetch_result_->num_rows,
                                              fetch_result_->frag_offsets,
                                              &catalog->getDataMgr(),
                                              kernel_.chosen_device_id,
                                              start_rowid_,
                                              kernel_.ra_exe_unit_.input_descs.size(),
                                              kernel_.eo.allow_runtime_query_interrupt,
                                              do_render ? kernel_.render_info_ : nullptr,
                                              start_rowid_ + num_rows_to_process_);
  } else {
    err = executor->executePlanWithGroupBy(kernel_.ra_exe_unit_,
                                           compilation_result,
                                           kernel_.query_comp_desc.hoistLiterals(),
                                           nullptr,
                                           kernel_.chosen_device_type,
                                           fetch_result_->col_buffers,
                                           outer_tab_frag_ids,
                                           query_exe_context,
                                           fetch_result_->num_rows,
                                           fetch_result_->frag_offsets,
                                           &catalog->getDataMgr(),
                                           kernel_.chosen_device_id,
                                           outer_table_id,
                                           kernel_.ra_exe_unit_.scan_limit,
                                           start_rowid_,
                                           kernel_.ra_exe_unit_.input_descs.size(),
                                           kernel_.eo.allow_runtime_query_interrupt,
                                           do_render ? kernel_.render_info_ : nullptr,
                                           start_rowid_ + num_rows_to_process_);
  }

  if (err) {
    throw QueryExecutionError(err);
  }
}

#endif  // HAVE_TBB