ExecutionKernel.cpp
/*
 * Copyright 2020 OmniSci, Inc.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#include "QueryEngine/ExecutionKernel.h"

#include <mutex>
#include <vector>

#include "QueryEngine/Execute.h"

namespace {

inline bool needs_skip_result(const ResultSetPtr& res) {
  return !res || res->definitelyHasNoRows();
}

inline bool query_has_inner_join(const RelAlgExecutionUnit& ra_exe_unit) {
  return (std::count_if(ra_exe_unit.join_quals.begin(),
                        ra_exe_unit.join_quals.end(),
                        [](const auto& join_condition) {
                          return join_condition.type == JoinType::INNER;
                        }) > 0);
}

// The column is part of the target expressions; result set iteration needs it alive.
bool need_to_hold_chunk(const Chunk_NS::Chunk* chunk,
                        const RelAlgExecutionUnit& ra_exe_unit) {
  CHECK(chunk->getColumnDesc());
  const auto chunk_ti = chunk->getColumnDesc()->columnType;
  if (chunk_ti.is_array() ||
      (chunk_ti.is_string() && chunk_ti.get_compression() == kENCODING_NONE)) {
    for (const auto target_expr : ra_exe_unit.target_exprs) {
      const auto col_var = dynamic_cast<const Analyzer::ColumnVar*>(target_expr);
      if (col_var && col_var->get_column_id() == chunk->getColumnDesc()->columnId &&
          col_var->get_table_id() == chunk->getColumnDesc()->tableId) {
        return true;
      }
    }
  }
  return false;
}

}  // namespace
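
// Illustrative note ("str_col" and "t" are hypothetical, not from this file):
// for a query such as
//   SELECT str_col FROM t;   -- str_col declared TEXT ENCODING NONE
// the result set returns varlen values as pointers into the fetched chunk
// buffers rather than copies, so any none-encoded string or array chunk that
// backs a target expression must outlive result iteration. runImpl() below
// uses need_to_hold_chunk() to pick out those chunks and parks them on the
// result set via holdChunks().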

const std::vector<uint64_t>& SharedKernelContext::getFragOffsets() {
  std::lock_guard<std::mutex> lock(all_frag_row_offsets_mutex_);
  if (all_frag_row_offsets_.empty()) {
    all_frag_row_offsets_.resize(query_infos_.front().info.fragments.size() + 1);
    for (size_t i = 1; i <= query_infos_.front().info.fragments.size(); ++i) {
      all_frag_row_offsets_[i] =
          all_frag_row_offsets_[i - 1] +
          query_infos_.front().info.fragments[i - 1].getNumTuples();
    }
  }
  return all_frag_row_offsets_;
}
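
// Worked example (illustrative; the numbers are hypothetical): an outer table
// with three fragments of 100, 200, and 50 tuples yields
//   all_frag_row_offsets_ == {0, 100, 300, 350}
// so fragment i covers global row ids [offsets[i], offsets[i + 1]). runImpl()
// relies on this prefix sum when it turns rowid_lookup_key into a
// fragment-local start_rowid.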

void SharedKernelContext::addDeviceResults(ResultSetPtr&& device_results,
                                           std::vector<size_t> outer_table_fragment_ids) {
  std::lock_guard<std::mutex> lock(reduce_mutex_);
  if (!needs_skip_result(device_results)) {
    all_fragment_results_.emplace_back(std::move(device_results),
                                       outer_table_fragment_ids);
  }
}
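
// Note (illustrative): needs_skip_result() filters out null or provably empty
// result sets here, so the later reduction pass never visits fragments that
// produced no rows; reduce_mutex_ makes the append safe when several kernels
// finish concurrently.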

std::vector<std::pair<ResultSetPtr, std::vector<size_t>>>&
SharedKernelContext::getFragmentResults() {
  return all_fragment_results_;
}

void ExecutionKernel::run(Executor* executor, SharedKernelContext& shared_context) {
  DEBUG_TIMER("ExecutionKernel::run");
  INJECT_TIMER(kernel_run);
  try {
    runImpl(executor, shared_context);
  } catch (const OutOfHostMemory& e) {
    throw QueryExecutionError(Executor::ERR_OUT_OF_CPU_MEM, e.what());
  } catch (const std::bad_alloc& e) {
    throw QueryExecutionError(Executor::ERR_OUT_OF_CPU_MEM, e.what());
  } catch (const OutOfRenderMemory& e) {
    throw QueryExecutionError(Executor::ERR_OUT_OF_RENDER_MEM, e.what());
  } catch (const OutOfMemory& e) {
    throw QueryExecutionError(
        Executor::ERR_OUT_OF_GPU_MEM,
        e.what(),
        QueryExecutionProperties{
            query_mem_desc.getQueryDescriptionType(),
            kernel_dispatch_mode == ExecutorDispatchMode::MultifragmentKernel});
  } catch (const ColumnarConversionNotSupported& e) {
    throw QueryExecutionError(Executor::ERR_COLUMNAR_CONVERSION_NOT_SUPPORTED, e.what());
  } catch (const TooManyLiterals& e) {
    throw QueryExecutionError(Executor::ERR_TOO_MANY_LITERALS, e.what());
  } catch (const StringConstInResultSet& e) {
    throw QueryExecutionError(Executor::ERR_STRING_CONST_IN_RESULTSET, e.what());
  } catch (const QueryExecutionError& e) {
    throw e;
  }
}
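
// Usage sketch (illustrative; the dispatch scaffolding shown here is an
// assumption, not part of this file): the executor builds one ExecutionKernel
// per fragment list and runs the kernels on worker threads, relying on run()
// to translate engine failures into QueryExecutionError codes:
//
//   SharedKernelContext shared_context(query_infos);
//   std::vector<std::future<void>> futures;
//   for (auto& kernel : kernels) {  // kernels: std::unique_ptr<ExecutionKernel>
//     futures.emplace_back(std::async(std::launch::async,
//                                     [k = kernel.get(), executor, &shared_context] {
//                                       k->run(executor, shared_context);
//                                     }));
//   }
//   for (auto& f : futures) {
//     f.get();  // rethrows any QueryExecutionError raised on a worker thread
//   }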

void ExecutionKernel::runImpl(Executor* executor, SharedKernelContext& shared_context) {
  CHECK(executor);
  const auto memory_level = chosen_device_type == ExecutorDeviceType::GPU
                                ? Data_Namespace::GPU_LEVEL
                                : Data_Namespace::CPU_LEVEL;
  CHECK_GE(frag_list.size(), size_t(1));
  // frag_list[0].table_id is how we tell which query we are running for UNION ALL.
  const int outer_table_id = ra_exe_unit_.union_all
                                 ? frag_list[0].table_id
                                 : ra_exe_unit_.input_descs[0].getTableId();
  CHECK_EQ(frag_list[0].table_id, outer_table_id);
  const auto& outer_tab_frag_ids = frag_list[0].fragment_ids;

  CHECK_GE(chosen_device_id, 0);
  CHECK_LT(chosen_device_id, Executor::max_gpu_count);

  auto catalog = executor->getCatalog();
  CHECK(catalog);

  // need to own them while query executes
  auto chunk_iterators_ptr = std::make_shared<std::list<ChunkIter>>();
  std::list<std::shared_ptr<Chunk_NS::Chunk>> chunks;
  std::unique_ptr<std::lock_guard<std::mutex>> gpu_lock;
  std::unique_ptr<CudaAllocator> device_allocator;
  if (chosen_device_type == ExecutorDeviceType::GPU) {
    gpu_lock.reset(
        new std::lock_guard<std::mutex>(executor->gpu_exec_mutex_[chosen_device_id]));
    device_allocator =
        std::make_unique<CudaAllocator>(&catalog->getDataMgr(), chosen_device_id);
  }
  FetchResult fetch_result;
  try {
    std::map<int, const TableFragments*> all_tables_fragments;
    QueryFragmentDescriptor::computeAllTablesFragments(
        all_tables_fragments, ra_exe_unit_, shared_context.getQueryInfos());

    fetch_result = ra_exe_unit_.union_all
                       ? executor->fetchUnionChunks(column_fetcher,
                                                    ra_exe_unit_,
                                                    chosen_device_id,
                                                    memory_level,
                                                    all_tables_fragments,
                                                    frag_list,
                                                    *catalog,
                                                    *chunk_iterators_ptr,
                                                    chunks,
                                                    device_allocator.get())
                       : executor->fetchChunks(column_fetcher,
                                               ra_exe_unit_,
                                               chosen_device_id,
                                               memory_level,
                                               all_tables_fragments,
                                               frag_list,
                                               *catalog,
                                               *chunk_iterators_ptr,
                                               chunks,
                                               device_allocator.get());
    if (fetch_result.num_rows.empty()) {
      return;
    }
    if (eo.with_dynamic_watchdog &&
        !shared_context.dynamic_watchdog_set.test_and_set(std::memory_order_acquire)) {
      // Use remaining time budget for dynamic watchdog execution
      auto cycle_budget = dynamic_watchdog_init(eo.dynamic_watchdog_time_limit);
      LOG(INFO) << "Dynamic Watchdog budget: CPU: "
                << std::to_string(eo.dynamic_watchdog_time_limit) << "ms, "
                << std::to_string(cycle_budget) << " cycles";
    }
  } catch (const OutOfMemory&) {
    throw QueryExecutionError(
        memory_level == Data_Namespace::GPU_LEVEL ? Executor::ERR_OUT_OF_GPU_MEM
                                                  : Executor::ERR_OUT_OF_CPU_MEM,
        QueryExecutionProperties{
            query_mem_desc.getQueryDescriptionType(),
            kernel_dispatch_mode == ExecutorDispatchMode::MultifragmentKernel});
    return;
  }

  if (eo.executor_type == ExecutorType::Extern) {
    if (ra_exe_unit_.input_descs.size() > 1) {
      throw std::runtime_error("Joins not supported through external execution");
    }
    const auto query = serialize_to_sql(&ra_exe_unit_, catalog);
    GroupByAndAggregate group_by_and_aggregate(executor,
                                               ExecutorDeviceType::CPU,
                                               ra_exe_unit_,
                                               shared_context.getQueryInfos(),
                                               executor->row_set_mem_owner_,
                                               std::nullopt);
    const auto query_mem_desc =
        group_by_and_aggregate.initQueryMemoryDescriptor(false, 0, 8, nullptr, false);
    device_results_ = run_query_external(
        query,
        fetch_result,
        executor->plan_state_.get(),
        ExternalQueryOutputSpec{
            *query_mem_desc,
            target_exprs_to_infos(ra_exe_unit_.target_exprs, *query_mem_desc),
            executor});
    shared_context.addDeviceResults(std::move(device_results_), outer_tab_frag_ids);
    return;
  }
  const CompilationResult& compilation_result = query_comp_desc.getCompilationResult();
  std::unique_ptr<QueryExecutionContext> query_exe_context_owned;
  const bool do_render = render_info_ && render_info_->isPotentialInSituRender();

  int64_t total_num_input_rows{-1};
  if (kernel_dispatch_mode == ExecutorDispatchMode::KernelPerFragment &&
      query_mem_desc.getQueryDescriptionType() == QueryDescriptionType::Projection) {
    total_num_input_rows = 0;
    std::for_each(fetch_result.num_rows.begin(),
                  fetch_result.num_rows.end(),
                  [&total_num_input_rows](const std::vector<int64_t>& frag_row_count) {
                    total_num_input_rows = std::accumulate(frag_row_count.begin(),
                                                           frag_row_count.end(),
                                                           total_num_input_rows);
                  });
    VLOG(2) << "total_num_input_rows=" << total_num_input_rows;
    // TODO(adb): we may want to take this early out for all queries, but we are most
    // likely to see this query pattern on the kernel per fragment path (e.g. with
    // HAVING 0=1)
    if (total_num_input_rows == 0) {
      return;
    }

    if (query_has_inner_join(ra_exe_unit_)) {
      total_num_input_rows *= ra_exe_unit_.input_descs.size();
    }
  }

  if (eo.executor_type == ExecutorType::Native) {
    try {
      query_exe_context_owned =
          query_mem_desc.getQueryExecutionContext(ra_exe_unit_,
                                                  executor,
                                                  chosen_device_type,
                                                  kernel_dispatch_mode,
                                                  chosen_device_id,
                                                  total_num_input_rows,
                                                  fetch_result.col_buffers,
                                                  fetch_result.frag_offsets,
                                                  executor->getRowSetMemoryOwner(),
                                                  compilation_result.output_columnar,
                                                  query_mem_desc.sortOnGpu(),
                                                  do_render ? render_info_ : nullptr);
    } catch (const OutOfHostMemory& e) {
      throw QueryExecutionError(Executor::ERR_OUT_OF_CPU_MEM);
    }
  }
  QueryExecutionContext* query_exe_context{query_exe_context_owned.get()};
  CHECK(query_exe_context);
  int32_t err{0};
  uint32_t start_rowid{0};
  if (rowid_lookup_key >= 0) {
    if (!frag_list.empty()) {
      const auto& all_frag_row_offsets = shared_context.getFragOffsets();
      start_rowid = rowid_lookup_key -
                    all_frag_row_offsets[frag_list.begin()->fragment_ids.front()];
    }
  }

  if (ra_exe_unit_.groupby_exprs.empty()) {
    err = executor->executePlanWithoutGroupBy(ra_exe_unit_,
                                              compilation_result,
                                              query_comp_desc.hoistLiterals(),
                                              device_results_,
                                              ra_exe_unit_.target_exprs,
                                              chosen_device_type,
                                              fetch_result.col_buffers,
                                              query_exe_context,
                                              fetch_result.num_rows,
                                              fetch_result.frag_offsets,
                                              &catalog->getDataMgr(),
                                              chosen_device_id,
                                              start_rowid,
                                              ra_exe_unit_.input_descs.size(),
                                              do_render ? render_info_ : nullptr);
  } else {
    if (ra_exe_unit_.union_all) {
      VLOG(1) << "outer_table_id=" << outer_table_id
              << " ra_exe_unit_.scan_limit=" << ra_exe_unit_.scan_limit;
    }
    err = executor->executePlanWithGroupBy(ra_exe_unit_,
                                           compilation_result,
                                           query_comp_desc.hoistLiterals(),
                                           device_results_,
                                           chosen_device_type,
                                           fetch_result.col_buffers,
                                           outer_tab_frag_ids,
                                           query_exe_context,
                                           fetch_result.num_rows,
                                           fetch_result.frag_offsets,
                                           &catalog->getDataMgr(),
                                           chosen_device_id,
                                           outer_table_id,
                                           ra_exe_unit_.scan_limit,
                                           start_rowid,
                                           ra_exe_unit_.input_descs.size(),
                                           do_render ? render_info_ : nullptr);
  }
  if (device_results_) {
    std::list<std::shared_ptr<Chunk_NS::Chunk>> chunks_to_hold;
    for (const auto& chunk : chunks) {
      if (need_to_hold_chunk(chunk.get(), ra_exe_unit_)) {
        chunks_to_hold.push_back(chunk);
      }
    }
    device_results_->holdChunks(chunks_to_hold);
    device_results_->holdChunkIterators(chunk_iterators_ptr);
  } else {
    VLOG(1) << "null device_results.";
  }
  if (err) {
    throw QueryExecutionError(err);
  }
  shared_context.addDeviceResults(std::move(device_results_), outer_tab_frag_ids);
}
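
// End-to-end sketch (illustrative; the reduction step is an assumption, not
// shown in this file): once every kernel has finished, the caller collects the
// per-fragment results accumulated in the shared context and reduces them into
// the final ResultSet:
//
//   auto& fragment_results = shared_context.getFragmentResults();
//   for (auto& [result_set, outer_frag_ids] : fragment_results) {
//     // each entry pairs one device ResultSet with the outer fragment ids
//     // (outer_tab_frag_ids above) that produced it
//   }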