bool needs_skip_result(const ResultSetPtr& res) {
  return !res || res->definitelyHasNoRows();
}

bool query_has_inner_join(const RelAlgExecutionUnit& ra_exe_unit) {
  return (std::count_if(ra_exe_unit.join_quals.begin(),
                        ra_exe_unit.join_quals.end(),
                        [](const auto& join_condition) {
                          return join_condition.type == JoinType::INNER;
                        }) > 0);
}

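// ---------------------------------------------------------------------------
// Aside: a minimal, self-contained sketch of the count_if predicate pattern
// used above. JoinType and JoinCondition here are illustrative stand-ins, not
// the engine's real types.
#include <algorithm>
#include <vector>

namespace sketch {
enum class JoinType { INNER, LEFT };
struct JoinCondition {
  JoinType type;
};

// Returns true iff at least one join condition is an inner join.
inline bool has_inner_join(const std::vector<JoinCondition>& join_quals) {
  return std::count_if(join_quals.begin(), join_quals.end(), [](const auto& jc) {
           return jc.type == JoinType::INNER;
         }) > 0;
}
}  // namespace sketch
// ---------------------------------------------------------------------------
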
bool need_to_hold_chunk(const Chunk_NS::Chunk* chunk,
                        const RelAlgExecutionUnit& ra_exe_unit,
                        const std::vector<ColumnLazyFetchInfo>& lazy_fetch_info,
                        const ExecutorDeviceType device_type) {
  CHECK(chunk->getColumnDesc());
  const auto& chunk_ti = chunk->getColumnDesc()->columnType;
  if (device_type == ExecutorDeviceType::CPU &&
      (chunk_ti.is_array() ||
       (chunk_ti.is_string() && chunk_ti.get_compression() == kENCODING_NONE))) {
    for (const auto target_expr : ra_exe_unit.target_exprs) {
      // ... (hold the chunk if a target expression references this column)
    }
  }
  if (lazy_fetch_info.empty()) {
    return false;
  }
  // ...
  for (size_t i = 0; i < ra_exe_unit.target_exprs.size(); i++) {
    const auto& col_lazy_fetch = lazy_fetch_info[i];
    // ...
    if (col_lazy_fetch.is_lazily_fetched) {
      // ... (hold the chunk if it backs this lazily fetched column)
    }
  }
  return false;
}

bool need_to_hold_chunk(const std::list<std::shared_ptr<Chunk_NS::Chunk>>& chunks,
                        const RelAlgExecutionUnit& ra_exe_unit,
                        const std::vector<ColumnLazyFetchInfo>& lazy_fetch_info,
                        const ExecutorDeviceType device_type) {
  for (const auto& chunk : chunks) {
    if (need_to_hold_chunk(chunk.get(), ra_exe_unit, lazy_fetch_info, device_type)) {
      return true;
    }
  }
  return false;
}

const std::vector<uint64_t>& SharedKernelContext::getFragOffsets() {
  std::lock_guard<std::mutex> lock(all_frag_row_offsets_mutex_);
  if (all_frag_row_offsets_.empty()) {
    all_frag_row_offsets_.resize(query_infos_.front().info.fragments.size() + 1);
    for (size_t i = 1; i <= query_infos_.front().info.fragments.size(); ++i) {
      all_frag_row_offsets_[i] =
          all_frag_row_offsets_[i - 1] +
          query_infos_.front().info.fragments[i - 1].getNumTuples();
    }
  }
  return all_frag_row_offsets_;
}

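// ---------------------------------------------------------------------------
// Aside: the offset table above is a prefix sum over fragment sizes, so entry
// i is the global row id of the first row of fragment i. A standalone sketch
// (compute_frag_offsets is illustrative, not part of the engine):
#include <cstddef>
#include <cstdint>
#include <vector>

namespace sketch {
inline std::vector<uint64_t> compute_frag_offsets(
    const std::vector<uint64_t>& frag_num_tuples) {
  std::vector<uint64_t> offsets(frag_num_tuples.size() + 1, 0);
  for (std::size_t i = 1; i <= frag_num_tuples.size(); ++i) {
    offsets[i] = offsets[i - 1] + frag_num_tuples[i - 1];
  }
  return offsets;
}
}  // namespace sketch

// compute_frag_offsets({3, 5, 2}) yields {0, 3, 8, 10}: fragment 2 starts at
// global row 8, and global_rowid - offsets[frag_id] recovers a local row id.
// ---------------------------------------------------------------------------
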
void SharedKernelContext::addDeviceResults(ResultSetPtr&& device_results,
                                           std::vector<size_t> outer_table_fragment_ids) {
  // ...
  all_fragment_results_.emplace_back(std::move(device_results),
                                     outer_table_fragment_ids);
}

std::vector<std::pair<ResultSetPtr, std::vector<size_t>>>&
SharedKernelContext::getFragmentResults() {
  return all_fragment_results_;
}

void ExecutionKernel::run(Executor* executor,
                          const size_t thread_idx,
                          SharedKernelContext& shared_context) {
  // ...
  try {
    runImpl(executor, thread_idx, shared_context);
  } catch (const std::bad_alloc& e) {
    // ... (rethrown as a query execution error carrying
    //      Executor::ERR_OUT_OF_CPU_MEM)
  }
  // ... (further catch clauses elided)
}

void ExecutionKernel::runImpl(Executor* executor,
                              const size_t thread_idx,
                              SharedKernelContext& shared_context) {
  // ...
  const auto& outer_tab_frag_ids = frag_list[0].fragment_ids;
  // ...
  auto data_mgr = executor->getDataMgr();
  // ...
  auto chunk_iterators_ptr = std::make_shared<std::list<ChunkIter>>();
  std::list<std::shared_ptr<Chunk_NS::Chunk>> chunks;
  std::unique_ptr<std::lock_guard<std::mutex>> gpu_lock;
  std::unique_ptr<CudaAllocator> device_allocator;
  if (chosen_device_type == ExecutorDeviceType::GPU) {
    gpu_lock.reset(
        new std::lock_guard<std::mutex>(executor->gpu_exec_mutex_[chosen_device_id]));
    device_allocator = std::make_unique<CudaAllocator>(
        data_mgr, chosen_device_id, getQueryEngineCudaStreamForDevice(chosen_device_id));
  }
  std::shared_ptr<FetchResult> fetch_result(new FetchResult);
  std::map<shared::TableKey, const TableFragments*> all_tables_fragments;
  QueryFragmentDescriptor::computeAllTablesFragments(
      all_tables_fragments, ra_exe_unit_, shared_context.getQueryInfos());
  // UNION ALL plans fetch through a dedicated path; both calls share the same
  // argument shape (most arguments are elided in this listing).
  *fetch_result = ra_exe_unit_.union_all
      ? executor->fetchUnionChunks(column_fetcher,
                                   /* ... */,
                                   all_tables_fragments,
                                   /* ... */,
                                   *chunk_iterators_ptr,
                                   /* ... */,
                                   device_allocator.get(),
                                   /* ... */)
      : executor->fetchChunks(column_fetcher,
                              /* ... */,
                              all_tables_fragments,
                              /* ... */,
                              *chunk_iterators_ptr,
                              /* ... */,
                              device_allocator.get(),
                              /* ... */);
  if (fetch_result->num_rows.empty()) {
    return;  // nothing was fetched for this kernel
  }
  if (eo.with_dynamic_watchdog &&
      !shared_context.dynamic_watchdog_set.test_and_set(std::memory_order_acquire)) {
    // The first kernel to get here arms the watchdog for the whole query.
    auto cycle_budget = dynamic_watchdog_init(eo.dynamic_watchdog_time_limit);
    LOG(INFO) << "Dynamic Watchdog budget: CPU: "
              << std::to_string(eo.dynamic_watchdog_time_limit) << "ms, "
              << std::to_string(cycle_budget) << " cycles";
  }
  if (eo.executor_type == ExecutorType::Extern) {
    if (ra_exe_unit_.input_descs.size() > 1) {
      throw std::runtime_error("Joins not supported through external execution");
    }
    const auto query = serialize_to_sql(&ra_exe_unit_);
    GroupByAndAggregate group_by_and_aggregate(executor,
                                               /* ... */,
                                               executor->row_set_mem_owner_,
                                               /* ... */);
    const auto query_mem_desc = group_by_and_aggregate.initQueryMemoryDescriptor(
        false, 0, 8, nullptr, false);
    device_results_ = run_query_external(
        query,
        *fetch_result,
        executor->plan_state_.get(),
        ExternalQueryOutputSpec{query_mem_desc,
                                target_exprs_to_infos(ra_exe_unit_.target_exprs,
                                                      query_mem_desc),
                                executor});
    shared_context.addDeviceResults(std::move(device_results_), outer_tab_frag_ids);
    return;
  }
  // ...
  std::unique_ptr<QueryExecutionContext> query_exe_context_owned;
  // ...
  int64_t total_num_input_rows{-1};
  if (kernel_dispatch_mode == ExecutorDispatchMode::KernelPerFragment &&
      query_mem_desc.getQueryDescriptionType() == QueryDescriptionType::Projection) {
    total_num_input_rows = 0;
    std::for_each(fetch_result->num_rows.begin(),
                  fetch_result->num_rows.end(),
                  [&total_num_input_rows](const std::vector<int64_t>& frag_row_count) {
                    total_num_input_rows = std::accumulate(frag_row_count.begin(),
                                                           frag_row_count.end(),
                                                           total_num_input_rows);
                  });
    VLOG(2) << "total_num_input_rows=" << total_num_input_rows;
    if (total_num_input_rows == 0) {
      return;  // nothing to execute for an empty input
    }
  }
  // ...
  uint32_t start_rowid{0};
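// ---------------------------------------------------------------------------
// Aside: the aggregation above sums a vector-of-vectors of per-fragment row
// counts. A standalone sketch (count_input_rows is illustrative):
#include <algorithm>
#include <cstdint>
#include <numeric>
#include <vector>

namespace sketch {
inline int64_t count_input_rows(const std::vector<std::vector<int64_t>>& num_rows) {
  int64_t total{0};
  std::for_each(num_rows.begin(),
                num_rows.end(),
                [&total](const std::vector<int64_t>& frag_row_count) {
                  total = std::accumulate(
                      frag_row_count.begin(), frag_row_count.end(), total);
                });
  return total;
}
}  // namespace sketch

// count_input_rows({{10, 2}, {5}}) == 17.
// ---------------------------------------------------------------------------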
  if (rowid_lookup_key >= 0 && !frag_list.empty()) {
    const auto& all_frag_row_offsets = shared_context.getFragOffsets();
    // Translate the global rowid into an offset local to this fragment list.
    start_rowid = rowid_lookup_key -
                  all_frag_row_offsets[frag_list.begin()->fragment_ids.front()];
  }
  bool can_run_subkernels = shared_context.getThreadPool() != nullptr;
  // ... (additional eligibility checks are ANDed into can_run_subkernels)
  if (can_run_subkernels) {
    size_t total_rows = fetch_result->num_rows[0][0];
    size_t sub_size = g_cpu_sub_task_size;
    for (size_t sub_start = start_rowid; sub_start < total_rows;
         sub_start += sub_size) {
      // Clamp the last sub-task to the remaining rows.
      sub_size = (sub_start + sub_size > total_rows) ? total_rows - sub_start
                                                     : sub_size;
      auto subtask = std::make_shared<KernelSubtask>(*this,
                                                     shared_context,
                                                     /* ... */,
                                                     total_num_input_rows,
                                                     sub_start,
                                                     sub_size,
                                                     /* ... */);
      shared_context.getThreadPool()->run(
          [subtask, executor] { subtask->run(executor); });
    }
    // ... (execution continues in the sub-kernels)
  }
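// ---------------------------------------------------------------------------
// Aside: the loop above slices the row range [start_rowid, total_rows) into
// pieces of at most g_cpu_sub_task_size rows, clamping the final piece. A
// standalone sketch (split_into_subtasks and the dispatch callback are
// illustrative; assumes sub_task_size > 0):
#include <cstddef>
#include <functional>

namespace sketch {
inline void split_into_subtasks(
    std::size_t start_rowid,
    std::size_t total_rows,
    std::size_t sub_task_size,
    const std::function<void(std::size_t, std::size_t)>& dispatch) {
  std::size_t sub_size = sub_task_size;
  for (std::size_t sub_start = start_rowid; sub_start < total_rows;
       sub_start += sub_size) {
    sub_size = (sub_start + sub_size > total_rows) ? total_rows - sub_start : sub_size;
    dispatch(sub_start, sub_size);  // e.g. enqueue a subtask on a thread pool
  }
}
}  // namespace sketch

// split_into_subtasks(0, 10, 4, f) calls f(0, 4), f(4, 4), f(8, 2).
// ---------------------------------------------------------------------------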
  // ...
  query_exe_context_owned =
      query_mem_desc.getQueryExecutionContext(ra_exe_unit_,
                                              executor,
                                              chosen_device_type,
                                              kernel_dispatch_mode,
                                              chosen_device_id,
                                              /* ... */,
                                              total_num_input_rows,
                                              fetch_result->col_buffers,
                                              fetch_result->frag_offsets,
                                              executor->getRowSetMemoryOwner(),
                                              /* ... */);
  QueryExecutionContext* query_exe_context{query_exe_context_owned.get()};
  CHECK(query_exe_context);
  int32_t err{0};
  bool optimize_cuda_block_and_grid_sizes =
      chosen_device_type == ExecutorDeviceType::GPU &&
      eo.optimize_cuda_block_and_grid_sizes;
  // ...
  if (ra_exe_unit_.groupby_exprs.empty()) {
    err = executor->executePlanWithoutGroupBy(ra_exe_unit_,
                                              /* ... */,
                                              fetch_result->col_buffers,
                                              /* ... */,
                                              fetch_result->num_rows,
                                              fetch_result->frag_offsets,
                                              /* ... */,
                                              optimize_cuda_block_and_grid_sizes);
  } else {
    VLOG(1) << "outer_table_key=" << outer_table_key /* ... */;
    err = executor->executePlanWithGroupBy(ra_exe_unit_,
                                           /* ... */,
                                           fetch_result->col_buffers,
                                           /* ... */,
                                           fetch_result->num_rows,
                                           fetch_result->frag_offsets,
                                           /* ... */,
                                           optimize_cuda_block_and_grid_sizes);
  }
  if (device_results_) {
    // Keep chunks whose buffers the result set may still reference (e.g.
    // lazily fetched columns) alive for the lifetime of the results.
    std::list<std::shared_ptr<Chunk_NS::Chunk>> chunks_to_hold;
    for (const auto& chunk : chunks) {
      if (need_to_hold_chunk(chunk.get(), ra_exe_unit_, /* ... */)) {
        chunks_to_hold.push_back(chunk);
      }
    }
    // ... (hand chunks_to_hold over to device_results_)
  } else {
    VLOG(1) << "null device_results.";
  }
  // ... (results are handed to shared_context via addDeviceResults)
}

void KernelSubtask::run(Executor* executor) {
  try {
    runImpl(executor);
  } catch (const std::bad_alloc& e) {
    // ... (rethrown as a query execution error carrying
    //      Executor::ERR_OUT_OF_CPU_MEM)
  }
  // ... (further catch clauses; the out-of-GPU-memory path also records
  //      kernel_.query_mem_desc.getQueryDescriptionType())
}

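// ---------------------------------------------------------------------------
// Aside: why the chunks_to_hold list in ExecutionKernel::runImpl above exists.
// A result set over lazily fetched or unencoded columns still points into
// chunk buffers, so the kernel copies the owning shared_ptrs into the result
// to pin them. A standalone sketch with illustrative toy types:
#include <cstdint>
#include <list>
#include <memory>
#include <vector>

namespace sketch {
struct ToyChunk {
  std::vector<int8_t> buffer;  // stands in for a fetched column buffer
};

class ToyResultSet {
 public:
  // Copying the shared_ptrs extends the buffers' lifetime to that of the
  // result set, even after the kernel's own chunk list goes out of scope.
  void holdChunks(std::list<std::shared_ptr<ToyChunk>> chunks) {
    held_chunks_ = std::move(chunks);
  }

 private:
  std::list<std::shared_ptr<ToyChunk>> held_chunks_;
};
}  // namespace sketch
// ---------------------------------------------------------------------------
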
void KernelSubtask::runImpl(Executor* executor) {
  auto& query_exe_context_owned = shared_context_.getTlsExecutionContext().local();
  const bool do_render = kernel_.render_info_ && kernel_.render_info_->isInSitu();
  const auto& compilation_result = kernel_.query_comp_desc.getCompilationResult();
  const auto outer_table_key = kernel_.ra_exe_unit_.union_all
                                   ? kernel_.frag_list[0].table_key
                                   : kernel_.ra_exe_unit_.input_descs[0].getTableKey();
  if (!query_exe_context_owned) {
    // First subtask on this thread: build the thread-local execution context.
    // col_buffers/frag_offsets are placeholders shaped like the fetched data.
    std::vector<std::vector<const int8_t*>> col_buffers(
        fetch_result_->col_buffers.size(),
        std::vector<const int8_t*>(fetch_result_->col_buffers[0].size()));
    std::vector<std::vector<uint64_t>> frag_offsets(
        fetch_result_->frag_offsets.size(),
        std::vector<uint64_t>(fetch_result_->frag_offsets[0].size()));
    query_exe_context_owned = kernel_.query_mem_desc.getQueryExecutionContext(
        kernel_.ra_exe_unit_,
        executor,
        kernel_.chosen_device_type,
        kernel_.kernel_dispatch_mode,
        kernel_.chosen_device_id,
        outer_table_key,
        total_num_input_rows_,
        col_buffers,
        frag_offsets,
        executor->getRowSetMemoryOwner(),
        /* ... */,
        kernel_.query_mem_desc.sortOnGpu(),
        /* ... */,
        do_render ? kernel_.render_info_ : nullptr);
  }
  const auto& outer_tab_frag_ids = kernel_.frag_list[0].fragment_ids;
  // ...
  QueryExecutionContext* query_exe_context{query_exe_context_owned.get()};
  CHECK(query_exe_context);
  int32_t err{0};
  bool optimize_cuda_block_and_grid_sizes =
      kernel_.chosen_device_type == ExecutorDeviceType::GPU &&
      kernel_.eo.optimize_cuda_block_and_grid_sizes;
  if (kernel_.ra_exe_unit_.groupby_exprs.empty()) {
    err = executor->executePlanWithoutGroupBy(kernel_.ra_exe_unit_,
                                              /* ... */,
                                              kernel_.query_comp_desc.hoistLiterals(),
                                              /* ... */,
                                              kernel_.ra_exe_unit_.target_exprs,
                                              kernel_.chosen_device_type,
                                              fetch_result_->col_buffers,
                                              /* ... */,
                                              fetch_result_->num_rows,
                                              fetch_result_->frag_offsets,
                                              executor->getDataMgr(),
                                              kernel_.chosen_device_id,
                                              /* ... */,
                                              kernel_.ra_exe_unit_.input_descs.size(),
                                              kernel_.eo.allow_runtime_query_interrupt,
                                              do_render ? kernel_.render_info_ : nullptr,
                                              optimize_cuda_block_and_grid_sizes,
                                              start_rowid_ + num_rows_to_process_);
  } else {
    err = executor->executePlanWithGroupBy(kernel_.ra_exe_unit_,
                                           /* ... */,
                                           kernel_.query_comp_desc.hoistLiterals(),
                                           /* ... */,
                                           kernel_.chosen_device_type,
                                           fetch_result_->col_buffers,
                                           /* ... */,
                                           fetch_result_->num_rows,
                                           fetch_result_->frag_offsets,
                                           executor->getDataMgr(),
                                           kernel_.chosen_device_id,
                                           /* ... */,
                                           kernel_.ra_exe_unit_.scan_limit,
                                           /* ... */,
                                           kernel_.ra_exe_unit_.input_descs.size(),
                                           kernel_.eo.allow_runtime_query_interrupt,
                                           do_render ? kernel_.render_info_ : nullptr,
                                           optimize_cuda_block_and_grid_sizes,
                                           start_rowid_ + num_rows_to_process_);
  }
  // ...
}
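// ---------------------------------------------------------------------------
// Aside: the subtask reuses one execution context per worker thread (the
// getTlsExecutionContext().local() lookup above). A standalone sketch of that
// lazy per-thread caching, using plain thread_local for brevity (the engine
// routes it through the shared context instead; ToyContext is illustrative):
#include <memory>

namespace sketch {
struct ToyContext {
  // per-thread scratch state, built once and reused across subtasks
};

inline ToyContext& tls_context() {
  thread_local std::unique_ptr<ToyContext> ctx;
  if (!ctx) {
    ctx = std::make_unique<ToyContext>();  // first use on this thread
  }
  return *ctx;
}
}  // namespace sketch
// ---------------------------------------------------------------------------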