OmniSciDB  a5dc49c757
anonymous_namespace{ExecutionKernel.cpp} Namespace Reference

Functions

bool needs_skip_result (const ResultSetPtr &res)
 
bool query_has_inner_join (const RelAlgExecutionUnit &ra_exe_unit)
 
bool need_to_hold_chunk (const Chunk_NS::Chunk *chunk, const RelAlgExecutionUnit &ra_exe_unit, const std::vector< ColumnLazyFetchInfo > &lazy_fetch_info, const ExecutorDeviceType device_type)
 
bool need_to_hold_chunk (const std::list< std::shared_ptr< Chunk_NS::Chunk >> &chunks, const RelAlgExecutionUnit &ra_exe_unit, const std::vector< ColumnLazyFetchInfo > &lazy_fetch_info, const ExecutorDeviceType device_type)
 
size_t get_available_cpu_threads_per_task (Executor *executor, SharedKernelContext &shared_context)
 

Function Documentation

size_t anonymous_namespace{ExecutionKernel.cpp}::get_available_cpu_threads_per_task ( Executor *  executor,
SharedKernelContext &  shared_context 
)

Definition at line 161 of file ExecutionKernel.cpp.

References CHECK_GE, cpu_threads(), and SharedKernelContext::getNumAllocatedThreads().

Referenced by ExecutionKernel::runImpl().

{
  // total # allocated slots (i.e., threads) for compiled kernels of the input query
  auto const num_kernels = shared_context.getNumAllocatedThreads();
  CHECK_GE(num_kernels, 1u);
  size_t available_slots_per_task;
  if (executor->executor_resource_mgr_) {
    auto const resources_status = executor->executor_resource_mgr_->get_resource_info();
    // # available slots (i.e., threads) in the resource pool; idle threads
    auto const idle_cpu_slots =
        resources_status.total_cpu_slots - resources_status.allocated_cpu_slots;
    // we want to evenly use idle slots for each kernel task to avoid oversubscription
    available_slots_per_task = 1u + (idle_cpu_slots + num_kernels - 1u) / num_kernels;
  } else {
    available_slots_per_task = std::max(static_cast<size_t>(cpu_threads()) / num_kernels,
                                        static_cast<size_t>(1));
  }
  CHECK_GE(available_slots_per_task, 1u);
  return available_slots_per_task;
}
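The ceiling division above hands each kernel task one guaranteed thread plus an even share of the currently idle CPU slots. The snippet below is a minimal, standalone sketch of just that arithmetic; the slot counts are hypothetical example values, not values read from ExecutorResourceMgr.

#include <cstddef>
#include <iostream>

// Sketch of the slot-splitting arithmetic: 1 guaranteed thread per task plus a
// ceiling-divided share of the idle slots, never dropping below one thread.
size_t slots_per_task_sketch(size_t idle_cpu_slots, size_t num_kernels) {
  return 1u + (idle_cpu_slots + num_kernels - 1u) / num_kernels;
}

int main() {
  std::cout << slots_per_task_sketch(16, 5) << '\n';  // 5: 1 + ceil(16 / 5)
  std::cout << slots_per_task_sketch(0, 8) << '\n';   // 1: no idle slots, still one thread
  return 0;
}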

bool anonymous_namespace{ExecutionKernel.cpp}::need_to_hold_chunk ( const Chunk_NS::Chunk *  chunk,
const RelAlgExecutionUnit &  ra_exe_unit,
const std::vector< ColumnLazyFetchInfo > &  lazy_fetch_info,
const ExecutorDeviceType  device_type 
)

Definition at line 45 of file ExecutionKernel.cpp.

References CHECK, CHECK_EQ, shared::ColumnKey::column_id, ColumnDescriptor::columnId, ColumnDescriptor::columnType, CPU, ColumnDescriptor::db_id, Chunk_NS::Chunk::getColumnDesc(), Analyzer::ColumnVar::getColumnKey(), kENCODING_NONE, ColumnDescriptor::tableId, and RelAlgExecutionUnit::target_exprs.

Referenced by need_to_hold_chunk(), and ExecutionKernel::runImpl().

{
  CHECK(chunk->getColumnDesc());
  const auto& chunk_ti = chunk->getColumnDesc()->columnType;
  if (device_type == ExecutorDeviceType::CPU &&
      (chunk_ti.is_array() ||
       (chunk_ti.is_string() && chunk_ti.get_compression() == kENCODING_NONE))) {
    for (const auto target_expr : ra_exe_unit.target_exprs) {
      const auto col_var = dynamic_cast<const Analyzer::ColumnVar*>(target_expr);
      if (col_var) {
        const auto& column_key = col_var->getColumnKey();
        return column_key.column_id == chunk->getColumnDesc()->columnId &&
               column_key.table_id == chunk->getColumnDesc()->tableId &&
               column_key.db_id == chunk->getColumnDesc()->db_id;
      }
    }
  }
  if (lazy_fetch_info.empty()) {
    return false;
  }
  CHECK_EQ(lazy_fetch_info.size(), ra_exe_unit.target_exprs.size());
  for (size_t i = 0; i < ra_exe_unit.target_exprs.size(); i++) {
    const auto target_expr = ra_exe_unit.target_exprs[i];
    const auto& col_lazy_fetch = lazy_fetch_info[i];
    const auto col_var = dynamic_cast<const Analyzer::ColumnVar*>(target_expr);
    if (col_var) {
      const auto& column_key = col_var->getColumnKey();
      if (column_key.column_id == chunk->getColumnDesc()->columnId &&
          column_key.table_id == chunk->getColumnDesc()->tableId &&
          column_key.db_id == chunk->getColumnDesc()->db_id) {
        if (col_lazy_fetch.is_lazily_fetched) {
          // hold lazy fetched inputs for later iteration
          return true;
        }
      }
    }
  }
  return false;
}
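In short, a chunk must stay pinned when (a) the query runs on CPU and the chunk backs an array or none-encoded string target column, since the produced result set may reference those chunk buffers directly, or (b) the chunk backs a lazily fetched target column that is materialized during a later result iteration. The following is a simplified, self-contained model of that decision using hypothetical stand-in types (ChunkSketch, TargetSketch), not OmniSciDB's real Chunk / ColumnVar / ColumnLazyFetchInfo classes, and it ignores the early-return subtlety of the first loop above.

#include <iostream>
#include <vector>

// Hypothetical stand-ins, just enough to model the two hold conditions.
struct ColumnKeySketch {
  int db_id, table_id, column_id;
  bool operator==(const ColumnKeySketch& o) const {
    return db_id == o.db_id && table_id == o.table_id && column_id == o.column_id;
  }
};

struct TargetSketch {
  ColumnKeySketch key;
  bool is_lazily_fetched;  // mirrors ColumnLazyFetchInfo::is_lazily_fetched
};

struct ChunkSketch {
  ColumnKeySketch key;
  bool is_varlen;  // array or none-encoded string column
};

bool need_to_hold_chunk_sketch(const ChunkSketch& chunk,
                               const std::vector<TargetSketch>& targets,
                               bool is_cpu) {
  for (const auto& t : targets) {
    if (t.key == chunk.key) {
      // (a) CPU + varlen projected column: result buffers may point into the chunk.
      // (b) lazily fetched column: needed again when the result is iterated later.
      if ((is_cpu && chunk.is_varlen) || t.is_lazily_fetched) {
        return true;
      }
    }
  }
  return false;
}

int main() {
  ChunkSketch none_encoded_str_chunk{{1, 10, 3}, /*is_varlen=*/true};
  std::vector<TargetSketch> targets{{{1, 10, 3}, /*is_lazily_fetched=*/false}};
  std::cout << need_to_hold_chunk_sketch(none_encoded_str_chunk, targets, /*is_cpu=*/true)
            << '\n';  // 1: hold the chunk
  return 0;
}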

bool anonymous_namespace{ExecutionKernel.cpp}::need_to_hold_chunk ( const std::list< std::shared_ptr< Chunk_NS::Chunk >> &  chunks,
const RelAlgExecutionUnit &  ra_exe_unit,
const std::vector< ColumnLazyFetchInfo > &  lazy_fetch_info,
const ExecutorDeviceType  device_type 
)

Definition at line 87 of file ExecutionKernel.cpp.

References need_to_hold_chunk().

{
  for (const auto& chunk : chunks) {
    if (need_to_hold_chunk(chunk.get(), ra_exe_unit, lazy_fetch_info, device_type)) {
      return true;
    }
  }

  return false;
}

bool anonymous_namespace{ExecutionKernel.cpp}::needs_skip_result ( const ResultSetPtr &  res)

Definition at line 32 of file ExecutionKernel.cpp.

Referenced by SharedKernelContext::addDeviceResults().

{
  return !res || res->definitelyHasNoRows();
}
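A device result is dropped from reduction when its pointer is null or the result set can prove it is empty. Below is a tiny stand-in illustration; MockResultSet is hypothetical, while the real predicate calls ResultSet::definitelyHasNoRows() through the ResultSetPtr shared pointer.

#include <iostream>
#include <memory>

// Hypothetical stand-in for ResultSet with just the one query used by the predicate.
struct MockResultSet {
  bool no_rows = false;
  bool definitelyHasNoRows() const { return no_rows; }
};
using MockResultSetPtr = std::shared_ptr<MockResultSet>;

bool needs_skip_result_sketch(const MockResultSetPtr& res) {
  return !res || res->definitelyHasNoRows();
}

int main() {
  auto empty_rs = std::make_shared<MockResultSet>();
  empty_rs->no_rows = true;
  std::cout << needs_skip_result_sketch(nullptr) << '\n';                            // 1: null result
  std::cout << needs_skip_result_sketch(empty_rs) << '\n';                           // 1: provably empty
  std::cout << needs_skip_result_sketch(std::make_shared<MockResultSet>()) << '\n';  // 0: kept
  return 0;
}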


bool anonymous_namespace{ExecutionKernel.cpp}::query_has_inner_join ( const RelAlgExecutionUnit &  ra_exe_unit)
inline

Definition at line 36 of file ExecutionKernel.cpp.

References INNER, and RelAlgExecutionUnit::join_quals.

Referenced by ExecutionKernel::runImpl().

{
  return (std::count_if(ra_exe_unit.join_quals.begin(),
                        ra_exe_unit.join_quals.end(),
                        [](const auto& join_condition) {
                          return join_condition.type == JoinType::INNER;
                        }) > 0);
}
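The predicate is a straightforward std::count_if over the per-nesting-level join conditions, returning true when at least one of them is an INNER join (std::any_of would short-circuit but behave equivalently). A self-contained illustration of the same pattern follows; JoinConditionSketch is a hypothetical stand-in for the entries of RelAlgExecutionUnit::join_quals.

#include <algorithm>
#include <iostream>
#include <vector>

// Hypothetical stand-ins for JoinType and the join-condition entries.
enum class JoinTypeSketch { INNER, LEFT };

struct JoinConditionSketch {
  JoinTypeSketch type;
};

bool query_has_inner_join_sketch(const std::vector<JoinConditionSketch>& join_quals) {
  return std::count_if(join_quals.begin(), join_quals.end(), [](const auto& jc) {
           return jc.type == JoinTypeSketch::INNER;
         }) > 0;
}

int main() {
  std::cout << query_has_inner_join_sketch({{JoinTypeSketch::LEFT}}) << '\n';  // 0
  std::cout << query_has_inner_join_sketch({{JoinTypeSketch::LEFT}, {JoinTypeSketch::INNER}})
            << '\n';  // 1
  return 0;
}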