OmniSciDB  c1a53651b2
QueryFragmentDescriptor Class Reference

#include <QueryFragmentDescriptor.h>

Public Member Functions

 QueryFragmentDescriptor (const RelAlgExecutionUnit &ra_exe_unit, const std::vector< InputTableInfo > &query_infos, const std::vector< Data_Namespace::MemoryInfo > &gpu_mem_infos, const double gpu_input_mem_limit_percent, const std::vector< size_t > allowed_outer_fragment_indices)
 
void buildFragmentKernelMap (const RelAlgExecutionUnit &ra_exe_unit, const std::vector< uint64_t > &frag_offsets, const int device_count, const ExecutorDeviceType &device_type, const bool enable_multifrag_kernels, const bool enable_inner_join_fragment_skipping, Executor *executor)
 
template<typename DISPATCH_FCN >
void assignFragsToMultiDispatch (DISPATCH_FCN f) const
 
template<typename DISPATCH_FCN >
void assignFragsToKernelDispatch (DISPATCH_FCN f, const RelAlgExecutionUnit &ra_exe_unit) const
 
bool shouldCheckWorkUnitWatchdog () const
 

Static Public Member Functions

static void computeAllTablesFragments (std::map< shared::TableKey, const TableFragments * > &all_tables_fragments, const RelAlgExecutionUnit &ra_exe_unit, const std::vector< InputTableInfo > &query_infos)
 

Protected Member Functions

void buildFragmentPerKernelMapForUnion (const RelAlgExecutionUnit &ra_exe_unit, const std::vector< uint64_t > &frag_offsets, const int device_count, const size_t num_bytes_for_row, const ExecutorDeviceType &device_type, Executor *executor)
 
void buildFragmentPerKernelMap (const RelAlgExecutionUnit &ra_exe_unit, const std::vector< uint64_t > &frag_offsets, const int device_count, const size_t num_bytes_for_row, const ExecutorDeviceType &device_type, Executor *executor)
 
void buildMultifragKernelMap (const RelAlgExecutionUnit &ra_exe_unit, const std::vector< uint64_t > &frag_offsets, const int device_count, const size_t num_bytes_for_row, const ExecutorDeviceType &device_type, const bool enable_inner_join_fragment_skipping, Executor *executor)
 
void buildFragmentPerKernelForTable (const TableFragments *fragments, const RelAlgExecutionUnit &ra_exe_unit, const InputDescriptor &table_desc, const bool is_temporary_table, const std::vector< uint64_t > &frag_offsets, const int device_count, const size_t num_bytes_for_row, const ChunkMetadataVector &deleted_chunk_metadata_vec, const std::optional< size_t > table_desc_offset, const ExecutorDeviceType &device_type, Executor *executor)
 
bool terminateDispatchMaybe (size_t &tuple_count, const RelAlgExecutionUnit &ra_exe_unit, const ExecutionKernelDescriptor &kernel) const
 
void checkDeviceMemoryUsage (const Fragmenter_Namespace::FragmentInfo &fragment, const int device_id, const size_t num_cols)
 

Protected Attributes

std::vector< size_t > allowed_outer_fragment_indices_
 
size_t outer_fragments_size_ = 0
 
int64_t rowid_lookup_key_ = -1
 
std::map< shared::TableKey, const TableFragments * > selected_tables_fragments_
 
std::map< int, std::vector< ExecutionKernelDescriptor > > execution_kernels_per_device_
 
double gpu_input_mem_limit_percent_
 
std::map< size_t, size_t > tuple_count_per_device_
 
std::map< size_t, size_t > available_gpu_mem_bytes_
 

Detailed Description

Definition at line 68 of file QueryFragmentDescriptor.h.

Constructor & Destructor Documentation

QueryFragmentDescriptor::QueryFragmentDescriptor ( const RelAlgExecutionUnit &  ra_exe_unit,
const std::vector< InputTableInfo > &  query_infos,
const std::vector< Data_Namespace::MemoryInfo > &  gpu_mem_infos,
const double  gpu_input_mem_limit_percent,
const std::vector< size_t >  allowed_outer_fragment_indices 
)

Definition at line 25 of file QueryFragmentDescriptor.cpp.

References available_gpu_mem_bytes_, CHECK_EQ, RelAlgExecutionUnit::input_descs, and selected_tables_fragments_.

31  : allowed_outer_fragment_indices_(allowed_outer_fragment_indices)
32  , gpu_input_mem_limit_percent_(gpu_input_mem_limit_percent) {
33  const size_t input_desc_count{ra_exe_unit.input_descs.size()};
34  CHECK_EQ(query_infos.size(), input_desc_count);
35  for (size_t table_idx = 0; table_idx < input_desc_count; ++table_idx) {
36  const auto& table_key = ra_exe_unit.input_descs[table_idx].getTableKey();
37  if (!selected_tables_fragments_.count(table_key)) {
38  selected_tables_fragments_[table_key] = &query_infos[table_idx].info.fragments;
39  }
40  }
41 
42  for (size_t device_id = 0; device_id < gpu_mem_infos.size(); device_id++) {
43  const auto& gpu_mem_info = gpu_mem_infos[device_id];
44  available_gpu_mem_bytes_[device_id] =
45  gpu_mem_info.maxNumPages * gpu_mem_info.pageSize;
46  }
47 }

Member Function Documentation

template<typename DISPATCH_FCN >
void QueryFragmentDescriptor::assignFragsToKernelDispatch ( DISPATCH_FCN  f,
const RelAlgExecutionUnit &  ra_exe_unit 
) const
inline

Dispatch one fragment for each device. Iterate the device map and dispatch one kernel for each device per iteration. This allows balanced dispatch as well as early termination if the number of rows passing the kernel can be computed at dispatch time and the scan limit is reached.

Definition at line 111 of file QueryFragmentDescriptor.h.

References CHECK, execution_kernels_per_device_, rowid_lookup_key_, and terminateDispatchMaybe().

112  {
113  if (execution_kernels_per_device_.empty()) {
114  return;
115  }
116 
117  size_t tuple_count = 0;
118 
119  std::unordered_map<int, size_t> execution_kernel_index;
120  for (const auto& device_itr : execution_kernels_per_device_) {
121  CHECK(execution_kernel_index.insert(std::make_pair(device_itr.first, size_t(0)))
122  .second);
123  }
124 
125  bool dispatch_finished = false;
126  while (!dispatch_finished) {
127  dispatch_finished = true;
128  for (const auto& device_itr : execution_kernels_per_device_) {
129  auto& kernel_idx = execution_kernel_index[device_itr.first];
130  if (kernel_idx < device_itr.second.size()) {
131  dispatch_finished = false;
132  const auto& execution_kernel = device_itr.second[kernel_idx++];
133  f(device_itr.first, execution_kernel.fragments, rowid_lookup_key_);
134  if (terminateDispatchMaybe(tuple_count, ra_exe_unit, execution_kernel)) {
135  return;
136  }
137  }
138  }
139  }
140  }
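
A minimal usage sketch (hypothetical caller code, not from the OmniSciDB sources): the dispatch functor receives the device id, the fragment list for that kernel, and the rowid lookup key. enqueue_kernel below is a placeholder for whatever the caller does with them.

 query_fragment_descriptor.assignFragsToKernelDispatch(
     [&](const int device_id,
         const FragmentsList& frag_list,
         const int64_t rowid_lookup_key) {
       // Placeholder: a real caller would build and enqueue an execution
       // kernel for device_id over frag_list here.
       enqueue_kernel(device_id, frag_list, rowid_lookup_key);
     },
     ra_exe_unit);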


template<typename DISPATCH_FCN >
void QueryFragmentDescriptor::assignFragsToMultiDispatch ( DISPATCH_FCN  f) const
inline

Dispatch multi-fragment kernels. Currently GPU only. Each GPU should have only one kernel, with multiple fragments in its fragments list.

Definition at line 94 of file QueryFragmentDescriptor.h.

References CHECK_EQ, execution_kernels_per_device_, and rowid_lookup_key_.

94  {
95  for (const auto& device_itr : execution_kernels_per_device_) {
96  const auto& execution_kernels = device_itr.second;
97  CHECK_EQ(execution_kernels.size(), size_t(1));
98 
99  const auto& fragments_list = execution_kernels.front().fragments;
100  f(device_itr.first, fragments_list, rowid_lookup_key_);
101  }
102  }
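
A usage sketch on the same pattern (caller code again hypothetical): exactly one dispatch per GPU, carrying that device's entire fragment list.

 query_fragment_descriptor.assignFragsToMultiDispatch(
     [&](const int device_id,
         const FragmentsList& frag_list,
         const int64_t rowid_lookup_key) {
       // Placeholder: launch the single multi-fragment kernel for this GPU.
       enqueue_multifrag_kernel(device_id, frag_list, rowid_lookup_key);
     });
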
void QueryFragmentDescriptor::buildFragmentKernelMap ( const RelAlgExecutionUnit &  ra_exe_unit,
const std::vector< uint64_t > &  frag_offsets,
const int  device_count,
const ExecutorDeviceType &  device_type,
const bool  enable_multifrag_kernels,
const bool  enable_inner_join_fragment_skipping,
Executor *  executor 
)

Definition at line 63 of file QueryFragmentDescriptor.cpp.

References buildFragmentPerKernelMap(), buildFragmentPerKernelMapForUnion(), buildMultifragKernelMap(), RelAlgExecutionUnit::input_descs, and RelAlgExecutionUnit::union_all.

70  {
71  // For joins, only consider the cardinality of the LHS
72  // columns in the bytes per row count.
73  std::set<shared::TableKey> lhs_table_keys;
74  for (const auto& input_desc : ra_exe_unit.input_descs) {
75  if (input_desc.getNestLevel() == 0) {
76  lhs_table_keys.insert(input_desc.getTableKey());
77  }
78  }
79 
80  const auto num_bytes_for_row = executor->getNumBytesForFetchedRow(lhs_table_keys);
81 
82  if (ra_exe_unit.union_all) {
83  buildFragmentPerKernelMapForUnion(ra_exe_unit,
84  frag_offsets,
85  device_count,
86  num_bytes_for_row,
87  device_type,
88  executor);
89  } else if (enable_multifrag_kernels) {
90  buildMultifragKernelMap(ra_exe_unit,
91  frag_offsets,
92  device_count,
93  num_bytes_for_row,
94  device_type,
95  enable_inner_join_fragment_skipping,
96  executor);
97  } else {
98  buildFragmentPerKernelMap(ra_exe_unit,
99  frag_offsets,
100  device_count,
101  num_bytes_for_row,
102  device_type,
103  executor);
104  }
105 }
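
Putting the pieces together, the intended lifecycle is roughly the sketch below; the inputs (ra_exe_unit, query_infos, gpu_mem_infos, frag_offsets, device_count, device_type, executor, and the dispatch functor) are assumed to come from the caller.

 QueryFragmentDescriptor fragment_descriptor(ra_exe_unit,
                                             query_infos,
                                             gpu_mem_infos,
                                             gpu_input_mem_limit_percent,
                                             allowed_outer_fragment_indices);
 fragment_descriptor.buildFragmentKernelMap(ra_exe_unit,
                                            frag_offsets,
                                            device_count,
                                            device_type,
                                            /*enable_multifrag_kernels=*/false,
                                            /*enable_inner_join_fragment_skipping=*/false,
                                            executor);
 // With multifrag kernels disabled, each device holds a vector of
 // single-fragment kernels; dispatch them round-robin across devices.
 fragment_descriptor.assignFragsToKernelDispatch(dispatch_fcn, ra_exe_unit);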


void QueryFragmentDescriptor::buildFragmentPerKernelForTable ( const TableFragments *  fragments,
const RelAlgExecutionUnit &  ra_exe_unit,
const InputDescriptor &  table_desc,
const bool  is_temporary_table,
const std::vector< uint64_t > &  frag_offsets,
const int  device_count,
const size_t  num_bytes_for_row,
const ChunkMetadataVector &  deleted_chunk_metadata_vec,
const std::optional< size_t >  table_desc_offset,
const ExecutorDeviceType &  device_type,
Executor *  executor 
)
protected

Definition at line 107 of file QueryFragmentDescriptor.cpp.

References allowed_outer_fragment_indices_, CHECK, CHECK_GE, CHECK_GT, checkDeviceMemoryUsage(), CPU, Data_Namespace::CPU_LEVEL, execution_kernels_per_device_, GPU, Data_Namespace::GPU_LEVEL, RelAlgExecutionUnit::input_descs, rowid_lookup_key_, selected_tables_fragments_, and RelAlgExecutionUnit::simple_quals.

Referenced by buildFragmentPerKernelMap(), and buildFragmentPerKernelMapForUnion().

118  {
119  auto get_fragment_tuple_count = [&deleted_chunk_metadata_vec, &is_temporary_table](
120  const auto& fragment) -> std::optional<size_t> {
121  // returning std::nullopt disables execution dispatch optimizations based on tuple
122  // counts as it signals to the dispatch mechanism that a reliable tuple count cannot
123  // be obtained. This is the case for fragments which have deleted rows, temporary
124  // table fragments, or fragments in a UNION query.
125  if (is_temporary_table) {
126  // 31 Mar 2021 MAT TODO I think that the fragment Tuple count should be ok
127  // need to double check that at some later date
128  return std::nullopt;
129  }
130  if (deleted_chunk_metadata_vec.empty()) {
131  return fragment.getNumTuples();
132  }
133  const auto fragment_id = fragment.fragmentId;
134  CHECK_GE(fragment_id, 0);
135  if (static_cast<size_t>(fragment_id) < deleted_chunk_metadata_vec.size()) {
136  const auto& chunk_metadata = deleted_chunk_metadata_vec[fragment_id];
137  if (chunk_metadata.second->chunkStats.max.tinyintval == 1) {
138  return std::nullopt;
139  }
140  }
141  return fragment.getNumTuples();
142  };
143 
144  for (size_t i = 0; i < fragments->size(); i++) {
145  if (!allowed_outer_fragment_indices_.empty()) {
146  if (std::find(allowed_outer_fragment_indices_.begin(),
147  allowed_outer_fragment_indices_.end(),
148  i) == allowed_outer_fragment_indices_.end()) {
149  continue;
150  }
151  }
152 
153  const auto& fragment = (*fragments)[i];
154  const auto skip_frag = executor->skipFragment(
155  table_desc, fragment, ra_exe_unit.simple_quals, frag_offsets, i);
156  if (skip_frag.first) {
157  continue;
158  }
159  rowid_lookup_key_ = std::max(rowid_lookup_key_, skip_frag.second);
160  const int chosen_device_count =
161  device_type == ExecutorDeviceType::CPU ? 1 : device_count;
162  CHECK_GT(chosen_device_count, 0);
163  const auto memory_level = device_type == ExecutorDeviceType::GPU
164  ? Data_Namespace::GPU_LEVEL
165  : Data_Namespace::CPU_LEVEL;
166  const int device_id = (device_type == ExecutorDeviceType::CPU || fragment.shard == -1)
167  ? fragment.deviceIds[static_cast<int>(memory_level)]
168  : fragment.shard % chosen_device_count;
169 
170  if (device_type == ExecutorDeviceType::GPU) {
171  checkDeviceMemoryUsage(fragment, device_id, num_bytes_for_row);
172  }
173 
174  ExecutionKernelDescriptor execution_kernel_desc{
175  device_id, {}, get_fragment_tuple_count(fragment)};
176  if (table_desc_offset) {
177  const auto frag_ids =
178  executor->getTableFragmentIndices(ra_exe_unit,
179  device_type,
180  *table_desc_offset,
181  i,
182  selected_tables_fragments_,
183  executor->getInnerTabIdToJoinCond());
184  const auto& table_key = ra_exe_unit.input_descs[*table_desc_offset].getTableKey();
185  execution_kernel_desc.fragments.emplace_back(
186  FragmentsPerTable{table_key, frag_ids});
187 
188  } else {
189  for (size_t j = 0; j < ra_exe_unit.input_descs.size(); ++j) {
190  const auto frag_ids =
191  executor->getTableFragmentIndices(ra_exe_unit,
192  device_type,
193  j,
194  i,
195  selected_tables_fragments_,
196  executor->getInnerTabIdToJoinCond());
197  const auto& table_key = ra_exe_unit.input_descs[j].getTableKey();
198  auto table_frags_it = selected_tables_fragments_.find(table_key);
199  CHECK(table_frags_it != selected_tables_fragments_.end());
200 
201  execution_kernel_desc.fragments.emplace_back(
202  FragmentsPerTable{table_key, frag_ids});
203  }
204  }
205 
206  auto itr = execution_kernels_per_device_.find(device_id);
207  if (itr == execution_kernels_per_device_.end()) {
208  auto const pair = execution_kernels_per_device_.insert(std::make_pair(
209  device_id,
210  std::vector<ExecutionKernelDescriptor>{std::move(execution_kernel_desc)}));
211  CHECK(pair.second);
212  } else {
213  itr->second.emplace_back(std::move(execution_kernel_desc));
214  }
215  }
216 }



void QueryFragmentDescriptor::buildFragmentPerKernelMap ( const RelAlgExecutionUnit &  ra_exe_unit,
const std::vector< uint64_t > &  frag_offsets,
const int  device_count,
const size_t  num_bytes_for_row,
const ExecutorDeviceType &  device_type,
Executor *  executor 
)
protected

Definition at line 282 of file QueryFragmentDescriptor.cpp.

References buildFragmentPerKernelForTable(), CHECK, CHECK_GT, Catalog_Namespace::SysCatalog::getCatalog(), RelAlgExecutionUnit::input_descs, Catalog_Namespace::SysCatalog::instance(), outer_fragments_size_, selected_tables_fragments_, and table_is_temporary().

Referenced by buildFragmentKernelMap().

288  {
289  const auto& outer_table_desc = ra_exe_unit.input_descs.front();
290  const auto& outer_table_key = outer_table_desc.getTableKey();
291  auto it = selected_tables_fragments_.find(outer_table_key);
292  CHECK(it != selected_tables_fragments_.end());
293  const auto outer_fragments = it->second;
294  outer_fragments_size_ = outer_fragments->size();
295 
296  ChunkMetadataVector deleted_chunk_metadata_vec;
297 
298  bool is_temporary_table = false;
299  if (outer_table_key.table_id > 0) {
300  CHECK_GT(outer_table_key.db_id, 0);
301  const auto catalog =
302  Catalog_Namespace::SysCatalog::instance().getCatalog(outer_table_key.db_id);
303  CHECK(catalog);
304  // Temporary tables will not have a table descriptor and not have deleted rows.
305  const auto td = catalog->getMetadataForTable(outer_table_key.table_id);
306  CHECK(td);
307  if (table_is_temporary(td)) {
308  // for temporary tables, we won't have delete column metadata available. However, we
309  // know the table fits in memory as it is a temporary table, so signal to the lower
310  // layers that we can disregard the early out select * optimization
311  is_temporary_table = true;
312  } else {
313  const auto deleted_cd = catalog->getDeletedColumnIfRowsDeleted(td);
314  if (deleted_cd) {
315  // 01 Apr 2021 MAT TODO this code is called on logical tables (ie not the shards)
316  // I wonder if this makes sense in those cases
317  td->fragmenter->getFragmenterId();
318  auto frags = td->fragmenter->getFragmentsForQuery().fragments;
319  for (auto frag : frags) {
320  auto chunk_meta_it =
321  frag.getChunkMetadataMapPhysical().find(deleted_cd->columnId);
322  if (chunk_meta_it != frag.getChunkMetadataMapPhysical().end()) {
323  const auto& chunk_meta = chunk_meta_it->second;
324  ChunkKey chunk_key_prefix = {outer_table_key.db_id,
325  outer_table_key.table_id,
326  deleted_cd->columnId,
327  frag.fragmentId};
328  deleted_chunk_metadata_vec.emplace_back(
329  std::pair{chunk_key_prefix, chunk_meta});
330  }
331  }
332  }
333  }
334  }
335 
336  buildFragmentPerKernelForTable(outer_fragments,
337  ra_exe_unit,
338  outer_table_desc,
339  is_temporary_table,
340  frag_offsets,
341  device_count,
342  num_bytes_for_row,
343  deleted_chunk_metadata_vec,
344  std::nullopt,
345  device_type,
346  executor);
347 }



void QueryFragmentDescriptor::buildFragmentPerKernelMapForUnion ( const RelAlgExecutionUnit &  ra_exe_unit,
const std::vector< uint64_t > &  frag_offsets,
const int  device_count,
const size_t  num_bytes_for_row,
const ExecutorDeviceType &  device_type,
Executor *  executor 
)
protected

Definition at line 218 of file QueryFragmentDescriptor.cpp.

References gpu_enabled::accumulate(), buildFragmentPerKernelForTable(), CHECK, CHECK_GT, execution_kernels_per_device_, Catalog_Namespace::get_metadata_for_table(), RelAlgExecutionUnit::input_descs, shared::printContainer(), selected_tables_fragments_, table_is_temporary(), and VLOG.

Referenced by buildFragmentKernelMap().

224  {
225  for (size_t j = 0; j < ra_exe_unit.input_descs.size(); ++j) {
226  auto const& table_desc = ra_exe_unit.input_descs[j];
227  const auto& table_key = table_desc.getTableKey();
228  TableFragments const* fragments = selected_tables_fragments_.at(table_key);
229 
230  auto data_mgr = executor->getDataMgr();
231  ChunkMetadataVector deleted_chunk_metadata_vec;
232 
233  bool is_temporary_table = false;
234  if (table_key.table_id > 0) {
235  // Temporary tables will not have a table descriptor and not have deleted rows.
236  CHECK_GT(table_key.db_id, 0);
237  const auto td = Catalog_Namespace::get_metadata_for_table(table_key);
238  CHECK(td);
239  if (table_is_temporary(td)) {
240  // for temporary tables, we won't have delete column metadata available. However,
241  // we know the table fits in memory as it is a temporary table, so signal to the
242  // lower layers that we can disregard the early out select * optimization
243  is_temporary_table = true;
244  } else {
245  const auto deleted_cd = executor->plan_state_->getDeletedColForTable(table_key);
246  if (deleted_cd) {
247  ChunkKey chunk_key_prefix = {
248  table_key.db_id, table_key.table_id, deleted_cd->columnId};
249  data_mgr->getChunkMetadataVecForKeyPrefix(deleted_chunk_metadata_vec,
250  chunk_key_prefix);
251  }
252  }
253  }
254 
255  buildFragmentPerKernelForTable(fragments,
256  ra_exe_unit,
257  table_desc,
258  is_temporary_table,
259  frag_offsets,
260  device_count,
261  num_bytes_for_row,
262  {},
263  j,
264  device_type,
265  executor);
266 
267  std::vector<int> table_ids =
268  std::accumulate(execution_kernels_per_device_[0].begin(),
269  execution_kernels_per_device_[0].end(),
270  std::vector<int>(),
271  [](auto&& vec, auto& exe_kern) {
272  vec.push_back(exe_kern.fragments[0].table_key.table_id);
273  return vec;
274  });
275  VLOG(1) << "execution_kernels_per_device_.size()="
276  << execution_kernels_per_device_.size()
277  << " execution_kernels_per_device_[0][*].fragments[0].table_id="
278  << shared::printContainer(table_ids);
279  }
280 }



void QueryFragmentDescriptor::buildMultifragKernelMap ( const RelAlgExecutionUnit &  ra_exe_unit,
const std::vector< uint64_t > &  frag_offsets,
const int  device_count,
const size_t  num_bytes_for_row,
const ExecutorDeviceType &  device_type,
const bool  enable_inner_join_fragment_skipping,
Executor *  executor 
)
protected

Definition at line 349 of file QueryFragmentDescriptor.cpp.

References allowed_outer_fragment_indices_, CHECK, CHECK_EQ, checkDeviceMemoryUsage(), execution_kernels_per_device_, GPU, Data_Namespace::GPU_LEVEL, RelAlgExecutionUnit::input_descs, outer_fragments_size_, rowid_lookup_key_, selected_tables_fragments_, and RelAlgExecutionUnit::simple_quals.

Referenced by buildFragmentKernelMap().

356  {
357  // Allocate all the fragments of the tables involved in the query to available
358  // devices. The basic idea: the device is decided by the outer table in the
359  // query (the first table in a join) and we need to broadcast the fragments
360  // in the inner table to each device. Sharding will change this model.
361  const auto& outer_table_desc = ra_exe_unit.input_descs.front();
362  const auto& outer_table_key = outer_table_desc.getTableKey();
363  auto it = selected_tables_fragments_.find(outer_table_key);
364  CHECK(it != selected_tables_fragments_.end());
365  const auto outer_fragments = it->second;
366  outer_fragments_size_ = outer_fragments->size();
367 
368  const auto inner_table_id_to_join_condition = executor->getInnerTabIdToJoinCond();
369 
370  for (size_t outer_frag_id = 0; outer_frag_id < outer_fragments->size();
371  ++outer_frag_id) {
372  if (!allowed_outer_fragment_indices_.empty()) {
373  if (std::find(allowed_outer_fragment_indices_.begin(),
374  allowed_outer_fragment_indices_.end(),
375  outer_frag_id) == allowed_outer_fragment_indices_.end()) {
376  continue;
377  }
378  }
379 
380  const auto& fragment = (*outer_fragments)[outer_frag_id];
381  auto skip_frag = executor->skipFragment(outer_table_desc,
382  fragment,
383  ra_exe_unit.simple_quals,
384  frag_offsets,
385  outer_frag_id);
386  if (enable_inner_join_fragment_skipping &&
387  (skip_frag == std::pair<bool, int64_t>(false, -1))) {
388  skip_frag = executor->skipFragmentInnerJoins(
389  outer_table_desc, ra_exe_unit, fragment, frag_offsets, outer_frag_id);
390  }
391  if (skip_frag.first) {
392  continue;
393  }
394  const int device_id =
395  fragment.shard == -1
396  ? fragment.deviceIds[static_cast<int>(Data_Namespace::GPU_LEVEL)]
397  : fragment.shard % device_count;
398  if (device_type == ExecutorDeviceType::GPU) {
399  checkDeviceMemoryUsage(fragment, device_id, num_bytes_for_row);
400  }
401  for (size_t j = 0; j < ra_exe_unit.input_descs.size(); ++j) {
402  const auto& table_key = ra_exe_unit.input_descs[j].getTableKey();
403  auto table_frags_it = selected_tables_fragments_.find(table_key);
404  CHECK(table_frags_it != selected_tables_fragments_.end());
405  const auto frag_ids =
406  executor->getTableFragmentIndices(ra_exe_unit,
407  device_type,
408  j,
409  outer_frag_id,
410  selected_tables_fragments_,
411  inner_table_id_to_join_condition);
412 
413  if (execution_kernels_per_device_.find(device_id) ==
414  execution_kernels_per_device_.end()) {
415  std::vector<ExecutionKernelDescriptor> kernel_descs{
416  ExecutionKernelDescriptor{device_id, FragmentsList{}, std::nullopt}};
417  CHECK(
418  execution_kernels_per_device_.insert(std::make_pair(device_id, kernel_descs))
419  .second);
420  }
421 
422  // Multifrag kernels only have one execution kernel per device. Grab the execution
423  // kernel object and push back into its fragments list.
424  CHECK_EQ(execution_kernels_per_device_[device_id].size(), size_t(1));
425  auto& execution_kernel = execution_kernels_per_device_[device_id].front();
426 
427  auto& kernel_frag_list = execution_kernel.fragments;
428  if (kernel_frag_list.size() < j + 1) {
429  kernel_frag_list.emplace_back(FragmentsPerTable{table_key, frag_ids});
430  } else {
431  CHECK_EQ(kernel_frag_list[j].table_key, table_key);
432  auto& curr_frag_ids = kernel_frag_list[j].fragment_ids;
433  for (const int frag_id : frag_ids) {
434  if (std::find(curr_frag_ids.begin(), curr_frag_ids.end(), frag_id) ==
435  curr_frag_ids.end()) {
436  curr_frag_ids.push_back(frag_id);
437  }
438  }
439  }
440  }
441  rowid_lookup_key_ = std::max(rowid_lookup_key_, skip_frag.second);
442  }
443 }
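
To illustrate the resulting layout (hypothetical keys and fragment placement, assuming four outer fragments land alternately on two GPUs): each device ends up with exactly one ExecutionKernelDescriptor whose fragments list holds one FragmentsPerTable entry per input table, with the inner table's fragments broadcast to every device.

 // Hypothetical: device_count = 2, outer fragments {0,1,2,3}, one inner
 // table with fragments {0,1}.
 //   execution_kernels_per_device_[0].front().fragments ==
 //       { {outer_key, {0, 2}}, {inner_key, {0, 1}} }
 //   execution_kernels_per_device_[1].front().fragments ==
 //       { {outer_key, {1, 3}}, {inner_key, {0, 1}} }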



void QueryFragmentDescriptor::checkDeviceMemoryUsage ( const Fragmenter_Namespace::FragmentInfo &  fragment,
const int  device_id,
const size_t  num_cols 
)
protected

Definition at line 479 of file QueryFragmentDescriptor.cpp.

References available_gpu_mem_bytes_, CHECK_GE, g_cluster, Fragmenter_Namespace::FragmentInfo::getNumTuples(), gpu_input_mem_limit_percent_, LOG, tuple_count_per_device_, and logger::WARNING.

Referenced by buildFragmentPerKernelForTable(), and buildMultifragKernelMap().

482  {
483  if (g_cluster) {
484  // Disabled in distributed mode for now
485  return;
486  }
487  CHECK_GE(device_id, 0);
488  tuple_count_per_device_[device_id] += fragment.getNumTuples();
489  const size_t gpu_bytes_limit =
490  available_gpu_mem_bytes_[device_id] * gpu_input_mem_limit_percent_;
491  if (tuple_count_per_device_[device_id] * num_bytes_for_row > gpu_bytes_limit) {
492  LOG(WARNING) << "Not enough memory on device " << device_id
493  << " for input chunks totaling "
494  << tuple_count_per_device_[device_id] * num_bytes_for_row
495  << " bytes (available device memory: " << gpu_bytes_limit << " bytes)";
496  throw QueryMustRunOnCpu();
497  }
498 }
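
A worked example of the limit arithmetic (illustrative numbers, not defaults):

 const size_t available_bytes = 8ULL << 30;  // 8 GiB: maxNumPages * pageSize
 const double limit_percent = 0.9;           // gpu_input_mem_limit_percent_
 const size_t gpu_bytes_limit = available_bytes * limit_percent;  // ~7.73e9 bytes
 const size_t num_bytes_for_row = 64;
 // The check throws QueryMustRunOnCpu once the device's accumulated tuple
 // count exceeds gpu_bytes_limit / num_bytes_for_row, about 120 million rows.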



void QueryFragmentDescriptor::computeAllTablesFragments ( std::map< shared::TableKey, const TableFragments * > &  all_tables_fragments,
const RelAlgExecutionUnit &  ra_exe_unit,
const std::vector< InputTableInfo > &  query_infos 
)
static

Definition at line 49 of file QueryFragmentDescriptor.cpp.

References CHECK_EQ, and RelAlgExecutionUnit::input_descs.

Referenced by ExecutionKernel::runImpl().

52  {
53  for (size_t tab_idx = 0; tab_idx < ra_exe_unit.input_descs.size(); ++tab_idx) {
54  const auto& table_key = ra_exe_unit.input_descs[tab_idx].getTableKey();
55  CHECK_EQ(query_infos[tab_idx].table_key, table_key);
56  const auto& fragments = query_infos[tab_idx].info.fragments;
57  if (!all_tables_fragments.count(table_key)) {
58  all_tables_fragments.insert(std::make_pair(table_key, &fragments));
59  }
60  }
61 }


bool QueryFragmentDescriptor::shouldCheckWorkUnitWatchdog ( ) const
inline

Definition at line 142 of file QueryFragmentDescriptor.h.

References execution_kernels_per_device_, and rowid_lookup_key_.

142  {
143  return rowid_lookup_key_ < 0 && !execution_kernels_per_device_.empty();
144  }
bool QueryFragmentDescriptor::terminateDispatchMaybe ( size_t &  tuple_count,
const RelAlgExecutionUnit &  ra_exe_unit,
const ExecutionKernelDescriptor &  kernel 
) const
protected

Definition at line 461 of file QueryFragmentDescriptor.cpp.

References anonymous_namespace{QueryFragmentDescriptor.cpp}::is_sample_query(), SortInfo::limit, SortInfo::offset, ExecutionKernelDescriptor::outer_tuple_count, and RelAlgExecutionUnit::sort_info.

Referenced by assignFragsToKernelDispatch().

464  {
465  const auto sample_query_limit =
466  ra_exe_unit.sort_info.limit + ra_exe_unit.sort_info.offset;
467  if (!kernel.outer_tuple_count) {
468  return false;
469  } else {
470  tuple_count += *kernel.outer_tuple_count;
471  if (is_sample_query(ra_exe_unit) && sample_query_limit > 0 &&
472  tuple_count >= sample_query_limit) {
473  return true;
474  }
475  }
476  return false;
477 }
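
For intuition, a worked example of the early-out (values are illustrative): a sample query with LIMIT 100 OFFSET 20 gives sample_query_limit = 120, and dispatch stops once the accumulated outer tuple counts reach it.

 size_t tuple_count = 0;
 const size_t sample_query_limit = 100 + 20;  // sort_info.limit + sort_info.offset
 for (const size_t outer_tuple_count : {50u, 40u, 45u}) {  // per-kernel counts
   tuple_count += outer_tuple_count;
   if (tuple_count >= sample_query_limit) {
     break;  // 50 + 40 + 45 = 135 >= 120: stop dispatching further kernels
   }
 }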



Member Data Documentation

std::vector<size_t> QueryFragmentDescriptor::allowed_outer_fragment_indices_
protected
std::map<size_t, size_t> QueryFragmentDescriptor::available_gpu_mem_bytes_
protected

Definition at line 157 of file QueryFragmentDescriptor.h.

Referenced by checkDeviceMemoryUsage(), and QueryFragmentDescriptor().

std::map<int, std::vector<ExecutionKernelDescriptor> > QueryFragmentDescriptor::execution_kernels_per_device_
protected
double QueryFragmentDescriptor::gpu_input_mem_limit_percent_
protected

Definition at line 155 of file QueryFragmentDescriptor.h.

Referenced by checkDeviceMemoryUsage().

size_t QueryFragmentDescriptor::outer_fragments_size_ = 0
protected
int64_t QueryFragmentDescriptor::rowid_lookup_key_ = -1
protected
std::map<shared::TableKey, const TableFragments*> QueryFragmentDescriptor::selected_tables_fragments_
protected
std::map<size_t, size_t> QueryFragmentDescriptor::tuple_count_per_device_
protected

Definition at line 156 of file QueryFragmentDescriptor.h.

Referenced by checkDeviceMemoryUsage().


The documentation for this class was generated from the following files:

QueryFragmentDescriptor.h
QueryFragmentDescriptor.cpp