OmniSciDB  a987f07e93
InputMetadata.cpp
1 /*
2  * Copyright 2022 HEAVY.AI, Inc.
3  *
4  * Licensed under the Apache License, Version 2.0 (the "License");
5  * you may not use this file except in compliance with the License.
6  * You may obtain a copy of the License at
7  *
8  * http://www.apache.org/licenses/LICENSE-2.0
9  *
10  * Unless required by applicable law or agreed to in writing, software
11  * distributed under the License is distributed on an "AS IS" BASIS,
12  * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13  * See the License for the specific language governing permissions and
14  * limitations under the License.
15  */
16 
17 #include "InputMetadata.h"
18 #include "Execute.h"
19 
20 #include "../Fragmenter/Fragmenter.h"
21 
22 #include <tbb/parallel_for.h>
23 #include <tbb/task_arena.h>
24 #include <future>
25 
26 extern bool g_enable_data_recycler;
27 extern bool g_use_chunk_metadata_cache;
28 
29 InputTableInfoCache::InputTableInfoCache(Executor* executor) : executor_(executor) {}
30 
31 namespace {
32 
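// Copies a TableInfo's chunk-key prefix, fragment list, and physical tuple
// count so cached entries can be handed out without aliasing the cache.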
33 Fragmenter_Namespace::TableInfo copy_table_info(
34  const Fragmenter_Namespace::TableInfo& table_info) {
35  Fragmenter_Namespace::TableInfo table_info_copy;
36  table_info_copy.chunkKeyPrefix = table_info.chunkKeyPrefix;
37  table_info_copy.fragments = table_info.fragments;
38  table_info_copy.setPhysicalNumTuples(table_info.getPhysicalNumTuples());
39  return table_info_copy;
40 }
41 
42 } // namespace
43 
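// Concatenates the fragment metadata of every physical shard of a table and
// sums their tuple counts into a single TableInfo.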
44 Fragmenter_Namespace::TableInfo build_table_info(
45  const std::vector<const TableDescriptor*>& shard_tables) {
46  size_t total_number_of_tuples{0};
47  Fragmenter_Namespace::TableInfo table_info_all_shards;
48  for (const TableDescriptor* shard_table : shard_tables) {
49  CHECK(shard_table->fragmenter);
50  const auto& shard_metainfo = shard_table->fragmenter->getFragmentsForQuery();
51  total_number_of_tuples += shard_metainfo.getPhysicalNumTuples();
52  table_info_all_shards.fragments.reserve(table_info_all_shards.fragments.size() +
53  shard_metainfo.fragments.size());
54  table_info_all_shards.fragments.insert(table_info_all_shards.fragments.end(),
55  shard_metainfo.fragments.begin(),
56  shard_metainfo.fragments.end());
57  }
58  table_info_all_shards.setPhysicalNumTuples(total_number_of_tuples);
59  return table_info_all_shards;
60 }
61 
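// Returns a copy of the cached TableInfo for table_id; on a cache miss, builds
// it from the catalog's physical shard descriptors and caches the result.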
62 Fragmenter_Namespace::TableInfo InputTableInfoCache::getTableInfo(const int table_id) {
63  const auto it = cache_.find(table_id);
64  if (it != cache_.end()) {
65  const auto& table_info = it->second;
66  return copy_table_info(table_info);
67  }
68  const auto cat = executor_->getCatalog();
69  CHECK(cat);
70  const auto td = cat->getMetadataForTable(table_id);
71  CHECK(td);
72  const auto shard_tables = cat->getPhysicalTablesDescriptors(td);
73  auto table_info = build_table_info(shard_tables);
74  auto it_ok = cache_.emplace(table_id, copy_table_info(table_info));
75  CHECK(it_ok.second);
76  return copy_table_info(table_info);
77 }
78 
79 void InputTableInfoCache::clear() {
80  decltype(cache_)().swap(cache_);
81 }
82 
83 namespace {
84 
85 bool uses_int_meta(const SQLTypeInfo& col_ti) {
86  return col_ti.is_integer() || col_ti.is_decimal() || col_ti.is_time() ||
87  col_ti.is_boolean() ||
88  (col_ti.is_string() && col_ti.get_compression() == kENCODING_DICT);
89 }
90 
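// Wraps an intermediate ResultSet in a single synthetic fragment so that query
// results can be scanned like a one-fragment table.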
91 Fragmenter_Namespace::TableInfo synthesize_table_info(const ResultSetPtr& rows) {
92  std::vector<Fragmenter_Namespace::FragmentInfo> result;
93  if (rows) {
94  result.resize(1);
95  auto& fragment = result.front();
96  fragment.fragmentId = 0;
97  fragment.deviceIds.resize(3);
98  fragment.resultSet = rows.get();
99  fragment.resultSetMutex.reset(new std::mutex());
100  }
101  Fragmenter_Namespace::TableInfo table_info;
102  table_info.fragments = result;
103  return table_info;
104 }
105 
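// Resolves each input descriptor to a TableInfo: negative table ids refer to
// temporary tables (previous query results) and are synthesized from the
// stored ResultSet, while physical tables go through the executor's table-info
// cache. Tables already seen in this call are reused via info_cache.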
106 void collect_table_infos(std::vector<InputTableInfo>& table_infos,
107  const std::vector<InputDescriptor>& input_descs,
108  Executor* executor) {
109  const auto temporary_tables = executor->getTemporaryTables();
110  const auto cat = executor->getCatalog();
111  CHECK(cat);
112  std::unordered_map<int, size_t> info_cache;
113  for (const auto& input_desc : input_descs) {
114  const auto table_id = input_desc.getTableId();
115  const auto cached_index_it = info_cache.find(table_id);
116  if (cached_index_it != info_cache.end()) {
117  CHECK_LT(cached_index_it->second, table_infos.size());
118  table_infos.push_back(
119  {table_id, copy_table_info(table_infos[cached_index_it->second].info)});
120  continue;
121  }
122  if (input_desc.getSourceType() == InputSourceType::RESULT) {
123  CHECK_LT(table_id, 0);
124  CHECK(temporary_tables);
125  const auto it = temporary_tables->find(table_id);
126  LOG_IF(FATAL, it == temporary_tables->end())
127  << "Failed to find previous query result for node " << -table_id;
128  table_infos.push_back({table_id, synthesize_table_info(it->second)});
129  } else {
130  CHECK(input_desc.getSourceType() == InputSourceType::TABLE);
131  table_infos.push_back({table_id, executor->getTableInfo(table_id)});
132  }
133  CHECK(!table_infos.empty());
134  info_cache.insert(std::make_pair(table_id, table_infos.size() - 1));
135  }
136 }
137 
138 } // namespace
139 
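// Computes min/max/has-nulls statistics for one output column of a table
// function. Buffers with fewer than 20,000 values are scanned serially; larger
// buffers are split across a TBB arena with at most hardware_concurrency()
// threads, roughly one per 20,000 values.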
140 template <typename T>
141 void compute_table_function_col_chunk_stats(
142  std::shared_ptr<ChunkMetadata>& chunk_metadata,
143  const T* values_buffer,
144  const size_t values_count,
145  const T null_val) {
146  T min_val{std::numeric_limits<T>::max()};
147  T max_val{std::numeric_limits<T>::lowest()};
148  bool has_nulls{false};
149  constexpr size_t parallel_stats_compute_threshold = 20000UL;
150  if (values_count < parallel_stats_compute_threshold) {
151  for (size_t row_idx = 0; row_idx < values_count; ++row_idx) {
152  const T cell_val = values_buffer[row_idx];
153  if (cell_val == null_val) {
154  has_nulls = true;
155  continue;
156  }
157  if (cell_val < min_val) {
158  min_val = cell_val;
159  }
160  if (cell_val > max_val) {
161  max_val = cell_val;
162  }
163  }
164  } else {
165  const size_t max_thread_count = std::thread::hardware_concurrency();
166  const size_t max_inputs_per_thread = 20000;
167  const size_t min_grain_size = max_inputs_per_thread / 2;
168  const size_t num_threads =
169  std::min(max_thread_count,
170  ((values_count + max_inputs_per_thread - 1) / max_inputs_per_thread));
171 
172  std::vector<T> threads_local_mins(num_threads, std::numeric_limits<T>::max());
173  std::vector<T> threads_local_maxes(num_threads, std::numeric_limits<T>::lowest());
174  std::vector<bool> threads_local_has_nulls(num_threads, false);
175  tbb::task_arena limited_arena(num_threads);
176 
177  limited_arena.execute([&] {
178  tbb::parallel_for(
179  tbb::blocked_range<size_t>(0, values_count, min_grain_size),
180  [&](const tbb::blocked_range<size_t>& r) {
181  const size_t start_idx = r.begin();
182  const size_t end_idx = r.end();
183  T local_min_val = std::numeric_limits<T>::max();
184  T local_max_val = std::numeric_limits<T>::lowest();
185  bool local_has_nulls = false;
186  for (size_t row_idx = start_idx; row_idx < end_idx; ++row_idx) {
187  const T cell_val = values_buffer[row_idx];
188  if (cell_val == null_val) {
189  local_has_nulls = true;
190  continue;
191  }
192  if (cell_val < local_min_val) {
193  local_min_val = cell_val;
194  }
195  if (cell_val > local_max_val) {
196  local_max_val = cell_val;
197  }
198  }
199  size_t thread_idx = tbb::this_task_arena::current_thread_index();
200  if (local_min_val < threads_local_mins[thread_idx]) {
201  threads_local_mins[thread_idx] = local_min_val;
202  }
203  if (local_max_val > threads_local_maxes[thread_idx]) {
204  threads_local_maxes[thread_idx] = local_max_val;
205  }
206  if (local_has_nulls) {
207  threads_local_has_nulls[thread_idx] = true;
208  }
209  },
210  tbb::simple_partitioner());
211  });
212 
213  for (size_t thread_idx = 0; thread_idx < num_threads; ++thread_idx) {
214  if (threads_local_mins[thread_idx] < min_val) {
215  min_val = threads_local_mins[thread_idx];
216  }
217  if (threads_local_maxes[thread_idx] > max_val) {
218  max_val = threads_local_maxes[thread_idx];
219  }
220  has_nulls |= threads_local_has_nulls[thread_idx];
221  }
222  }
223  chunk_metadata->fillChunkStats(min_val, max_val, has_nulls);
224 }
225 
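// Builds per-column chunk metadata directly from the columnar output buffers
// of a table function ResultSet. Array columns are stored in a FlatBuffer, and
// their stats are computed over the unnested values buffer.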
226 ChunkMetadataMap synthesize_metadata_table_function(const ResultSet* rows) {
227  CHECK(rows->getQueryMemDesc().getQueryDescriptionType() ==
228  QueryDescriptionType::TableFunction);
229  CHECK(rows->didOutputColumnar());
230  CHECK(!(rows->areAnyColumnsLazyFetched()));
231  const size_t col_count = rows->colCount();
232  const auto row_count = rows->entryCount();
233 
234  ChunkMetadataMap chunk_metadata_map;
235 
236  for (size_t col_idx = 0; col_idx < col_count; ++col_idx) {
237  std::shared_ptr<ChunkMetadata> chunk_metadata = std::make_shared<ChunkMetadata>();
238  const int8_t* columnar_buffer = const_cast<int8_t*>(rows->getColumnarBuffer(col_idx));
239  const auto col_sql_type_info = rows->getColType(col_idx);
240  // Here, min/max of a column of arrays, col, is defined as
241  // min/max(unnest(col)). That is, if is_array is true, the
242  // metadata is supposed to be synthesized for a query like `SELECT
243  // UNNEST(col_of_arrays) ... GROUP BY ...`. How can we verify that
244  // here?
245  bool is_array = col_sql_type_info.is_array();
246  const auto col_type =
247  (is_array ? col_sql_type_info.get_subtype() : col_sql_type_info.get_type());
248  const auto col_type_info =
249  (is_array ? col_sql_type_info.get_elem_type() : col_sql_type_info);
250 
251  chunk_metadata->sqlType = col_type_info;
252  chunk_metadata->numElements = row_count;
253 
254  const int8_t* values_buffer;
255  size_t values_count;
256  if (is_array) {
257  CHECK(FlatBufferManager::isFlatBuffer(columnar_buffer));
258  FlatBufferManager m{const_cast<int8_t*>(columnar_buffer)};
259  chunk_metadata->numBytes = m.flatbufferSize();
260  values_count = m.VarlenArray_nof_values();
261  values_buffer = m.VarlenArray_values();
262  } else {
263  chunk_metadata->numBytes = row_count * col_type_info.get_size();
264  values_count = row_count;
265  values_buffer = columnar_buffer;
266  }
267 
268  if (col_type != kTEXT) {
269  CHECK(col_type_info.get_compression() == kENCODING_NONE);
270  } else {
271  CHECK(col_type_info.get_compression() == kENCODING_DICT);
272  CHECK_EQ(col_type_info.get_size(), sizeof(int32_t));
273  }
274 
275  switch (col_type) {
276  case kBOOLEAN:
277  case kTINYINT:
278  compute_table_function_col_chunk_stats(
279  chunk_metadata,
280  values_buffer,
281  values_count,
282  static_cast<int8_t>(inline_fixed_encoding_null_val(col_type_info)));
283  break;
284  case kSMALLINT:
285  compute_table_function_col_chunk_stats(
286  chunk_metadata,
287  reinterpret_cast<const int16_t*>(values_buffer),
288  values_count,
289  static_cast<int16_t>(inline_fixed_encoding_null_val(col_type_info)));
290  break;
291  case kINT:
292  case kTEXT:
293  compute_table_function_col_chunk_stats(
294  chunk_metadata,
295  reinterpret_cast<const int32_t*>(values_buffer),
296  values_count,
297  static_cast<int32_t>(inline_fixed_encoding_null_val(col_type_info)));
298  break;
299  case kBIGINT:
300  case kTIMESTAMP:
301  compute_table_function_col_chunk_stats(
302  chunk_metadata,
303  reinterpret_cast<const int64_t*>(values_buffer),
304  values_count,
305  static_cast<int64_t>(inline_fixed_encoding_null_val(col_type_info)));
306  break;
307  case kFLOAT:
308  // For float use the typed null accessor as the generic one converts to double,
309  // and we do not want to risk loss of precision
310  compute_table_function_col_chunk_stats(
311  chunk_metadata,
312  reinterpret_cast<const float*>(values_buffer),
313  values_count,
314  inline_fp_null_value<float>());
315  break;
316  case kDOUBLE:
317  compute_table_function_col_chunk_stats(
318  chunk_metadata,
319  reinterpret_cast<const double*>(values_buffer),
320  values_count,
321  inline_fp_null_value<double>());
322  break;
323  default:
324  UNREACHABLE();
325  }
326  chunk_metadata_map.emplace(col_idx, chunk_metadata);
327  }
328  return chunk_metadata_map;
329 }
330 
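// Synthesizes chunk metadata for an intermediate ResultSet by streaming its
// rows through dummy Encoders, splitting the entries across multiple workers
// when parallel row access is possible and scanning serially otherwise.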
331 ChunkMetadataMap synthesize_metadata(const ResultSet* rows) {
332  auto timer = DEBUG_TIMER(__func__);
333  ChunkMetadataMap metadata_map;
334 
335  if (rows->definitelyHasNoRows()) {
336  // resultset has no valid storage, so we fill dummy metadata and return early
337  std::vector<std::unique_ptr<Encoder>> decoders;
338  for (size_t i = 0; i < rows->colCount(); ++i) {
339  decoders.emplace_back(Encoder::Create(nullptr, rows->getColType(i)));
340  const auto it_ok =
341  metadata_map.emplace(i, decoders.back()->getMetadata(rows->getColType(i)));
342  CHECK(it_ok.second);
343  }
344  return metadata_map;
345  }
346 
347  std::vector<std::vector<std::unique_ptr<Encoder>>> dummy_encoders;
348  const size_t worker_count =
349  use_parallel_algorithms(*rows) ? cpu_threads() : 1;
350  for (size_t worker_idx = 0; worker_idx < worker_count; ++worker_idx) {
351  dummy_encoders.emplace_back();
352  for (size_t i = 0; i < rows->colCount(); ++i) {
353  const auto& col_ti = rows->getColType(i);
354  dummy_encoders.back().emplace_back(Encoder::Create(nullptr, col_ti));
355  }
356  }
357 
358  if (rows->getQueryMemDesc().getQueryDescriptionType() ==
359  QueryDescriptionType::TableFunction) {
360  return synthesize_metadata_table_function(rows);
361  }
362  rows->moveToBegin();
363  const auto do_work = [rows](const std::vector<TargetValue>& crt_row,
364  std::vector<std::unique_ptr<Encoder>>& dummy_encoders) {
365  for (size_t i = 0; i < rows->colCount(); ++i) {
366  const auto& col_ti = rows->getColType(i);
367  const auto& col_val = crt_row[i];
368  const auto scalar_col_val = boost::get<ScalarTargetValue>(&col_val);
369  CHECK(scalar_col_val);
370  if (uses_int_meta(col_ti)) {
371  const auto i64_p = boost::get<int64_t>(scalar_col_val);
372  CHECK(i64_p);
373  dummy_encoders[i]->updateStats(*i64_p, *i64_p == inline_int_null_val(col_ti));
374  } else if (col_ti.is_fp()) {
375  switch (col_ti.get_type()) {
376  case kFLOAT: {
377  const auto float_p = boost::get<float>(scalar_col_val);
378  CHECK(float_p);
379  dummy_encoders[i]->updateStats(*float_p,
380  *float_p == inline_fp_null_val(col_ti));
381  break;
382  }
383  case kDOUBLE: {
384  const auto double_p = boost::get<double>(scalar_col_val);
385  CHECK(double_p);
386  dummy_encoders[i]->updateStats(*double_p,
387  *double_p == inline_fp_null_val(col_ti));
388  break;
389  }
390  default:
391  CHECK(false);
392  }
393  } else {
394  throw std::runtime_error(col_ti.get_type_name() +
395  " is not supported in temporary table.");
396  }
397  }
398  };
399  if (use_parallel_algorithms(*rows)) {
400  const size_t worker_count = cpu_threads();
401  std::vector<std::future<void>> compute_stats_threads;
402  const auto entry_count = rows->entryCount();
403  for (size_t i = 0,
404  start_entry = 0,
405  stride = (entry_count + worker_count - 1) / worker_count;
406  i < worker_count && start_entry < entry_count;
407  ++i, start_entry += stride) {
408  const auto end_entry = std::min(start_entry + stride, entry_count);
409  compute_stats_threads.push_back(std::async(
410  std::launch::async,
411  [rows, &do_work, &dummy_encoders](
412  const size_t start, const size_t end, const size_t worker_idx) {
413  for (size_t i = start; i < end; ++i) {
414  const auto crt_row = rows->getRowAtNoTranslations(i);
415  if (!crt_row.empty()) {
416  do_work(crt_row, dummy_encoders[worker_idx]);
417  }
418  }
419  },
420  start_entry,
421  end_entry,
422  i));
423  }
424  for (auto& child : compute_stats_threads) {
425  child.wait();
426  }
427  for (auto& child : compute_stats_threads) {
428  child.get();
429  }
430  } else {
431  while (true) {
432  auto crt_row = rows->getNextRow(false, false);
433  if (crt_row.empty()) {
434  break;
435  }
436  do_work(crt_row, dummy_encoders[0]);
437  }
438  }
439  rows->moveToBegin();
440  for (size_t worker_idx = 1; worker_idx < worker_count; ++worker_idx) {
441  CHECK_LT(worker_idx, dummy_encoders.size());
442  const auto& worker_encoders = dummy_encoders[worker_idx];
443  for (size_t i = 0; i < rows->colCount(); ++i) {
444  dummy_encoders[0][i]->reduceStats(*worker_encoders[i]);
445  }
446  }
447  for (size_t i = 0; i < rows->colCount(); ++i) {
448  const auto it_ok =
449  metadata_map.emplace(i, dummy_encoders[0][i]->getMetadata(rows->getColType(i)));
450  CHECK(it_ok.second);
451  }
452  return metadata_map;
453 }
454 
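// Temporary tables (negative ids) always count as a single fragment; physical
// tables report the fragment count from their TableInfo.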
455 size_t get_frag_count_of_table(const int table_id, Executor* executor) {
456  const auto temporary_tables = executor->getTemporaryTables();
457  CHECK(temporary_tables);
458  auto it = temporary_tables->find(table_id);
459  if (it != temporary_tables->end()) {
460  CHECK_GE(int(0), table_id);
461  return size_t(1);
462  } else {
463  const auto table_info = executor->getTableInfo(table_id);
464  return table_info.fragments.size();
465  }
466 }
467 
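// Typical use (sketch): given a RelAlgExecutionUnit `ra_exe_unit` and an
// Executor* `executor`,
//   const auto table_infos = get_table_infos(ra_exe_unit, executor);
// returns one InputTableInfo per input descriptor, in descriptor order.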
468 std::vector<InputTableInfo> get_table_infos(
469  const std::vector<InputDescriptor>& input_descs,
470  Executor* executor) {
471  std::vector<InputTableInfo> table_infos;
472  collect_table_infos(table_infos, input_descs, executor);
473  return table_infos;
474 }
475 
476 std::vector<InputTableInfo> get_table_infos(const RelAlgExecutionUnit& ra_exe_unit,
477  Executor* executor) {
478  std::vector<InputTableInfo> table_infos;
479  collect_table_infos(table_infos, ra_exe_unit.input_descs, executor);
480  return table_infos;
481 }
482 
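// Lazily synthesizes chunk metadata for a fragment backed by a ResultSet,
// consulting the result-set recycler cache first when it is enabled and
// caching newly computed metadata for reuse.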
483 const ChunkMetadataMap& Fragmenter_Namespace::FragmentInfo::getChunkMetadataMap() const {
484  if (resultSet && !synthesizedMetadataIsValid) {
485  bool need_to_compute_metadata = true;
486  // we disable the chunk metadata recycler when filter pushdown is enabled
487  // since re-executing the query invalidates the cached metadata
488  // todo(yoonmin): relax this
489  bool enable_chunk_metadata_cache = g_enable_data_recycler &&
490  g_use_chunk_metadata_cache &&
491  !g_enable_filter_push_down;
492  auto executor = Executor::getExecutor(Executor::UNITARY_EXECUTOR_ID);
493  if (enable_chunk_metadata_cache) {
494  std::optional<ChunkMetadataMap> cached =
495  executor->getRecultSetRecyclerHolder().getCachedChunkMetadata(
496  resultSet->getQueryPlanHash());
497  if (cached) {
498  chunkMetadataMap = *cached;
499  need_to_compute_metadata = false;
500  }
501  }
502  if (need_to_compute_metadata) {
503  chunkMetadataMap = synthesize_metadata(resultSet);
504  if (enable_chunk_metadata_cache && !chunkMetadataMap.empty()) {
505  executor->getRecultSetRecyclerHolder().putChunkMetadataToCache(
506  resultSet->getQueryPlanHash(),
507  resultSet->getInputTableKeys(),
508  chunkMetadataMap);
509  }
510  }
511  synthesizedMetadataIsValid = true;
512  }
513  return chunkMetadataMap;
514 }
515 
516 ChunkMetadataMap Fragmenter_Namespace::FragmentInfo::getChunkMetadataMapPhysicalCopy()
517  const {
518  ChunkMetadataMap metadata_map;
519  for (const auto& [column_id, chunk_metadata] : chunkMetadataMap) {
520  metadata_map[column_id] = std::make_shared<ChunkMetadata>(*chunk_metadata);
521  }
522  return metadata_map;
523 }
524 
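// For ResultSet-backed fragments the tuple count is computed lazily via
// ResultSet::rowCount(), taking resultSetMutex when one is present.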
525 size_t Fragmenter_Namespace::FragmentInfo::getNumTuples() const {
526  std::unique_ptr<std::lock_guard<std::mutex>> lock;
527  if (resultSetMutex) {
528  lock.reset(new std::lock_guard<std::mutex>(*resultSetMutex));
529  }
530  CHECK_EQ(!!resultSet, !!resultSetMutex);
531  if (resultSet && !synthesizedNumTuplesIsValid) {
532  numTuples = resultSet->rowCount();
533  synthesizedNumTuplesIsValid = true;
534  }
535  return numTuples;
536 }
537 
538 size_t Fragmenter_Namespace::TableInfo::getNumTuples() const {
539  if (!fragments.empty() && fragments.front().resultSet) {
540  return fragments.front().getNumTuples();
541  }
542  return numTuples;
543 }
544 
545 size_t Fragmenter_Namespace::TableInfo::getNumTuplesUpperBound() const {
546  if (!fragments.empty() && fragments.front().resultSet) {
547  return fragments.front().resultSet->entryCount();
548  }
549  return numTuples;
550 }
551 
552 size_t Fragmenter_Namespace::TableInfo::getFragmentNumTuplesUpperBound() const {
553  if (!fragments.empty() && fragments.front().resultSet) {
554  return fragments.front().resultSet->entryCount();
555  }
556  size_t fragment_num_tupples_upper_bound = 0;
557  for (const auto& fragment : fragments) {
558  fragment_num_tupples_upper_bound =
559  std::max(fragment.getNumTuples(), fragment_num_tupples_upper_bound);
560  }
561  return fragment_num_tupples_upper_bound;
562 }