OmniSciDB  04ee39c94c
anonymous_namespace{Execute.cpp} Namespace Reference

Classes

class  OutVecOwner
 

Functions

ResultSetPtr get_merged_result (std::vector< std::pair< ResultSetPtr, std::vector< size_t >>> &results_per_device)
 
size_t compute_buffer_entry_guess (const std::vector< InputTableInfo > &query_infos)
 
std::string get_table_name (const InputDescriptor &input_desc, const Catalog_Namespace::Catalog &cat)
 
size_t getDeviceBasedScanLimit (const ExecutorDeviceType device_type, const int device_count)
 
void checkWorkUnitWatchdog (const RelAlgExecutionUnit &ra_exe_unit, const std::vector< InputTableInfo > &table_infos, const Catalog_Namespace::Catalog &cat, const ExecutorDeviceType device_type, const int device_count)
 
RelAlgExecutionUnit replace_scan_limit (const RelAlgExecutionUnit &ra_exe_unit_in, const size_t new_scan_limit)
 
int64_t inline_null_val (const SQLTypeInfo &ti, const bool float_argument_input)
 
void fill_entries_for_empty_input (std::vector< TargetInfo > &target_infos, std::vector< int64_t > &entry, const std::vector< Analyzer::Expr *> &target_exprs, const QueryMemoryDescriptor &query_mem_desc)
 
ResultSetPtr build_row_for_empty_input (const std::vector< Analyzer::Expr *> &target_exprs_in, const QueryMemoryDescriptor &query_mem_desc, const ExecutorDeviceType device_type)
 
bool has_lazy_fetched_columns (const std::vector< ColumnLazyFetchInfo > &fetched_cols)
 
const ColumnDescriptor * try_get_column_descriptor (const InputColDescriptor *col_desc, const Catalog_Namespace::Catalog &cat)
 
bool check_rows_less_than_needed (const ResultSetPtr &results, const size_t scan_limit)
 
template<class T >
int64_t insert_one_dict_str (T *col_data, const std::string &columnName, const SQLTypeInfo &columnType, const Analyzer::Constant *col_cv, const Catalog_Namespace::Catalog &catalog)
 
template<class T >
int64_t insert_one_dict_str (T *col_data, const ColumnDescriptor *cd, const Analyzer::Constant *col_cv, const Catalog_Namespace::Catalog &catalog)
 
int64_t get_hpt_scaled_value (const int64_t &val, const int32_t &ldim, const int32_t &rdim)
 

Function Documentation

◆ build_row_for_empty_input()

ResultSetPtr anonymous_namespace{Execute.cpp}::build_row_for_empty_input ( const std::vector< Analyzer::Expr *> &  target_exprs_in,
const QueryMemoryDescriptor &  query_mem_desc,
const ExecutorDeviceType  device_type 
)

Definition at line 1427 of file Execute.cpp.

References CHECK, fill_entries_for_empty_input(), and QueryMemoryDescriptor::getExecutor().

Referenced by Executor::collectAllDeviceResults().

1430  {
1431  std::vector<std::shared_ptr<Analyzer::Expr>> target_exprs_owned_copies;
1432  std::vector<Analyzer::Expr*> target_exprs;
1433  for (const auto target_expr : target_exprs_in) {
1434  const auto target_expr_copy =
1435  std::dynamic_pointer_cast<Analyzer::AggExpr>(target_expr->deep_copy());
1436  CHECK(target_expr_copy);
1437  auto ti = target_expr->get_type_info();
1438  ti.set_notnull(false);
1439  target_expr_copy->set_type_info(ti);
1440  if (target_expr_copy->get_arg()) {
1441  auto arg_ti = target_expr_copy->get_arg()->get_type_info();
1442  arg_ti.set_notnull(false);
1443  target_expr_copy->get_arg()->set_type_info(arg_ti);
1444  }
1445  target_exprs_owned_copies.push_back(target_expr_copy);
1446  target_exprs.push_back(target_expr_copy.get());
1447  }
1448  std::vector<TargetInfo> target_infos;
1449  std::vector<int64_t> entry;
1450  fill_entries_for_empty_input(target_infos, entry, target_exprs, query_mem_desc);
1451  const auto executor = query_mem_desc.getExecutor();
1452  CHECK(executor);
1453  auto row_set_mem_owner = executor->getRowSetMemoryOwner();
1454  CHECK(row_set_mem_owner);
1455  auto rs = std::make_shared<ResultSet>(
1456  target_infos, device_type, query_mem_desc, row_set_mem_owner, executor);
1457  rs->allocateStorage();
1458  rs->fillOneEntry(entry);
1459  return rs;
1460 }
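When every device comes back empty, this routine synthesizes the single row an aggregate query still has to return (0 for COUNT, inline NULL for the rest). A minimal, hedged sketch of a call site; the variable names are assumptions for illustration, not the actual Executor::collectAllDeviceResults code:

 if (results_per_device.empty()) {
   // No fragments produced output; hand back one synthesized row, e.g. 0 for
   // COUNT(*) and an inline NULL for every other aggregate target.
   return build_row_for_empty_input(target_exprs, query_mem_desc, device_type);
 }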

◆ check_rows_less_than_needed()

bool anonymous_namespace{Execute.cpp}::check_rows_less_than_needed ( const ResultSetPtr &  results,
const size_t  scan_limit 
)

Definition at line 2267 of file Execute.cpp.

References CHECK.

Referenced by Executor::executePlanWithGroupBy().

2267  {
2268  CHECK(scan_limit);
2269  return results && results->rowCount() < scan_limit;
2270 }

◆ checkWorkUnitWatchdog()

void anonymous_namespace{Execute.cpp}::checkWorkUnitWatchdog ( const RelAlgExecutionUnit &  ra_exe_unit,
const std::vector< InputTableInfo > &  table_infos,
const Catalog_Namespace::Catalog &  cat,
const ExecutorDeviceType  device_type,
const int  device_count 
)

Definition at line 1008 of file Execute.cpp.

References SortInfo::algorithm, get_table_name(), getDeviceBasedScanLimit(), RelAlgExecutionUnit::groupby_exprs, Executor::high_scan_limit, RelAlgExecutionUnit::input_descs, join(), RelAlgExecutionUnit::scan_limit, RelAlgExecutionUnit::sort_info, StreamingTopN, RelAlgExecutionUnit::target_exprs, to_string(), and RelAlgExecutionUnit::use_bump_allocator.

Referenced by Executor::dispatchFragments().

1012  {
1013  for (const auto target_expr : ra_exe_unit.target_exprs) {
1014  if (dynamic_cast<const Analyzer::AggExpr*>(target_expr)) {
1015  return;
1016  }
1017  }
1018  if (!ra_exe_unit.scan_limit && table_infos.size() == 1 &&
1019  table_infos.front().info.getPhysicalNumTuples() < Executor::high_scan_limit) {
1020  // Allow a query with no scan limit to run on small tables
1021  return;
1022  }
1023  if (ra_exe_unit.use_bump_allocator) {
1024  // Bump allocator removes the scan limit (and any knowledge of the size of the output
1025  // relative to the size of the input), so we bypass this check for now
1026  return;
1027  }
1028  if (ra_exe_unit.sort_info.algorithm != SortAlgorithm::StreamingTopN &&
1029  ra_exe_unit.groupby_exprs.size() == 1 && !ra_exe_unit.groupby_exprs.front() &&
1030  (!ra_exe_unit.scan_limit ||
1031  ra_exe_unit.scan_limit > getDeviceBasedScanLimit(device_type, device_count))) {
1032  std::vector<std::string> table_names;
1033  const auto& input_descs = ra_exe_unit.input_descs;
1034  for (const auto& input_desc : input_descs) {
1035  table_names.push_back(get_table_name(input_desc, cat));
1036  }
1037  if (!ra_exe_unit.scan_limit) {
1038  throw WatchdogException(
1039  "Projection query would require a scan without a limit on table(s): " +
1040  boost::algorithm::join(table_names, ", "));
1041  } else {
1042  throw WatchdogException(
1043  "Projection query output result set on table(s): " +
1044  boost::algorithm::join(table_names, ", ") + " would contain " +
1045  std::to_string(ra_exe_unit.scan_limit) +
1046  " rows, which is more than the current system limit of " +
1047  std::to_string(getDeviceBasedScanLimit(device_type, device_count)));
1048  }
1049  }
1050 }
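A hedged sketch of how a dispatcher might invoke this check and surface the failure; the surrounding variable names and error handling are assumptions, not the actual Executor::dispatchFragments code:

 try {
   checkWorkUnitWatchdog(ra_exe_unit, table_infos, cat, device_type, device_count);
 } catch (const WatchdogException& e) {
   // An unaggregated projection with no LIMIT (or one above the device-based
   // limit) over a large table ends up here.
   LOG(WARNING) << e.what();
   throw;
 }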

◆ compute_buffer_entry_guess()

size_t anonymous_namespace{Execute.cpp}::compute_buffer_entry_guess ( const std::vector< InputTableInfo > &  query_infos)

Definition at line 958 of file Execute.cpp.

References CHECK.

Referenced by Executor::executeWorkUnitImpl().

958  {
959  using Fragmenter_Namespace::FragmentInfo;
960  // Check for overflows since we're multiplying potentially big table sizes.
961  using checked_size_t = boost::multiprecision::number<
962  boost::multiprecision::cpp_int_backend<64,
963  64,
964  boost::multiprecision::unsigned_magnitude,
965  boost::multiprecision::checked,
966  void>>;
967  checked_size_t max_groups_buffer_entry_guess = 1;
968  for (const auto& query_info : query_infos) {
969  CHECK(!query_info.info.fragments.empty());
970  auto it = std::max_element(query_info.info.fragments.begin(),
971  query_info.info.fragments.end(),
972  [](const FragmentInfo& f1, const FragmentInfo& f2) {
973  return f1.getNumTuples() < f2.getNumTuples();
974  });
975  max_groups_buffer_entry_guess *= it->getNumTuples();
976  }
977  // Cap the rough approximation to 100M entries, it's unlikely we can do a great job for
978  // baseline group layout with that many entries anyway.
979  constexpr size_t max_groups_buffer_entry_guess_cap = 100000000;
980  try {
981  return std::min(static_cast<size_t>(max_groups_buffer_entry_guess),
982  max_groups_buffer_entry_guess_cap);
983  } catch (...) {
984  return max_groups_buffer_entry_guess_cap;
985  }
986 }
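The overflow protection relies on Boost.Multiprecision's checked 64-bit backend, which throws instead of silently wrapping. A standalone sketch of that behavior under the same type alias; the fragment sizes are made up:

 #include <boost/multiprecision/cpp_int.hpp>
 #include <iostream>
 #include <stdexcept>

 int main() {
   using checked_size_t = boost::multiprecision::number<boost::multiprecision::cpp_int_backend<
       64, 64, boost::multiprecision::unsigned_magnitude, boost::multiprecision::checked, void>>;
   checked_size_t guess = 1;
   try {
     guess *= 3000000000ULL;  // largest fragment of table A
     guess *= 4000000000ULL;  // largest fragment of table B: ~1.2e19 still fits in 64 bits
     guess *= 2;              // exceeds 2^64 - 1, so the checked backend throws
   } catch (const std::overflow_error&) {
     std::cout << "overflow caught; fall back to the 100M cap\n";
   }
 }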

◆ fill_entries_for_empty_input()

void anonymous_namespace{Execute.cpp}::fill_entries_for_empty_input ( std::vector< TargetInfo > &  target_infos,
std::vector< int64_t > &  entry,
const std::vector< Analyzer::Expr *> &  target_exprs,
const QueryMemoryDescriptor &  query_mem_desc 
)

Definition at line 1371 of file Execute.cpp.

References Bitmap, CHECK, checked_calloc(), g_bigint_count, g_cluster, get_target_info(), QueryMemoryDescriptor::getCountDistinctDescriptor(), QueryMemoryDescriptor::getExecutor(), inline_null_val(), kAPPROX_COUNT_DISTINCT, kAVG, kCOUNT, kSAMPLE, StdSet, and takes_float_argument().

Referenced by build_row_for_empty_input().

1374  {
1375  for (size_t target_idx = 0; target_idx < target_exprs.size(); ++target_idx) {
1376  const auto target_expr = target_exprs[target_idx];
1377  const auto agg_info = get_target_info(target_expr, g_bigint_count);
1378  CHECK(agg_info.is_agg);
1379  target_infos.push_back(agg_info);
1380  if (g_cluster) {
1381  const auto executor = query_mem_desc.getExecutor();
1382  CHECK(executor);
1383  auto row_set_mem_owner = executor->getRowSetMemoryOwner();
1384  CHECK(row_set_mem_owner);
1385  const auto& count_distinct_desc =
1386  query_mem_desc.getCountDistinctDescriptor(target_idx);
1387  if (count_distinct_desc.impl_type_ == CountDistinctImplType::Bitmap) {
1388  auto count_distinct_buffer = static_cast<int8_t*>(
1389  checked_calloc(count_distinct_desc.bitmapPaddedSizeBytes(), 1));
1390  CHECK(row_set_mem_owner);
1391  row_set_mem_owner->addCountDistinctBuffer(
1392  count_distinct_buffer, count_distinct_desc.bitmapPaddedSizeBytes(), true);
1393  entry.push_back(reinterpret_cast<int64_t>(count_distinct_buffer));
1394  continue;
1395  }
1396  if (count_distinct_desc.impl_type_ == CountDistinctImplType::StdSet) {
1397  auto count_distinct_set = new std::set<int64_t>();
1398  CHECK(row_set_mem_owner);
1399  row_set_mem_owner->addCountDistinctSet(count_distinct_set);
1400  entry.push_back(reinterpret_cast<int64_t>(count_distinct_set));
1401  continue;
1402  }
1403  }
1404  const bool float_argument_input = takes_float_argument(agg_info);
1405  if (agg_info.agg_kind == kCOUNT || agg_info.agg_kind == kAPPROX_COUNT_DISTINCT) {
1406  entry.push_back(0);
1407  } else if (agg_info.agg_kind == kAVG) {
1408  entry.push_back(inline_null_val(agg_info.sql_type, float_argument_input));
1409  entry.push_back(0);
1410  } else if (agg_info.agg_kind == kSAMPLE) {
1411  if (agg_info.sql_type.is_geometry()) {
1412  for (int i = 0; i < agg_info.sql_type.get_physical_coord_cols() * 2; i++) {
1413  entry.push_back(0);
1414  }
1415  } else if (agg_info.sql_type.is_varlen()) {
1416  entry.push_back(0);
1417  entry.push_back(0);
1418  } else {
1419  entry.push_back(inline_null_val(agg_info.sql_type, float_argument_input));
1420  }
1421  } else {
1422  entry.push_back(inline_null_val(agg_info.sql_type, float_argument_input));
1423  }
1424  }
1425 }
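A hedged worked example of the entry layout the loop above produces; the target list and column types are hypothetical:

 // Targets: COUNT(*), AVG(x) with x DOUBLE, MAX(y) with y INT  (empty input)
 //   entry[0] = 0                                    // COUNT(*) starts at zero
 //   entry[1] = inline_null_val(DOUBLE type, ...)    // AVG: sum slot is NULL
 //   entry[2] = 0                                    // AVG: count slot is zero
 //   entry[3] = inline_null_val(INT type, ...)       // MAX: NULL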

◆ get_hpt_scaled_value()

int64_t anonymous_namespace{Execute.cpp}::get_hpt_scaled_value ( const int64_t &  val,
const int32_t &  ldim,
const int32_t &  rdim 
)

Definition at line 2938 of file Execute.cpp.

References CHECK, and DateTimeUtils::get_timestamp_precision_scale().

Referenced by Executor::skipFragment().

2940  {
2941  CHECK(ldim != rdim);
2942  return ldim > rdim ? val / DateTimeUtils::get_timestamp_precision_scale(ldim - rdim)
2943  : val * DateTimeUtils::get_timestamp_precision_scale(rdim - ldim);
2944 }
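A worked example, assuming DateTimeUtils::get_timestamp_precision_scale(3) returns 1000:

 // TIMESTAMP(6) value to TIMESTAMP(3): ldim = 6 > rdim = 3, so divide by 10^(6-3)
 //   get_hpt_scaled_value(1609459200123456, 6, 3) == 1609459200123
 // TIMESTAMP(3) value to TIMESTAMP(6): ldim = 3 < rdim = 6, so multiply by 10^(6-3)
 //   get_hpt_scaled_value(1609459200123, 3, 6)    == 1609459200123000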

◆ get_merged_result()

ResultSetPtr anonymous_namespace{Execute.cpp}::get_merged_result ( std::vector< std::pair< ResultSetPtr, std::vector< size_t >>> &  results_per_device)

Definition at line 784 of file Execute.cpp.

References CHECK.

Referenced by Executor::resultsUnion().

785  {
786  auto& first = results_per_device.front().first;
787  CHECK(first);
788  for (size_t dev_idx = 1; dev_idx < results_per_device.size(); ++dev_idx) {
789  const auto& next = results_per_device[dev_idx].first;
790  CHECK(next);
791  first->append(*next);
792  }
793  return std::move(first);
794 }
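A standalone sketch of the same append-into-the-first-slot pattern, using std::vector as a stand-in for ResultSet; the explicit std::move is what lets the shared_ptr leave the per-device vector without a reference-count bump:

 #include <memory>
 #include <utility>
 #include <vector>

 using RowsPtr = std::shared_ptr<std::vector<int>>;  // stand-in for ResultSetPtr

 RowsPtr merge(std::vector<std::pair<RowsPtr, std::vector<size_t>>>& per_device) {
   auto& first = per_device.front().first;
   for (size_t i = 1; i < per_device.size(); ++i) {
     const auto& next = per_device[i].first;
     first->insert(first->end(), next->begin(), next->end());  // analogue of ResultSet::append
   }
   return std::move(first);  // slot 0 of per_device is left holding a moved-from pointer
 }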

◆ get_table_name()

std::string anonymous_namespace{Execute.cpp}::get_table_name ( const InputDescriptor &  input_desc,
const Catalog_Namespace::Catalog &  cat 
)

Definition at line 988 of file Execute.cpp.

References CHECK, Catalog_Namespace::Catalog::getMetadataForTable(), InputDescriptor::getSourceType(), InputDescriptor::getTableId(), TABLE, and to_string().

Referenced by checkWorkUnitWatchdog().

989  {
990  const auto source_type = input_desc.getSourceType();
991  if (source_type == InputSourceType::TABLE) {
992  const auto td = cat.getMetadataForTable(input_desc.getTableId());
993  CHECK(td);
994  return td->tableName;
995  } else {
996  return "$TEMPORARY_TABLE" + std::to_string(-input_desc.getTableId());
997  }
998 }
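A hedged illustration of the two branches; the table name and ids are hypothetical:

 // Source type TABLE, table id 7   -> catalog lookup, e.g. returns "flights_2008"
 // Temporary source, table id -42  -> no catalog entry, returns "$TEMPORARY_TABLE42"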

◆ getDeviceBasedScanLimit()

size_t anonymous_namespace{Execute.cpp}::getDeviceBasedScanLimit ( const ExecutorDeviceType  device_type,
const int  device_count 
)
inline

Definition at line 1000 of file Execute.cpp.

References GPU, and Executor::high_scan_limit.

Referenced by checkWorkUnitWatchdog().

1001  {
1002  if (device_type == ExecutorDeviceType::GPU) {
1003  return device_count * Executor::high_scan_limit;
1004  }
1005  return Executor::high_scan_limit;
1006 }
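A worked example of the limit computation; the device count is hypothetical:

 // GPU, 4 devices -> 4 * Executor::high_scan_limit
 // CPU            -> Executor::high_scan_limit (the fall-through return at line 1005)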

◆ has_lazy_fetched_columns()

bool anonymous_namespace{Execute.cpp}::has_lazy_fetched_columns ( const std::vector< ColumnLazyFetchInfo > &  fetched_cols)

Definition at line 1555 of file Execute.cpp.

Referenced by Executor::dispatchFragments().

1555  {
1556  for (const auto& col : fetched_cols) {
1557  if (col.is_lazily_fetched) {
1558  return true;
1559  }
1560  }
1561  return false;
1562 }

◆ inline_null_val()

int64_t anonymous_namespace{Execute.cpp}::inline_null_val ( const SQLTypeInfo &  ti,
const bool  float_argument_input 
)

Definition at line 1356 of file Execute.cpp.

References CHECK, SQLTypeInfoCore< TYPE_FACET_PACK >::get_type(), inline_fp_null_val(), inline_int_null_val(), SQLTypeInfoCore< TYPE_FACET_PACK >::is_boolean(), SQLTypeInfoCore< TYPE_FACET_PACK >::is_fp(), SQLTypeInfoCore< TYPE_FACET_PACK >::is_number(), SQLTypeInfoCore< TYPE_FACET_PACK >::is_string(), SQLTypeInfoCore< TYPE_FACET_PACK >::is_time(), and kFLOAT.

Referenced by fill_entries_for_empty_input().

1356  {
1357  CHECK(ti.is_number() || ti.is_time() || ti.is_boolean() || ti.is_string());
1358  if (ti.is_fp()) {
1359  if (float_argument_input && ti.get_type() == kFLOAT) {
1360  int64_t float_null_val = 0;
1361  *reinterpret_cast<float*>(may_alias_ptr(&float_null_val)) =
1362  static_cast<float>(inline_fp_null_val(ti));
1363  return float_null_val;
1364  }
1365  const auto double_null_val = inline_fp_null_val(ti);
1366  return *reinterpret_cast<const int64_t*>(may_alias_ptr(&double_null_val));
1367  }
1368  return inline_int_null_val(ti);
1369 }
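A standalone sketch of the float branch: the 32-bit null pattern is written into a 64-bit entry slot and can be read back unchanged. std::memcpy stands in for the may_alias_ptr cast, and the sentinel value is a placeholder rather than the engine's actual float null:

 #include <cstdint>
 #include <cstring>
 #include <iostream>
 #include <limits>

 int main() {
   const float float_null = std::numeric_limits<float>::lowest();  // placeholder sentinel
   int64_t slot = 0;
   std::memcpy(&slot, &float_null, sizeof(float));  // store the 32-bit pattern in the 64-bit slot
   float back = 0.f;
   std::memcpy(&back, &slot, sizeof(float));        // read it back when the row is materialized
   std::cout << (back == float_null) << '\n';       // prints 1: the bit pattern round-trips
 }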

◆ insert_one_dict_str() [1/2]

template<class T >
int64_t anonymous_namespace{Execute.cpp}::insert_one_dict_str ( T *  col_data,
const std::string &  columnName,
const SQLTypeInfo &  columnType,
const Analyzer::Constant *  col_cv,
const Catalog_Namespace::Catalog &  catalog 
)

Definition at line 2404 of file Execute.cpp.

References CHECK, logger::ERROR, SQLTypeInfoCore< TYPE_FACET_PACK >::get_comp_param(), Analyzer::Constant::get_constval(), Analyzer::Constant::get_is_null(), Catalog_Namespace::Catalog::getMetadataForDict(), inline_fixed_encoding_null_val(), LOG, and Datum::stringval.

2408  {
2409  if (col_cv->get_is_null()) {
2410  *col_data = inline_fixed_encoding_null_val(columnType);
2411  } else {
2412  const int dict_id = columnType.get_comp_param();
2413  const auto col_datum = col_cv->get_constval();
2414  const auto& str = *col_datum.stringval;
2415  const auto dd = catalog.getMetadataForDict(dict_id);
2416  CHECK(dd && dd->stringDict);
2417  int32_t str_id = dd->stringDict->getOrAdd(str);
2418  const bool checkpoint_ok = dd->stringDict->checkpoint();
2419  if (!checkpoint_ok) {
2420  throw std::runtime_error("Failed to checkpoint dictionary for column " +
2421  columnName);
2422  }
2423  const bool invalid = str_id > max_valid_int_value<T>();
2424  if (invalid || str_id == inline_int_null_value<int32_t>()) {
2425  if (invalid) {
2426  LOG(ERROR) << "Could not encode string: " << str
2427  << ", the encoded value doesn't fit in " << sizeof(T) * 8
2428  << " bits. Will store NULL instead.";
2429  }
2430  str_id = inline_fixed_encoding_null_val(columnType);
2431  }
2432  *col_data = str_id;
2433  }
2434  return *col_data;
2435 }
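A hedged worked example of the width check for an 8-bit dictionary-encoded column (T = int8_t); the concrete ids are made up:

 //   str_id = 42   -> fits in 8 bits, *col_data = 42
 //   str_id = 300  -> exceeds max_valid_int_value<int8_t>(); an ERROR is logged and
 //                    *col_data is set to inline_fixed_encoding_null_val(columnType)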

◆ insert_one_dict_str() [2/2]

template<class T >
int64_t anonymous_namespace{Execute.cpp}::insert_one_dict_str ( T *  col_data,
const ColumnDescriptor *  cd,
const Analyzer::Constant *  col_cv,
const Catalog_Namespace::Catalog &  catalog 
)

Definition at line 2438 of file Execute.cpp.

References Importer_NS::appendDatum(), ColumnDescriptor::columnName, ColumnDescriptor::columnType, and anonymous_namespace{ImportTest.cpp}::d().

Referenced by Executor::executeSimpleInsert().

2441  {
2442  return insert_one_dict_str(col_data, cd->columnName, cd->columnType, col_cv, catalog);
2443 }

◆ replace_scan_limit()

RelAlgExecutionUnit anonymous_namespace{Execute.cpp}::replace_scan_limit ( const RelAlgExecutionUnit &  ra_exe_unit_in,
const size_t  new_scan_limit 
)

Definition at line 1078 of file Execute.cpp.

References RelAlgExecutionUnit::estimator, RelAlgExecutionUnit::groupby_exprs, RelAlgExecutionUnit::input_col_descs, RelAlgExecutionUnit::input_descs, RelAlgExecutionUnit::join_quals, RelAlgExecutionUnit::quals, RelAlgExecutionUnit::query_features, RelAlgExecutionUnit::simple_quals, RelAlgExecutionUnit::sort_info, RelAlgExecutionUnit::target_exprs, and RelAlgExecutionUnit::use_bump_allocator.

Referenced by Executor::executeWorkUnit().

1079  {
1080  return {ra_exe_unit_in.input_descs,
1081  ra_exe_unit_in.input_col_descs,
1082  ra_exe_unit_in.simple_quals,
1083  ra_exe_unit_in.quals,
1084  ra_exe_unit_in.join_quals,
1085  ra_exe_unit_in.groupby_exprs,
1086  ra_exe_unit_in.target_exprs,
1087  ra_exe_unit_in.estimator,
1088  ra_exe_unit_in.sort_info,
1089  new_scan_limit,
1090  ra_exe_unit_in.query_features,
1091  ra_exe_unit_in.use_bump_allocator};
1092 }
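Every field of ra_exe_unit_in is copied verbatim; only the scan_limit slot receives new_scan_limit. A hedged sketch of the retry pattern this enables in Executor::executeWorkUnit; the surrounding control flow is an assumption:

 // First attempt overflowed the preallocated output buffer; retry with a concrete limit.
 const auto ra_exe_unit_retry = replace_scan_limit(ra_exe_unit_in, new_scan_limit);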

◆ try_get_column_descriptor()

const ColumnDescriptor* anonymous_namespace{Execute.cpp}::try_get_column_descriptor ( const InputColDescriptor *  col_desc,
const Catalog_Namespace::Catalog &  cat 
)

Definition at line 1794 of file Execute.cpp.

References get_column_descriptor_maybe(), InputColDescriptor::getColId(), InputColDescriptor::getScanDesc(), and InputDescriptor::getTableId().

Referenced by Executor::fetchChunks().

1795  {
1796  const int table_id = col_desc->getScanDesc().getTableId();
1797  const int col_id = col_desc->getColId();
1798  return get_column_descriptor_maybe(col_id, table_id, cat);
1799 }