OmniSciDB  fe05a0c208
 All Classes Namespaces Files Functions Variables Typedefs Enumerations Enumerator Friends Macros Pages
anonymous_namespace{RelAlgExecutor.cpp} Namespace Reference

Classes

class  RexUsedInputsVisitor
 
struct  ErrorInfo
 

Functions

bool node_is_aggregate (const RelAlgNode *ra)
 
std::unordered_set< PhysicalInput > get_physical_inputs (const Catalog_Namespace::Catalog &cat, const RelAlgNode *ra)
 
void set_parallelism_hints (const RelAlgNode &ra_node, const Catalog_Namespace::Catalog &catalog)
 
void prepare_string_dictionaries (const RelAlgNode &ra_node, const Catalog_Namespace::Catalog &catalog)
 
void prepare_foreign_table_for_execution (const RelAlgNode &ra_node, const Catalog_Namespace::Catalog &catalog)
 
void check_sort_node_source_constraint (const RelSort *sort)
 
const RelAlgNode * get_data_sink (const RelAlgNode *ra_node)
 
std::pair< std::unordered_set
< const RexInput * >
, std::vector< std::shared_ptr
< RexInput > > > 
get_used_inputs (const RelCompound *compound, const Catalog_Namespace::Catalog &cat)
 
std::pair< std::unordered_set
< const RexInput * >
, std::vector< std::shared_ptr
< RexInput > > > 
get_used_inputs (const RelAggregate *aggregate, const Catalog_Namespace::Catalog &cat)
 
std::pair< std::unordered_set
< const RexInput * >
, std::vector< std::shared_ptr
< RexInput > > > 
get_used_inputs (const RelProject *project, const Catalog_Namespace::Catalog &cat)
 
std::pair< std::unordered_set
< const RexInput * >
, std::vector< std::shared_ptr
< RexInput > > > 
get_used_inputs (const RelTableFunction *table_func, const Catalog_Namespace::Catalog &cat)
 
std::pair< std::unordered_set
< const RexInput * >
, std::vector< std::shared_ptr
< RexInput > > > 
get_used_inputs (const RelFilter *filter, const Catalog_Namespace::Catalog &cat)
 
std::pair< std::unordered_set
< const RexInput * >
, std::vector< std::shared_ptr
< RexInput > > > 
get_used_inputs (const RelLogicalUnion *logical_union, const Catalog_Namespace::Catalog &)
 
int table_id_from_ra (const RelAlgNode *ra_node)
 
std::unordered_map< const
RelAlgNode *, int > 
get_input_nest_levels (const RelAlgNode *ra_node, const std::vector< size_t > &input_permutation)
 
std::pair< std::unordered_set
< const RexInput * >
, std::vector< std::shared_ptr
< RexInput > > > 
get_join_source_used_inputs (const RelAlgNode *ra_node, const Catalog_Namespace::Catalog &cat)
 
void collect_used_input_desc (std::vector< InputDescriptor > &input_descs, const Catalog_Namespace::Catalog &cat, std::unordered_set< std::shared_ptr< const InputColDescriptor >> &input_col_descs_unique, const RelAlgNode *ra_node, const std::unordered_set< const RexInput * > &source_used_inputs, const std::unordered_map< const RelAlgNode *, int > &input_to_nest_level)
 
template<class RA >
std::pair< std::vector
< InputDescriptor >, std::list
< std::shared_ptr< const
InputColDescriptor > > > 
get_input_desc_impl (const RA *ra_node, const std::unordered_set< const RexInput * > &used_inputs, const std::unordered_map< const RelAlgNode *, int > &input_to_nest_level, const std::vector< size_t > &input_permutation, const Catalog_Namespace::Catalog &cat)
 
template<class RA >
std::tuple< std::vector
< InputDescriptor >, std::list
< std::shared_ptr< const
InputColDescriptor >
>, std::vector
< std::shared_ptr< RexInput > > > 
get_input_desc (const RA *ra_node, const std::unordered_map< const RelAlgNode *, int > &input_to_nest_level, const std::vector< size_t > &input_permutation, const Catalog_Namespace::Catalog &cat)
 
size_t get_scalar_sources_size (const RelCompound *compound)
 
size_t get_scalar_sources_size (const RelProject *project)
 
size_t get_scalar_sources_size (const RelTableFunction *table_func)
 
const RexScalar * scalar_at (const size_t i, const RelCompound *compound)
 
const RexScalar * scalar_at (const size_t i, const RelProject *project)
 
const RexScalar * scalar_at (const size_t i, const RelTableFunction *table_func)
 
std::shared_ptr< Analyzer::Expr > set_transient_dict (const std::shared_ptr< Analyzer::Expr > expr)
 
void set_transient_dict_maybe (std::vector< std::shared_ptr< Analyzer::Expr >> &scalar_sources, const std::shared_ptr< Analyzer::Expr > &expr)
 
std::shared_ptr< Analyzer::Expr > cast_dict_to_none (const std::shared_ptr< Analyzer::Expr > &input)
 
template<class RA >
std::vector< std::shared_ptr
< Analyzer::Expr > > 
translate_scalar_sources (const RA *ra_node, const RelAlgTranslator &translator, const ::ExecutorType executor_type)
 
template<class RA >
std::vector< std::shared_ptr
< Analyzer::Expr > > 
translate_scalar_sources_for_update (const RA *ra_node, const RelAlgTranslator &translator, int32_t tableId, const Catalog_Namespace::Catalog &cat, const ColumnNameList &colNames, size_t starting_projection_column_idx)
 
std::list< std::shared_ptr
< Analyzer::Expr > > 
translate_groupby_exprs (const RelCompound *compound, const std::vector< std::shared_ptr< Analyzer::Expr >> &scalar_sources)
 
std::list< std::shared_ptr
< Analyzer::Expr > > 
translate_groupby_exprs (const RelAggregate *aggregate, const std::vector< std::shared_ptr< Analyzer::Expr >> &scalar_sources)
 
QualsConjunctiveForm translate_quals (const RelCompound *compound, const RelAlgTranslator &translator)
 
std::vector< Analyzer::Expr * > translate_targets (std::vector< std::shared_ptr< Analyzer::Expr >> &target_exprs_owned, const std::vector< std::shared_ptr< Analyzer::Expr >> &scalar_sources, const std::list< std::shared_ptr< Analyzer::Expr >> &groupby_exprs, const RelCompound *compound, const RelAlgTranslator &translator, const ExecutorType executor_type)
 
std::vector< Analyzer::Expr * > translate_targets (std::vector< std::shared_ptr< Analyzer::Expr >> &target_exprs_owned, const std::vector< std::shared_ptr< Analyzer::Expr >> &scalar_sources, const std::list< std::shared_ptr< Analyzer::Expr >> &groupby_exprs, const RelAggregate *aggregate, const RelAlgTranslator &translator)
 
bool is_count_distinct (const Analyzer::Expr *expr)
 
bool is_agg (const Analyzer::Expr *expr)
 
SQLTypeInfo get_logical_type_for_expr (const Analyzer::Expr &expr)
 
template<class RA >
std::vector< TargetMetaInfo > get_targets_meta (const RA *ra_node, const std::vector< Analyzer::Expr * > &target_exprs)
 
template<>
std::vector< TargetMetaInfo > get_targets_meta (const RelFilter *filter, const std::vector< Analyzer::Expr * > &target_exprs)
 
bool is_window_execution_unit (const RelAlgExecutionUnit &ra_exe_unit)
 
std::shared_ptr< Analyzer::Expr > transform_to_inner (const Analyzer::Expr *expr)
 
template<class T >
int64_t insert_one_dict_str (T *col_data, const std::string &columnName, const SQLTypeInfo &columnType, const Analyzer::Constant *col_cv, const Catalog_Namespace::Catalog &catalog)
 
template<class T >
int64_t insert_one_dict_str (T *col_data, const ColumnDescriptor *cd, const Analyzer::Constant *col_cv, const Catalog_Namespace::Catalog &catalog)
 
int64_t int_value_from_numbers_ptr (const SQLTypeInfo &type_info, const int8_t *data)
 
const TableDescriptor * get_shard_for_key (const TableDescriptor *td, const Catalog_Namespace::Catalog &cat, const Fragmenter_Namespace::InsertData &data)
 
std::list< Analyzer::OrderEntry > get_order_entries (const RelSort *sort)
 
size_t get_scan_limit (const RelAlgNode *ra, const size_t limit)
 
bool first_oe_is_desc (const std::list< Analyzer::OrderEntry > &order_entries)
 
size_t groups_approx_upper_bound (const std::vector< InputTableInfo > &table_infos)
 
bool compute_output_buffer_size (const RelAlgExecutionUnit &ra_exe_unit)
 
bool exe_unit_has_quals (const RelAlgExecutionUnit ra_exe_unit)
 
RelAlgExecutionUnit decide_approx_count_distinct_implementation (const RelAlgExecutionUnit &ra_exe_unit_in, const std::vector< InputTableInfo > &table_infos, const Executor *executor, const ExecutorDeviceType device_type_in, std::vector< std::shared_ptr< Analyzer::Expr >> &target_exprs_owned)
 
void build_render_targets (RenderInfo &render_info, const std::vector< Analyzer::Expr * > &work_unit_target_exprs, const std::vector< TargetMetaInfo > &targets_meta)
 
bool can_use_bump_allocator (const RelAlgExecutionUnit &ra_exe_unit, const CompilationOptions &co, const ExecutionOptions &eo)
 
ErrorInfo getErrorDescription (const int32_t error_code)
 
JoinType get_join_type (const RelAlgNode *ra)
 
std::unique_ptr< const
RexOperator
get_bitwise_equals (const RexScalar *scalar)
 
std::unique_ptr< const
RexOperator
get_bitwise_equals_conjunction (const RexScalar *scalar)
 
std::vector< JoinType > left_deep_join_types (const RelLeftDeepInnerJoin *left_deep_join)
 
template<class RA >
std::vector< size_t > do_table_reordering (std::vector< InputDescriptor > &input_descs, std::list< std::shared_ptr< const InputColDescriptor >> &input_col_descs, const JoinQualsPerNestingLevel &left_deep_join_quals, std::unordered_map< const RelAlgNode *, int > &input_to_nest_level, const RA *node, const std::vector< InputTableInfo > &query_infos, const Executor *executor)
 
std::vector< size_t > get_left_deep_join_input_sizes (const RelLeftDeepInnerJoin *left_deep_join)
 
std::list< std::shared_ptr
< Analyzer::Expr > > 
rewrite_quals (const std::list< std::shared_ptr< Analyzer::Expr >> &quals)
 
std::vector< const RexScalar * > rex_to_conjunctive_form (const RexScalar *qual_expr)
 
std::shared_ptr< Analyzer::Expr > build_logical_expression (const std::vector< std::shared_ptr< Analyzer::Expr >> &factors, const SQLOps sql_op)
 
template<class QualsList >
bool list_contains_expression (const QualsList &haystack, const std::shared_ptr< Analyzer::Expr > &needle)
 
std::shared_ptr< Analyzer::Expr > reverse_logical_distribution (const std::shared_ptr< Analyzer::Expr > &expr)
 
std::vector< std::shared_ptr
< Analyzer::Expr > > 
synthesize_inputs (const RelAlgNode *ra_node, const size_t nest_level, const std::vector< TargetMetaInfo > &in_metainfo, const std::unordered_map< const RelAlgNode *, int > &input_to_nest_level)
 
std::vector< std::shared_ptr
< Analyzer::Expr > > 
target_exprs_for_union (RelAlgNode const *input_node)
 
std::pair< std::vector
< TargetMetaInfo >
, std::vector< std::shared_ptr
< Analyzer::Expr > > > 
get_inputs_meta (const RelFilter *filter, const RelAlgTranslator &translator, const std::vector< std::shared_ptr< RexInput >> &inputs_owned, const std::unordered_map< const RelAlgNode *, int > &input_to_nest_level)
 

Function Documentation

std::shared_ptr<Analyzer::Expr> anonymous_namespace{RelAlgExecutor.cpp}::build_logical_expression ( const std::vector< std::shared_ptr< Analyzer::Expr >> &  factors,
const SQLOps  sql_op 
)

Definition at line 3609 of file RelAlgExecutor.cpp.

References CHECK, i, kONE, and Parser::OperExpr::normalize().

Referenced by reverse_logical_distribution().

3611  {
3612  CHECK(!factors.empty());
3613  auto acc = factors.front();
3614  for (size_t i = 1; i < factors.size(); ++i) {
3615  acc = Parser::OperExpr::normalize(sql_op, kONE, acc, factors[i]);
3616  }
3617  return acc;
3618 }
static std::shared_ptr< Analyzer::Expr > normalize(const SQLOps optype, const SQLQualifier qual, std::shared_ptr< Analyzer::Expr > left_expr, std::shared_ptr< Analyzer::Expr > right_expr)
Definition: ParserNode.cpp:284
Definition: sqldefs.h:69
#define CHECK(condition)
Definition: Logger.h:203

+ Here is the call graph for this function:

+ Here is the caller graph for this function:

void anonymous_namespace{RelAlgExecutor.cpp}::build_render_targets ( RenderInfo render_info,
const std::vector< Analyzer::Expr * > &  work_unit_target_exprs,
const std::vector< TargetMetaInfo > &  targets_meta 
)

Definition at line 2834 of file RelAlgExecutor.cpp.

References CHECK_EQ, i, and RenderInfo::targets.

Referenced by RelAlgExecutor::executeWorkUnit().

2836  {
2837  CHECK_EQ(work_unit_target_exprs.size(), targets_meta.size());
2838  render_info.targets.clear();
2839  for (size_t i = 0; i < targets_meta.size(); ++i) {
2840  render_info.targets.emplace_back(std::make_shared<Analyzer::TargetEntry>(
2841  targets_meta[i].get_resname(),
2842  work_unit_target_exprs[i]->get_shared_ptr(),
2843  false));
2844  }
2845 }
#define CHECK_EQ(x, y)
Definition: Logger.h:211
std::vector< std::shared_ptr< Analyzer::TargetEntry > > targets
Definition: RenderInfo.h:37

+ Here is the caller graph for this function:

bool anonymous_namespace{RelAlgExecutor.cpp}::can_use_bump_allocator ( const RelAlgExecutionUnit ra_exe_unit,
const CompilationOptions co,
const ExecutionOptions eo 
)
inline

Definition at line 2847 of file RelAlgExecutor.cpp.

References CompilationOptions::device_type, g_enable_bump_allocator, GPU, SortInfo::order_entries, ExecutionOptions::output_columnar_hint, and RelAlgExecutionUnit::sort_info.

Referenced by RelAlgExecutor::executeWorkUnit().

2849  {
2851  !eo.output_columnar_hint && ra_exe_unit.sort_info.order_entries.empty();
2852 }
const std::list< Analyzer::OrderEntry > order_entries
const SortInfo sort_info
ExecutorDeviceType device_type
bool g_enable_bump_allocator
Definition: Execute.cpp:109

+ Here is the caller graph for this function:

std::shared_ptr<Analyzer::Expr> anonymous_namespace{RelAlgExecutor.cpp}::cast_dict_to_none ( const std::shared_ptr< Analyzer::Expr > &  input)

Definition at line 1256 of file RelAlgExecutor.cpp.

References kENCODING_DICT, and kTEXT.

Referenced by translate_scalar_sources(), and translate_targets().

1257  {
1258  const auto& input_ti = input->get_type_info();
1259  if (input_ti.is_string() && input_ti.get_compression() == kENCODING_DICT) {
1260  return input->add_cast(SQLTypeInfo(kTEXT, input_ti.get_notnull()));
1261  }
1262  return input;
1263 }
Definition: sqltypes.h:51

+ Here is the caller graph for this function:

void anonymous_namespace{RelAlgExecutor.cpp}::check_sort_node_source_constraint ( const RelSort sort)
inline

Definition at line 489 of file RelAlgExecutor.cpp.

References CHECK_EQ, RelAlgNode::getInput(), and RelAlgNode::inputCount().

Referenced by RelAlgExecutor::executeRelAlgQuerySingleStep(), and RelAlgExecutor::executeSort().

489  {
490  CHECK_EQ(size_t(1), sort->inputCount());
491  const auto source = sort->getInput(0);
492  if (dynamic_cast<const RelSort*>(source)) {
493  throw std::runtime_error("Sort node not supported as input to another sort");
494  }
495 }
#define CHECK_EQ(x, y)
Definition: Logger.h:211
const RelAlgNode * getInput(const size_t idx) const
const size_t inputCount() const

+ Here is the call graph for this function:

+ Here is the caller graph for this function:

void anonymous_namespace{RelAlgExecutor.cpp}::collect_used_input_desc ( std::vector< InputDescriptor > &  input_descs,
const Catalog_Namespace::Catalog cat,
std::unordered_set< std::shared_ptr< const InputColDescriptor >> &  input_col_descs_unique,
const RelAlgNode ra_node,
const std::unordered_set< const RexInput * > &  source_used_inputs,
const std::unordered_map< const RelAlgNode *, int > &  input_to_nest_level 
)

Definition at line 1104 of file RelAlgExecutor.cpp.

References Catalog_Namespace::Catalog::getColumnIdBySpi(), table_id_from_ra(), RelAlgNode::toString(), and VLOG.

Referenced by get_input_desc_impl().

1110  {
1111  VLOG(3) << "ra_node=" << ra_node->toString()
1112  << " input_col_descs_unique.size()=" << input_col_descs_unique.size()
1113  << " source_used_inputs.size()=" << source_used_inputs.size();
1114  for (const auto used_input : source_used_inputs) {
1115  const auto input_ra = used_input->getSourceNode();
1116  const int table_id = table_id_from_ra(input_ra);
1117  const auto col_id = used_input->getIndex();
1118  auto it = input_to_nest_level.find(input_ra);
1119  if (it != input_to_nest_level.end()) {
1120  const int input_desc = it->second;
1121  input_col_descs_unique.insert(std::make_shared<const InputColDescriptor>(
1122  dynamic_cast<const RelScan*>(input_ra)
1123  ? cat.getColumnIdBySpi(table_id, col_id + 1)
1124  : col_id,
1125  table_id,
1126  input_desc));
1127  } else if (!dynamic_cast<const RelLogicalUnion*>(ra_node)) {
1128  throw std::runtime_error("Bushy joins not supported");
1129  }
1130  }
1131 }
int table_id_from_ra(const RelAlgNode *ra_node)
virtual std::string toString() const =0
#define VLOG(n)
Definition: Logger.h:297
const int getColumnIdBySpi(const int tableId, const size_t spi) const
Definition: Catalog.cpp:1593

+ Here is the call graph for this function:

+ Here is the caller graph for this function:

bool anonymous_namespace{RelAlgExecutor.cpp}::compute_output_buffer_size ( const RelAlgExecutionUnit ra_exe_unit)

Determines whether a query needs to compute the size of its output buffer. Returns true for projection queries with no LIMIT or a LIMIT that exceeds the high scan limit threshold (meaning it would be cheaper to compute the number of rows passing or use the bump allocator than allocate the current scan limit per GPU)

Definition at line 2746 of file RelAlgExecutor.cpp.

References RelAlgExecutionUnit::groupby_exprs, Executor::high_scan_limit, RelAlgExecutionUnit::scan_limit, and RelAlgExecutionUnit::target_exprs.

Referenced by RelAlgExecutor::executeWorkUnit().

2746  {
2747  for (const auto target_expr : ra_exe_unit.target_exprs) {
2748  if (dynamic_cast<const Analyzer::AggExpr*>(target_expr)) {
2749  return false;
2750  }
2751  }
2752  if (ra_exe_unit.groupby_exprs.size() == 1 && !ra_exe_unit.groupby_exprs.front() &&
2753  (!ra_exe_unit.scan_limit || ra_exe_unit.scan_limit > Executor::high_scan_limit)) {
2754  return true;
2755  }
2756  return false;
2757 }
std::vector< Analyzer::Expr * > target_exprs
const std::list< std::shared_ptr< Analyzer::Expr > > groupby_exprs
static const size_t high_scan_limit
Definition: Execute.h:452

+ Here is the caller graph for this function:

RelAlgExecutionUnit anonymous_namespace{RelAlgExecutor.cpp}::decide_approx_count_distinct_implementation ( const RelAlgExecutionUnit ra_exe_unit_in,
const std::vector< InputTableInfo > &  table_infos,
const Executor executor,
const ExecutorDeviceType  device_type_in,
std::vector< std::shared_ptr< Analyzer::Expr >> &  target_exprs_owned 
)

Definition at line 2764 of file RelAlgExecutor.cpp.

References Bitmap, CHECK, CHECK_GE, g_bigint_count, g_cluster, g_hll_precision_bits, get_agg_type(), get_count_distinct_sub_bitmap_count(), get_target_info(), getExpressionRange(), GPU, hll_size_for_rate(), i, Integer, kAPPROX_COUNT_DISTINCT, kCOUNT, kENCODING_DICT, kINT, and RelAlgExecutionUnit::target_exprs.

Referenced by RelAlgExecutor::executeWorkUnit(), and RelAlgExecutor::handleOutOfMemoryRetry().

2769  {
2770  RelAlgExecutionUnit ra_exe_unit = ra_exe_unit_in;
2771  for (size_t i = 0; i < ra_exe_unit.target_exprs.size(); ++i) {
2772  const auto target_expr = ra_exe_unit.target_exprs[i];
2773  const auto agg_info = get_target_info(target_expr, g_bigint_count);
2774  if (agg_info.agg_kind != kAPPROX_COUNT_DISTINCT) {
2775  continue;
2776  }
2777  CHECK(dynamic_cast<const Analyzer::AggExpr*>(target_expr));
2778  const auto arg = static_cast<Analyzer::AggExpr*>(target_expr)->get_own_arg();
2779  CHECK(arg);
2780  const auto& arg_ti = arg->get_type_info();
2781  // Avoid calling getExpressionRange for variable length types (string and array),
2782  // it'd trigger an assertion since that API expects to be called only for types
2783  // for which the notion of range is well-defined. A bit of a kludge, but the
2784  // logic to reject these types anyway is at lower levels in the stack and not
2785  // really worth pulling into a separate function for now.
2786  if (!(arg_ti.is_number() || arg_ti.is_boolean() || arg_ti.is_time() ||
2787  (arg_ti.is_string() && arg_ti.get_compression() == kENCODING_DICT))) {
2788  continue;
2789  }
2790  const auto arg_range = getExpressionRange(arg.get(), table_infos, executor);
2791  if (arg_range.getType() != ExpressionRangeType::Integer) {
2792  continue;
2793  }
2794  // When running distributed, the threshold for using the precise implementation
2795  // must be consistent across all leaves, otherwise we could have a mix of precise
2796  // and approximate bitmaps and we cannot aggregate them.
2797  const auto device_type = g_cluster ? ExecutorDeviceType::GPU : device_type_in;
2798  const auto bitmap_sz_bits = arg_range.getIntMax() - arg_range.getIntMin() + 1;
2799  const auto sub_bitmap_count =
2800  get_count_distinct_sub_bitmap_count(bitmap_sz_bits, ra_exe_unit, device_type);
2801  int64_t approx_bitmap_sz_bits{0};
2802  const auto error_rate =
2803  static_cast<Analyzer::AggExpr*>(target_expr)->get_error_rate();
2804  if (error_rate) {
2805  CHECK(error_rate->get_type_info().get_type() == kINT);
2806  CHECK_GE(error_rate->get_constval().intval, 1);
2807  approx_bitmap_sz_bits = hll_size_for_rate(error_rate->get_constval().intval);
2808  } else {
2809  approx_bitmap_sz_bits = g_hll_precision_bits;
2810  }
2811  CountDistinctDescriptor approx_count_distinct_desc{CountDistinctImplType::Bitmap,
2812  arg_range.getIntMin(),
2813  approx_bitmap_sz_bits,
2814  true,
2815  device_type,
2816  sub_bitmap_count};
2817  CountDistinctDescriptor precise_count_distinct_desc{CountDistinctImplType::Bitmap,
2818  arg_range.getIntMin(),
2819  bitmap_sz_bits,
2820  false,
2821  device_type,
2822  sub_bitmap_count};
2823  if (approx_count_distinct_desc.bitmapPaddedSizeBytes() >=
2824  precise_count_distinct_desc.bitmapPaddedSizeBytes()) {
2825  auto precise_count_distinct = makeExpr<Analyzer::AggExpr>(
2826  get_agg_type(kCOUNT, arg.get()), kCOUNT, arg, true, nullptr);
2827  target_exprs_owned.push_back(precise_count_distinct);
2828  ra_exe_unit.target_exprs[i] = precise_count_distinct.get();
2829  }
2830  }
2831  return ra_exe_unit;
2832 }
std::vector< Analyzer::Expr * > target_exprs
int hll_size_for_rate(const int err_percent)
Definition: HyperLogLog.h:115
TargetInfo get_target_info(const PointerType target_expr, const bool bigint_count)
Definition: TargetInfo.h:79
#define CHECK_GE(x, y)
Definition: Logger.h:216
SQLTypeInfo get_agg_type(const SQLAgg agg_kind, const Analyzer::Expr *arg_expr)
int g_hll_precision_bits
size_t get_count_distinct_sub_bitmap_count(const size_t bitmap_sz_bits, const RelAlgExecutionUnit &ra_exe_unit, const ExecutorDeviceType device_type)
bool g_bigint_count
ExpressionRange getExpressionRange(const Analyzer::BinOper *expr, const std::vector< InputTableInfo > &query_infos, const Executor *, boost::optional< std::list< std::shared_ptr< Analyzer::Expr >>> simple_quals)
Definition: sqldefs.h:76
#define CHECK(condition)
Definition: Logger.h:203
bool g_cluster
Definition: sqltypes.h:44

+ Here is the call graph for this function:

+ Here is the caller graph for this function:

template<class RA >
std::vector<size_t> anonymous_namespace{RelAlgExecutor.cpp}::do_table_reordering ( std::vector< InputDescriptor > &  input_descs,
std::list< std::shared_ptr< const InputColDescriptor >> &  input_col_descs,
const JoinQualsPerNestingLevel left_deep_join_quals,
std::unordered_map< const RelAlgNode *, int > &  input_to_nest_level,
const RA *  node,
const std::vector< InputTableInfo > &  query_infos,
const Executor executor 
)

Definition at line 3453 of file RelAlgExecutor.cpp.

References cat(), CHECK, g_cluster, get_input_desc(), get_input_nest_levels(), get_node_input_permutation(), and table_is_replicated().

Referenced by RelAlgExecutor::createCompoundWorkUnit(), and RelAlgExecutor::createProjectWorkUnit().

3460  {
3461  if (g_cluster) {
3462  // Disable table reordering in distributed mode. The aggregator does not have enough
3463  // information to break ties
3464  return {};
3465  }
3466  const auto& cat = *executor->getCatalog();
3467  for (const auto& table_info : query_infos) {
3468  if (table_info.table_id < 0) {
3469  continue;
3470  }
3471  const auto td = cat.getMetadataForTable(table_info.table_id);
3472  CHECK(td);
3473  if (table_is_replicated(td)) {
3474  return {};
3475  }
3476  }
3477  const auto input_permutation =
3478  get_node_input_permutation(left_deep_join_quals, query_infos, executor);
3479  input_to_nest_level = get_input_nest_levels(node, input_permutation);
3480  std::tie(input_descs, input_col_descs, std::ignore) =
3481  get_input_desc(node, input_to_nest_level, input_permutation, cat);
3482  return input_permutation;
3483 }
std::unordered_map< const RelAlgNode *, int > get_input_nest_levels(const RelAlgNode *ra_node, const std::vector< size_t > &input_permutation)
std::string cat(Ts &&...args)
std::vector< node_t > get_node_input_permutation(const JoinQualsPerNestingLevel &left_deep_join_quals, const std::vector< InputTableInfo > &table_infos, const Executor *executor)
bool table_is_replicated(const TableDescriptor *td)
#define CHECK(condition)
Definition: Logger.h:203
bool g_cluster
std::tuple< std::vector< InputDescriptor >, std::list< std::shared_ptr< const InputColDescriptor > >, std::vector< std::shared_ptr< RexInput > > > get_input_desc(const RA *ra_node, const std::unordered_map< const RelAlgNode *, int > &input_to_nest_level, const std::vector< size_t > &input_permutation, const Catalog_Namespace::Catalog &cat)

+ Here is the call graph for this function:

+ Here is the caller graph for this function:

bool anonymous_namespace{RelAlgExecutor.cpp}::exe_unit_has_quals ( const RelAlgExecutionUnit  ra_exe_unit)
inline

Definition at line 2759 of file RelAlgExecutor.cpp.

References RelAlgExecutionUnit::join_quals, RelAlgExecutionUnit::quals, and RelAlgExecutionUnit::simple_quals.

Referenced by RelAlgExecutor::executeWorkUnit().

2759  {
2760  return !(ra_exe_unit.quals.empty() && ra_exe_unit.join_quals.empty() &&
2761  ra_exe_unit.simple_quals.empty());
2762 }
const JoinQualsPerNestingLevel join_quals
std::list< std::shared_ptr< Analyzer::Expr > > quals
std::list< std::shared_ptr< Analyzer::Expr > > simple_quals

+ Here is the caller graph for this function:

bool anonymous_namespace{RelAlgExecutor.cpp}::first_oe_is_desc ( const std::list< Analyzer::OrderEntry > &  order_entries)

Definition at line 2528 of file RelAlgExecutor.cpp.

Referenced by RelAlgExecutor::createSortInputWorkUnit(), and RelAlgExecutor::executeSort().

2528  {
2529  return !order_entries.empty() && order_entries.front().is_desc;
2530 }

+ Here is the caller graph for this function:

std::unique_ptr<const RexOperator> anonymous_namespace{RelAlgExecutor.cpp}::get_bitwise_equals ( const RexScalar scalar)

Definition at line 3372 of file RelAlgExecutor.cpp.

References CHECK_EQ, kAND, kBW_EQ, kEQ, kISNULL, kOR, and RexVisitorBase< T >::visit().

Referenced by get_bitwise_equals_conjunction().

3372  {
3373  const auto condition = dynamic_cast<const RexOperator*>(scalar);
3374  if (!condition || condition->getOperator() != kOR || condition->size() != 2) {
3375  return nullptr;
3376  }
3377  const auto equi_join_condition =
3378  dynamic_cast<const RexOperator*>(condition->getOperand(0));
3379  if (!equi_join_condition || equi_join_condition->getOperator() != kEQ) {
3380  return nullptr;
3381  }
3382  const auto both_are_null_condition =
3383  dynamic_cast<const RexOperator*>(condition->getOperand(1));
3384  if (!both_are_null_condition || both_are_null_condition->getOperator() != kAND ||
3385  both_are_null_condition->size() != 2) {
3386  return nullptr;
3387  }
3388  const auto lhs_is_null =
3389  dynamic_cast<const RexOperator*>(both_are_null_condition->getOperand(0));
3390  const auto rhs_is_null =
3391  dynamic_cast<const RexOperator*>(both_are_null_condition->getOperand(1));
3392  if (!lhs_is_null || !rhs_is_null || lhs_is_null->getOperator() != kISNULL ||
3393  rhs_is_null->getOperator() != kISNULL) {
3394  return nullptr;
3395  }
3396  CHECK_EQ(size_t(1), lhs_is_null->size());
3397  CHECK_EQ(size_t(1), rhs_is_null->size());
3398  CHECK_EQ(size_t(2), equi_join_condition->size());
3399  const auto eq_lhs = dynamic_cast<const RexInput*>(equi_join_condition->getOperand(0));
3400  const auto eq_rhs = dynamic_cast<const RexInput*>(equi_join_condition->getOperand(1));
3401  const auto is_null_lhs = dynamic_cast<const RexInput*>(lhs_is_null->getOperand(0));
3402  const auto is_null_rhs = dynamic_cast<const RexInput*>(rhs_is_null->getOperand(0));
3403  if (!eq_lhs || !eq_rhs || !is_null_lhs || !is_null_rhs) {
3404  return nullptr;
3405  }
3406  std::vector<std::unique_ptr<const RexScalar>> eq_operands;
3407  if (*eq_lhs == *is_null_lhs && *eq_rhs == *is_null_rhs) {
3408  RexDeepCopyVisitor deep_copy_visitor;
3409  auto lhs_op_copy = deep_copy_visitor.visit(equi_join_condition->getOperand(0));
3410  auto rhs_op_copy = deep_copy_visitor.visit(equi_join_condition->getOperand(1));
3411  eq_operands.emplace_back(lhs_op_copy.release());
3412  eq_operands.emplace_back(rhs_op_copy.release());
3413  return boost::make_unique<const RexOperator>(
3414  kBW_EQ, eq_operands, equi_join_condition->getType());
3415  }
3416  return nullptr;
3417 }
#define CHECK_EQ(x, y)
Definition: Logger.h:211
Definition: sqldefs.h:38
Definition: sqldefs.h:30
virtual T visit(const RexScalar *rex_scalar) const
Definition: RexVisitor.h:27
Definition: sqldefs.h:37
Definition: sqldefs.h:31

+ Here is the call graph for this function:

+ Here is the caller graph for this function:

std::unique_ptr<const RexOperator> anonymous_namespace{RelAlgExecutor.cpp}::get_bitwise_equals_conjunction ( const RexScalar scalar)

Definition at line 3419 of file RelAlgExecutor.cpp.

References CHECK_GE, get_bitwise_equals(), i, and kAND.

Referenced by RelAlgExecutor::makeJoinQuals().

3420  {
3421  const auto condition = dynamic_cast<const RexOperator*>(scalar);
3422  if (condition && condition->getOperator() == kAND) {
3423  CHECK_GE(condition->size(), size_t(2));
3424  auto acc = get_bitwise_equals(condition->getOperand(0));
3425  if (!acc) {
3426  return nullptr;
3427  }
3428  for (size_t i = 1; i < condition->size(); ++i) {
3429  std::vector<std::unique_ptr<const RexScalar>> and_operands;
3430  and_operands.emplace_back(std::move(acc));
3431  and_operands.emplace_back(get_bitwise_equals_conjunction(condition->getOperand(i)));
3432  acc =
3433  boost::make_unique<const RexOperator>(kAND, and_operands, condition->getType());
3434  }
3435  return acc;
3436  }
3437  return get_bitwise_equals(scalar);
3438 }
std::unique_ptr< const RexOperator > get_bitwise_equals_conjunction(const RexScalar *scalar)
#define CHECK_GE(x, y)
Definition: Logger.h:216
Definition: sqldefs.h:37
std::unique_ptr< const RexOperator > get_bitwise_equals(const RexScalar *scalar)

+ Here is the call graph for this function:

+ Here is the caller graph for this function:

const RelAlgNode* anonymous_namespace{RelAlgExecutor.cpp}::get_data_sink ( const RelAlgNode ra_node)

Definition at line 904 of file RelAlgExecutor.cpp.

References CHECK_EQ, RelAlgNode::getInput(), RelAlgNode::inputCount(), and join().

Referenced by get_input_desc_impl(), get_input_nest_levels(), get_inputs_meta(), get_join_source_used_inputs(), get_join_type(), and get_used_inputs().

904  {
905  if (auto table_func = dynamic_cast<const RelTableFunction*>(ra_node)) {
906  return table_func;
907  }
908  if (auto join = dynamic_cast<const RelJoin*>(ra_node)) {
909  CHECK_EQ(size_t(2), join->inputCount());
910  return join;
911  }
912  if (!dynamic_cast<const RelLogicalUnion*>(ra_node)) {
913  CHECK_EQ(size_t(1), ra_node->inputCount());
914  }
915  auto only_src = ra_node->getInput(0);
916  const bool is_join = dynamic_cast<const RelJoin*>(only_src) ||
917  dynamic_cast<const RelLeftDeepInnerJoin*>(only_src);
918  return is_join ? only_src : ra_node;
919 }
#define CHECK_EQ(x, y)
Definition: Logger.h:211
std::string join(T const &container, std::string const &delim)
const RelAlgNode * getInput(const size_t idx) const
const size_t inputCount() const

+ Here is the call graph for this function:

+ Here is the caller graph for this function:

template<class RA >
std::tuple<std::vector<InputDescriptor>, std::list<std::shared_ptr<const InputColDescriptor> >, std::vector<std::shared_ptr<RexInput> > > anonymous_namespace{RelAlgExecutor.cpp}::get_input_desc ( const RA *  ra_node,
const std::unordered_map< const RelAlgNode *, int > &  input_to_nest_level,
const std::vector< size_t > &  input_permutation,
const Catalog_Namespace::Catalog cat 
)

Definition at line 1195 of file RelAlgExecutor.cpp.

References get_input_desc_impl(), get_used_inputs(), and VLOG.

Referenced by RelAlgExecutor::createAggregateWorkUnit(), RelAlgExecutor::createCompoundWorkUnit(), RelAlgExecutor::createFilterWorkUnit(), RelAlgExecutor::createProjectWorkUnit(), RelAlgExecutor::createTableFunctionWorkUnit(), RelAlgExecutor::createUnionWorkUnit(), and do_table_reordering().

1198  {
1199  std::unordered_set<const RexInput*> used_inputs;
1200  std::vector<std::shared_ptr<RexInput>> used_inputs_owned;
1201  std::tie(used_inputs, used_inputs_owned) = get_used_inputs(ra_node, cat);
1202  VLOG(3) << "used_inputs.size() = " << used_inputs.size();
1203  auto input_desc_pair = get_input_desc_impl(
1204  ra_node, used_inputs, input_to_nest_level, input_permutation, cat);
1205  return std::make_tuple(
1206  input_desc_pair.first, input_desc_pair.second, used_inputs_owned);
1207 }
std::pair< std::vector< InputDescriptor >, std::list< std::shared_ptr< const InputColDescriptor > > > get_input_desc_impl(const RA *ra_node, const std::unordered_set< const RexInput * > &used_inputs, const std::unordered_map< const RelAlgNode *, int > &input_to_nest_level, const std::vector< size_t > &input_permutation, const Catalog_Namespace::Catalog &cat)
std::pair< std::unordered_set< const RexInput * >, std::vector< std::shared_ptr< RexInput > > > get_used_inputs(const RelCompound *compound, const Catalog_Namespace::Catalog &cat)
#define VLOG(n)
Definition: Logger.h:297

+ Here is the call graph for this function:

+ Here is the caller graph for this function:

template<class RA >
std::pair<std::vector<InputDescriptor>, std::list<std::shared_ptr<const InputColDescriptor> > > anonymous_namespace{RelAlgExecutor.cpp}::get_input_desc_impl ( const RA *  ra_node,
const std::unordered_set< const RexInput * > &  used_inputs,
const std::unordered_map< const RelAlgNode *, int > &  input_to_nest_level,
const std::vector< size_t > &  input_permutation,
const Catalog_Namespace::Catalog cat 
)

Definition at line 1136 of file RelAlgExecutor.cpp.

References collect_used_input_desc(), get_data_sink(), get_join_source_used_inputs(), InputDescriptor::getNestLevel(), gpu_enabled::sort(), and table_id_from_ra().

Referenced by get_input_desc().

1140  {
// Build one InputDescriptor per input of the data sink, honoring an optional
// permutation (from join reordering) when picking which input lands at which
// nest level.
1141  std::vector<InputDescriptor> input_descs;
1142  const auto data_sink_node = get_data_sink(ra_node);
1143  for (size_t input_idx = 0; input_idx < data_sink_node->inputCount(); ++input_idx) {
1144  const auto input_node_idx =
1145  input_permutation.empty() ? input_idx : input_permutation[input_idx];
1146  auto input_ra = data_sink_node->getInput(input_node_idx);
1147  const int table_id = table_id_from_ra(input_ra);
1148  input_descs.emplace_back(table_id, input_idx);
1149  }
// Keep the descriptors ordered by nest level; downstream code relies on this.
1150  std::sort(input_descs.begin(),
1151  input_descs.end(),
1152  [](const InputDescriptor& lhs, const InputDescriptor& rhs) {
1153  return lhs.getNestLevel() < rhs.getNestLevel();
1154  });
// Deduplicate column descriptors across the node's own used inputs and the
// inputs referenced by join conditions.
1155  std::unordered_set<std::shared_ptr<const InputColDescriptor>> input_col_descs_unique;
1156  collect_used_input_desc(input_descs,
1157  cat,
1158  input_col_descs_unique, // modified
1159  ra_node,
1160  used_inputs,
1161  input_to_nest_level);
1162  std::unordered_set<const RexInput*> join_source_used_inputs;
1163  std::vector<std::shared_ptr<RexInput>> join_source_used_inputs_owned;
1164  std::tie(join_source_used_inputs, join_source_used_inputs_owned) =
1165  get_join_source_used_inputs(ra_node, cat);
1166  collect_used_input_desc(input_descs,
1167  cat,
1168  input_col_descs_unique, // modified
1169  ra_node,
1170  join_source_used_inputs,
1171  input_to_nest_level);
1172  std::vector<std::shared_ptr<const InputColDescriptor>> input_col_descs(
1173  input_col_descs_unique.begin(), input_col_descs_unique.end());
1174 
// Deterministic ordering of the column descriptors: by nest level, then
// column id, then table id (unordered_set iteration order is unspecified).
1175  std::sort(input_col_descs.begin(),
1176  input_col_descs.end(),
1177  [](std::shared_ptr<const InputColDescriptor> const& lhs,
1178  std::shared_ptr<const InputColDescriptor> const& rhs) {
1179  return std::make_tuple(lhs->getScanDesc().getNestLevel(),
1180  lhs->getColId(),
1181  lhs->getScanDesc().getTableId()) <
1182  std::make_tuple(rhs->getScanDesc().getNestLevel(),
1183  rhs->getColId(),
1184  rhs->getScanDesc().getTableId());
1185  });
1186  return {input_descs,
1187  std::list<std::shared_ptr<const InputColDescriptor>>(input_col_descs.begin(),
1188  input_col_descs.end())};
1189 }
void collect_used_input_desc(std::vector< InputDescriptor > &input_descs, const Catalog_Namespace::Catalog &cat, std::unordered_set< std::shared_ptr< const InputColDescriptor >> &input_col_descs_unique, const RelAlgNode *ra_node, const std::unordered_set< const RexInput * > &source_used_inputs, const std::unordered_map< const RelAlgNode *, int > &input_to_nest_level)
DEVICE void sort(ARGS &&...args)
Definition: gpu_enabled.h:105
std::pair< std::unordered_set< const RexInput * >, std::vector< std::shared_ptr< RexInput > > > get_join_source_used_inputs(const RelAlgNode *ra_node, const Catalog_Namespace::Catalog &cat)
int table_id_from_ra(const RelAlgNode *ra_node)
const RelAlgNode * get_data_sink(const RelAlgNode *ra_node)
int getNestLevel() const

+ Here is the call graph for this function:

+ Here is the caller graph for this function:

std::unordered_map<const RelAlgNode*, int> anonymous_namespace{RelAlgExecutor.cpp}::get_input_nest_levels ( const RelAlgNode ra_node,
const std::vector< size_t > &  input_permutation 
)

Definition at line 1041 of file RelAlgExecutor.cpp.

References CHECK, get_data_sink(), logger::INFO, and LOG_IF.

Referenced by RelAlgExecutor::createAggregateWorkUnit(), RelAlgExecutor::createCompoundWorkUnit(), RelAlgExecutor::createFilterWorkUnit(), RelAlgExecutor::createProjectWorkUnit(), RelAlgExecutor::createTableFunctionWorkUnit(), RelAlgExecutor::createUnionWorkUnit(), and do_table_reordering().

1043  {
1044  const auto data_sink_node = get_data_sink(ra_node);
1045  std::unordered_map<const RelAlgNode*, int> input_to_nest_level;
1046  for (size_t input_idx = 0; input_idx < data_sink_node->inputCount(); ++input_idx) {
1047  const auto input_node_idx =
1048  input_permutation.empty() ? input_idx : input_permutation[input_idx];
1049  const auto input_ra = data_sink_node->getInput(input_node_idx);
1050  // Having a non-zero mapped value (input_idx) results in the query being interpretted
1051  // as a JOIN within CodeGenerator::codegenColVar() due to rte_idx being set to the
1052  // mapped value (input_idx) which originates here. This would be incorrect for UNION.
1053  size_t const idx = dynamic_cast<const RelLogicalUnion*>(ra_node) ? 0 : input_idx;
1054  const auto it_ok = input_to_nest_level.emplace(input_ra, idx);
1055  CHECK(it_ok.second);
1056  LOG_IF(INFO, !input_permutation.empty())
1057  << "Assigned input " << input_ra->toString() << " to nest level " << input_idx;
1058  }
1059  return input_to_nest_level;
1060 }
#define LOG_IF(severity, condition)
Definition: Logger.h:293
#define CHECK(condition)
Definition: Logger.h:203
const RelAlgNode * get_data_sink(const RelAlgNode *ra_node)

+ Here is the call graph for this function:

+ Here is the caller graph for this function:

std::pair<std::vector<TargetMetaInfo>, std::vector<std::shared_ptr<Analyzer::Expr> > > anonymous_namespace{RelAlgExecutor.cpp}::get_inputs_meta ( const RelFilter filter,
const RelAlgTranslator translator,
const std::vector< std::shared_ptr< RexInput >> &  inputs_owned,
const std::unordered_map< const RelAlgNode *, int > &  input_to_nest_level 
)

Definition at line 4153 of file RelAlgExecutor.cpp.

References CHECK, get_data_sink(), get_exprs_not_owned(), get_targets_meta(), i, synthesize_inputs(), and RelAlgTranslator::translateScalarRex().

Referenced by RelAlgExecutor::createFilterWorkUnit().

4156  {
// Collects target metadata and owned expressions for every input of the
// filter's data sink. |input_it| walks |inputs_owned| in lockstep: scan
// sources consume one entry per column (translated here), while non-scan
// sources skip their entries and synthesize RexInputs instead.
4157  std::vector<TargetMetaInfo> in_metainfo;
4158  std::vector<std::shared_ptr<Analyzer::Expr>> exprs_owned;
4159  const auto data_sink_node = get_data_sink(filter);
4160  auto input_it = inputs_owned.begin();
4161  for (size_t nest_level = 0; nest_level < data_sink_node->inputCount(); ++nest_level) {
4162  const auto source = data_sink_node->getInput(nest_level);
4163  const auto scan_source = dynamic_cast<const RelScan*>(source);
4164  if (scan_source) {
// A scan has no output metadata yet; translate one input per column.
4165  CHECK(source->getOutputMetainfo().empty());
4166  std::vector<std::shared_ptr<Analyzer::Expr>> scalar_sources_owned;
4167  for (size_t i = 0; i < scan_source->size(); ++i, ++input_it) {
4168  scalar_sources_owned.push_back(translator.translateScalarRex(input_it->get()));
4169  }
4170  const auto source_metadata =
4171  get_targets_meta(scan_source, get_exprs_not_owned(scalar_sources_owned));
4172  in_metainfo.insert(
4173  in_metainfo.end(), source_metadata.begin(), source_metadata.end());
4174  exprs_owned.insert(
4175  exprs_owned.end(), scalar_sources_owned.begin(), scalar_sources_owned.end());
4176  } else {
// Non-scan sources already expose metadata; advance the shared iterator past
// their entries before synthesizing inputs for this nest level.
4177  const auto& source_metadata = source->getOutputMetainfo();
4178  input_it += source_metadata.size();
4179  in_metainfo.insert(
4180  in_metainfo.end(), source_metadata.begin(), source_metadata.end());
4181  const auto scalar_sources_owned = synthesize_inputs(
4182  data_sink_node, nest_level, source_metadata, input_to_nest_level);
4183  exprs_owned.insert(
4184  exprs_owned.end(), scalar_sources_owned.begin(), scalar_sources_owned.end());
4185  }
4186  }
4187  return std::make_pair(in_metainfo, exprs_owned);
4188 }
std::vector< std::shared_ptr< Analyzer::Expr > > synthesize_inputs(const RelAlgNode *ra_node, const size_t nest_level, const std::vector< TargetMetaInfo > &in_metainfo, const std::unordered_map< const RelAlgNode *, int > &input_to_nest_level)
std::shared_ptr< Analyzer::Expr > translateScalarRex(const RexScalar *rex) const
std::vector< Analyzer::Expr * > get_exprs_not_owned(const std::vector< std::shared_ptr< Analyzer::Expr >> &exprs)
Definition: Execute.h:252
std::vector< TargetMetaInfo > get_targets_meta(const RA *ra_node, const std::vector< Analyzer::Expr * > &target_exprs)
#define CHECK(condition)
Definition: Logger.h:203
const RelAlgNode * get_data_sink(const RelAlgNode *ra_node)

+ Here is the call graph for this function:

+ Here is the caller graph for this function:

std::pair<std::unordered_set<const RexInput*>, std::vector<std::shared_ptr<RexInput> > > anonymous_namespace{RelAlgExecutor.cpp}::get_join_source_used_inputs ( const RelAlgNode ra_node,
const Catalog_Namespace::Catalog cat 
)

Definition at line 1063 of file RelAlgExecutor.cpp.

References CHECK_EQ, CHECK_GE, CHECK_GT, get_data_sink(), anonymous_namespace{RelAlgExecutor.cpp}::RexUsedInputsVisitor::get_inputs_owned(), RelAlgNode::inputCount(), join(), run_benchmark_import::result, RelAlgNode::toString(), and RexVisitorBase< T >::visit().

Referenced by get_input_desc_impl().

1064  {
// Returns the RexInputs referenced by join conditions hanging off |ra_node|'s
// data sink (empty when the sink is not a join), plus ownership of any inputs
// the visitor synthesized.
1065  const auto data_sink_node = get_data_sink(ra_node);
1066  if (auto join = dynamic_cast<const RelJoin*>(data_sink_node)) {
1067  CHECK_EQ(join->inputCount(), 2u);
1068  const auto condition = join->getCondition();
1069  RexUsedInputsVisitor visitor(cat);
1070  auto condition_inputs = visitor.visit(condition);
1071  std::vector<std::shared_ptr<RexInput>> condition_inputs_owned(
1072  visitor.get_inputs_owned());
1073  return std::make_pair(condition_inputs, condition_inputs_owned);
1074  }
1075 
1076  if (auto left_deep_join = dynamic_cast<const RelLeftDeepInnerJoin*>(data_sink_node)) {
1077  CHECK_GE(left_deep_join->inputCount(), 2u);
// Union of inputs used by the inner condition and every per-level outer
// condition (levels are 1-based; level 0 has no outer condition).
1078  const auto condition = left_deep_join->getInnerCondition();
1079  RexUsedInputsVisitor visitor(cat);
1080  auto result = visitor.visit(condition);
1081  for (size_t nesting_level = 1; nesting_level <= left_deep_join->inputCount() - 1;
1082  ++nesting_level) {
1083  const auto outer_condition = left_deep_join->getOuterCondition(nesting_level);
1084  if (outer_condition) {
1085  const auto outer_result = visitor.visit(outer_condition);
1086  result.insert(outer_result.begin(), outer_result.end());
1087  }
1088  }
1089  std::vector<std::shared_ptr<RexInput>> used_inputs_owned(visitor.get_inputs_owned());
1090  return std::make_pair(result, used_inputs_owned);
1091  }
1092 
// Not a join sink: just sanity-check the input arity for the node kind.
1093  if (dynamic_cast<const RelLogicalUnion*>(ra_node)) {
1094  CHECK_GT(ra_node->inputCount(), 1u) << ra_node->toString();
1095  } else if (dynamic_cast<const RelTableFunction*>(ra_node)) {
1096  CHECK_GT(ra_node->inputCount(), 0u) << ra_node->toString();
1097  } else {
1098  CHECK_EQ(ra_node->inputCount(), 1u) << ra_node->toString();
1099  }
1100  return std::make_pair(std::unordered_set<const RexInput*>{},
1101  std::vector<std::shared_ptr<RexInput>>{});
1102 }
#define CHECK_EQ(x, y)
Definition: Logger.h:211
std::string join(T const &container, std::string const &delim)
#define CHECK_GE(x, y)
Definition: Logger.h:216
#define CHECK_GT(x, y)
Definition: Logger.h:215
virtual std::string toString() const =0
const size_t inputCount() const
const RelAlgNode * get_data_sink(const RelAlgNode *ra_node)

+ Here is the call graph for this function:

+ Here is the caller graph for this function:

JoinType anonymous_namespace{RelAlgExecutor.cpp}::get_join_type ( const RelAlgNode *  ra)

Definition at line 3360 of file RelAlgExecutor.cpp.

References get_data_sink(), INNER, INVALID, and join().

Referenced by RelAlgExecutor::createAggregateWorkUnit(), RelAlgExecutor::createCompoundWorkUnit(), RelAlgExecutor::createFilterWorkUnit(), and RelAlgExecutor::createProjectWorkUnit().

3360  {
3361  auto sink = get_data_sink(ra);
3362  if (auto join = dynamic_cast<const RelJoin*>(sink)) {
3363  return join->getJoinType();
3364  }
3365  if (dynamic_cast<const RelLeftDeepInnerJoin*>(sink)) {
3366  return JoinType::INNER;
3367  }
3368 
3369  return JoinType::INVALID;
3370 }
std::string join(T const &container, std::string const &delim)
const RelAlgNode * get_data_sink(const RelAlgNode *ra_node)

+ Here is the call graph for this function:

+ Here is the caller graph for this function:

std::vector<size_t> anonymous_namespace{RelAlgExecutor.cpp}::get_left_deep_join_input_sizes ( const RelLeftDeepInnerJoin left_deep_join)

Definition at line 3485 of file RelAlgExecutor.cpp.

References get_node_output(), RelAlgNode::getInput(), i, and RelAlgNode::inputCount().

Referenced by RelAlgExecutor::createCompoundWorkUnit(), and RelAlgExecutor::createProjectWorkUnit().

3486  {
3487  std::vector<size_t> input_sizes;
3488  for (size_t i = 0; i < left_deep_join->inputCount(); ++i) {
3489  const auto inputs = get_node_output(left_deep_join->getInput(i));
3490  input_sizes.push_back(inputs.size());
3491  }
3492  return input_sizes;
3493 }
const RelAlgNode * getInput(const size_t idx) const
RANodeOutput get_node_output(const RelAlgNode *ra_node)
const size_t inputCount() const

+ Here is the call graph for this function:

+ Here is the caller graph for this function:

SQLTypeInfo anonymous_namespace{RelAlgExecutor.cpp}::get_logical_type_for_expr ( const Analyzer::Expr &  expr)
inline

Definition at line 1450 of file RelAlgExecutor.cpp.

References get_logical_type_info(), get_nullable_logical_type_info(), Analyzer::Expr::get_type_info(), is_agg(), is_count_distinct(), and kBIGINT.

Referenced by get_targets_meta().

1450  {
1451  if (is_count_distinct(&expr)) {
1452  return SQLTypeInfo(kBIGINT, false);
1453  } else if (is_agg(&expr)) {
1455  }
1456  return get_logical_type_info(expr.get_type_info());
1457 }
bool is_agg(const Analyzer::Expr *expr)
SQLTypeInfo get_nullable_logical_type_info(const SQLTypeInfo &type_info)
Definition: sqltypes.h:932
SQLTypeInfo get_logical_type_info(const SQLTypeInfo &type_info)
Definition: sqltypes.h:911
bool is_count_distinct(const Analyzer::Expr *expr)
const SQLTypeInfo & get_type_info() const
Definition: Analyzer.h:78

+ Here is the call graph for this function:

+ Here is the caller graph for this function:

std::list<Analyzer::OrderEntry> anonymous_namespace{RelAlgExecutor.cpp}::get_order_entries ( const RelSort sort)

Definition at line 2508 of file RelAlgExecutor.cpp.

References RelSort::collationCount(), Descending, First, RelSort::getCollation(), i, and run_benchmark_import::result.

Referenced by RelAlgExecutor::createSortInputWorkUnit(), and RelAlgExecutor::executeSort().

2508  {
2509  std::list<Analyzer::OrderEntry> result;
2510  for (size_t i = 0; i < sort->collationCount(); ++i) {
2511  const auto sort_field = sort->getCollation(i);
2512  result.emplace_back(sort_field.getField() + 1,
2513  sort_field.getSortDir() == SortDirection::Descending,
2514  sort_field.getNullsPosition() == NullSortedPosition::First);
2515  }
2516  return result;
2517 }
SortField getCollation(const size_t i) const
size_t collationCount() const

+ Here is the call graph for this function:

+ Here is the caller graph for this function:

std::unordered_set<PhysicalInput> anonymous_namespace{RelAlgExecutor.cpp}::get_physical_inputs ( const Catalog_Namespace::Catalog cat,
const RelAlgNode ra 
)

Definition at line 64 of file RelAlgExecutor.cpp.

References get_physical_inputs(), and Catalog_Namespace::Catalog::getColumnIdBySpi().

66  {
67  auto phys_inputs = get_physical_inputs(ra);
68  std::unordered_set<PhysicalInput> phys_inputs2;
69  for (auto& phi : phys_inputs) {
70  phys_inputs2.insert(
71  PhysicalInput{cat.getColumnIdBySpi(phi.table_id, phi.col_id), phi.table_id});
72  }
73  return phys_inputs2;
74 }
std::unordered_set< PhysicalInput > get_physical_inputs(const RelAlgNode *ra)
const int getColumnIdBySpi(const int tableId, const size_t spi) const
Definition: Catalog.cpp:1593

+ Here is the call graph for this function:

size_t anonymous_namespace{RelAlgExecutor.cpp}::get_scalar_sources_size ( const RelCompound compound)

Definition at line 1209 of file RelAlgExecutor.cpp.

References RelCompound::getScalarSourcesSize().

Referenced by translate_scalar_sources(), and translate_scalar_sources_for_update().

1209  {
// Number of scalar source expressions feeding this compound node.
1210  return compound->getScalarSourcesSize();
1211 }
const size_t getScalarSourcesSize() const

+ Here is the call graph for this function:

+ Here is the caller graph for this function:

size_t anonymous_namespace{RelAlgExecutor.cpp}::get_scalar_sources_size ( const RelProject project)

Definition at line 1213 of file RelAlgExecutor.cpp.

References RelProject::size().

1213  {
// For a projection, every projected expression is a scalar source.
1214  return project->size();
1215 }
size_t size() const override

+ Here is the call graph for this function:

size_t anonymous_namespace{RelAlgExecutor.cpp}::get_scalar_sources_size ( const RelTableFunction table_func)

Definition at line 1217 of file RelAlgExecutor.cpp.

References RelTableFunction::getTableFuncInputsSize().

1217  {
// Table functions expose their argument count as the scalar source count.
1218  return table_func->getTableFuncInputsSize();
1219 }
size_t getTableFuncInputsSize() const

+ Here is the call graph for this function:

size_t anonymous_namespace{RelAlgExecutor.cpp}::get_scan_limit ( const RelAlgNode ra,
const size_t  limit 
)

Definition at line 2519 of file RelAlgExecutor.cpp.

Referenced by RelAlgExecutor::createSortInputWorkUnit().

2519  {
2520  const auto aggregate = dynamic_cast<const RelAggregate*>(ra);
2521  if (aggregate) {
2522  return 0;
2523  }
2524  const auto compound = dynamic_cast<const RelCompound*>(ra);
2525  return (compound && compound->isAggregate()) ? 0 : limit;
2526 }

+ Here is the caller graph for this function:

const TableDescriptor* anonymous_namespace{RelAlgExecutor.cpp}::get_shard_for_key ( const TableDescriptor td,
const Catalog_Namespace::Catalog cat,
const Fragmenter_Namespace::InsertData data 
)

Definition at line 2196 of file RelAlgExecutor.cpp.

References CHECK, Fragmenter_Namespace::InsertData::columnIds, Fragmenter_Namespace::InsertData::data, Catalog_Namespace::Catalog::getPhysicalTablesDescriptors(), Catalog_Namespace::Catalog::getShardColumnMetadataForTable(), i, int_value_from_numbers_ptr(), and SHARD_FOR_KEY.

Referenced by RelAlgExecutor::executeSimpleInsert().

2198  {
// Resolves which physical shard of |td| receives an insert, by hashing the
// value of the shard-key column found in |data|. Returns nullptr when the
// shard column is not among the inserted columns.
2199  auto shard_column_md = cat.getShardColumnMetadataForTable(td);
2200  CHECK(shard_column_md);
2201  auto sharded_column_id = shard_column_md->columnId;
2202  const TableDescriptor* shard{nullptr};
2203  for (size_t i = 0; i < data.columnIds.size(); ++i) {
2204  if (data.columnIds[i] == sharded_column_id) {
// Found the shard key: hash its (single-row) value into a shard index.
2205  const auto shard_tables = cat.getPhysicalTablesDescriptors(td);
2206  const auto shard_count = shard_tables.size();
2207  CHECK(data.data[i].numbersPtr);
2208  auto value = int_value_from_numbers_ptr(shard_column_md->columnType,
2209  data.data[i].numbersPtr);
2210  const size_t shard_idx = SHARD_FOR_KEY(value, shard_count);
2211  shard = shard_tables[shard_idx];
2212  break;
2213  }
2214  }
2215  return shard;
2216 }
int64_t int_value_from_numbers_ptr(const SQLTypeInfo &type_info, const int8_t *data)
const ColumnDescriptor * getShardColumnMetadataForTable(const TableDescriptor *td) const
Definition: Catalog.cpp:4045
std::vector< const TableDescriptor * > getPhysicalTablesDescriptors(const TableDescriptor *logical_table_desc, bool populate_fragmenter=true) const
Definition: Catalog.cpp:4063
std::vector< DataBlockPtr > data
the number of rows being inserted
Definition: Fragmenter.h:64
#define CHECK(condition)
Definition: Logger.h:203
std::vector< int > columnIds
identifies the table into which the data is being inserted
Definition: Fragmenter.h:62
#define SHARD_FOR_KEY(key, num_shards)
Definition: shard_key.h:20

+ Here is the call graph for this function:

+ Here is the caller graph for this function:

template<class RA >
std::vector<TargetMetaInfo> anonymous_namespace{RelAlgExecutor.cpp}::get_targets_meta ( const RA *  ra_node,
const std::vector< Analyzer::Expr * > &  target_exprs 
)

Definition at line 1460 of file RelAlgExecutor.cpp.

References CHECK, CHECK_EQ, get_logical_type_for_expr(), and i.

Referenced by RelAlgExecutor::createAggregateWorkUnit(), RelAlgExecutor::createCompoundWorkUnit(), RelAlgExecutor::createProjectWorkUnit(), RelAlgExecutor::createTableFunctionWorkUnit(), RelAlgExecutor::createUnionWorkUnit(), get_inputs_meta(), and get_targets_meta().

1462  {
1463  std::vector<TargetMetaInfo> targets_meta;
1464  CHECK_EQ(ra_node->size(), target_exprs.size());
1465  for (size_t i = 0; i < ra_node->size(); ++i) {
1466  CHECK(target_exprs[i]);
1467  // TODO(alex): remove the count distinct type fixup.
1468  targets_meta.emplace_back(ra_node->getFieldName(i),
1469  get_logical_type_for_expr(*target_exprs[i]),
1470  target_exprs[i]->get_type_info());
1471  }
1472  return targets_meta;
1473 }
#define CHECK_EQ(x, y)
Definition: Logger.h:211
SQLTypeInfo get_logical_type_for_expr(const Analyzer::Expr &expr)
#define CHECK(condition)
Definition: Logger.h:203

+ Here is the call graph for this function:

+ Here is the caller graph for this function:

template<>
std::vector<TargetMetaInfo> anonymous_namespace{RelAlgExecutor.cpp}::get_targets_meta ( const RelFilter filter,
const std::vector< Analyzer::Expr * > &  target_exprs 
)

Definition at line 1476 of file RelAlgExecutor.cpp.

References get_targets_meta(), RelAlgNode::getInput(), RelAlgNode::toString(), and UNREACHABLE.

1478  {
// RelFilter has no field names of its own, so delegate to its input's
// concrete type; each branch is needed because get_targets_meta<RA> is
// resolved statically per node class.
1479  RelAlgNode const* input0 = filter->getInput(0);
1480  if (auto const* input = dynamic_cast<RelCompound const*>(input0)) {
1481  return get_targets_meta(input, target_exprs);
1482  } else if (auto const* input = dynamic_cast<RelProject const*>(input0)) {
1483  return get_targets_meta(input, target_exprs);
1484  } else if (auto const* input = dynamic_cast<RelLogicalUnion const*>(input0)) {
1485  return get_targets_meta(input, target_exprs);
1486  } else if (auto const* input = dynamic_cast<RelAggregate const*>(input0)) {
1487  return get_targets_meta(input, target_exprs);
1488  } else if (auto const* input = dynamic_cast<RelScan const*>(input0)) {
1489  return get_targets_meta(input, target_exprs);
1490  }
1491  UNREACHABLE() << "Unhandled node type: " << input0->toString();
1492  return {};
1493 }
#define UNREACHABLE()
Definition: Logger.h:247
const RelAlgNode * getInput(const size_t idx) const
std::vector< TargetMetaInfo > get_targets_meta(const RA *ra_node, const std::vector< Analyzer::Expr * > &target_exprs)
virtual std::string toString() const =0

+ Here is the call graph for this function:

std::pair<std::unordered_set<const RexInput*>, std::vector<std::shared_ptr<RexInput> > > anonymous_namespace{RelAlgExecutor.cpp}::get_used_inputs ( const RelCompound compound,
const Catalog_Namespace::Catalog cat 
)

Definition at line 922 of file RelAlgExecutor.cpp.

References anonymous_namespace{RelAlgExecutor.cpp}::RexUsedInputsVisitor::get_inputs_owned(), RelCompound::getFilterExpr(), RelCompound::getScalarSource(), RelCompound::getScalarSourcesSize(), i, and RexVisitorBase< T >::visit().

Referenced by get_input_desc().

922  {
923  RexUsedInputsVisitor visitor(cat);
924  const auto filter_expr = compound->getFilterExpr();
925  std::unordered_set<const RexInput*> used_inputs =
926  filter_expr ? visitor.visit(filter_expr) : std::unordered_set<const RexInput*>{};
927  const auto sources_size = compound->getScalarSourcesSize();
928  for (size_t i = 0; i < sources_size; ++i) {
929  const auto source_inputs = visitor.visit(compound->getScalarSource(i));
930  used_inputs.insert(source_inputs.begin(), source_inputs.end());
931  }
932  std::vector<std::shared_ptr<RexInput>> used_inputs_owned(visitor.get_inputs_owned());
933  return std::make_pair(used_inputs, used_inputs_owned);
934 }
const RexScalar * getFilterExpr() const
const size_t getScalarSourcesSize() const
const RexScalar * getScalarSource(const size_t i) const

+ Here is the call graph for this function:

+ Here is the caller graph for this function:

std::pair<std::unordered_set<const RexInput*>, std::vector<std::shared_ptr<RexInput> > > anonymous_namespace{RelAlgExecutor.cpp}::get_used_inputs ( const RelAggregate aggregate,
const Catalog_Namespace::Catalog cat 
)

Definition at line 937 of file RelAlgExecutor.cpp.

References CHECK_EQ, CHECK_GE, RelAggregate::getAggExprs(), RelAggregate::getGroupByCount(), RelAlgNode::getInput(), RelAlgNode::getOutputMetainfo(), i, and RelAlgNode::inputCount().

937  {
938  CHECK_EQ(size_t(1), aggregate->inputCount());
939  std::unordered_set<const RexInput*> used_inputs;
940  std::vector<std::shared_ptr<RexInput>> used_inputs_owned;
941  const auto source = aggregate->getInput(0);
942  const auto& in_metainfo = source->getOutputMetainfo();
943  const auto group_count = aggregate->getGroupByCount();
944  CHECK_GE(in_metainfo.size(), group_count);
945  for (size_t i = 0; i < group_count; ++i) {
946  auto synthesized_used_input = new RexInput(source, i);
947  used_inputs_owned.emplace_back(synthesized_used_input);
948  used_inputs.insert(synthesized_used_input);
949  }
950  for (const auto& agg_expr : aggregate->getAggExprs()) {
951  for (size_t i = 0; i < agg_expr->size(); ++i) {
952  const auto operand_idx = agg_expr->getOperand(i);
953  CHECK_GE(in_metainfo.size(), static_cast<size_t>(operand_idx));
954  auto synthesized_used_input = new RexInput(source, operand_idx);
955  used_inputs_owned.emplace_back(synthesized_used_input);
956  used_inputs.insert(synthesized_used_input);
957  }
958  }
959  return std::make_pair(used_inputs, used_inputs_owned);
960 }
const size_t getGroupByCount() const
#define CHECK_EQ(x, y)
Definition: Logger.h:211
#define CHECK_GE(x, y)
Definition: Logger.h:216
const RelAlgNode * getInput(const size_t idx) const
const std::vector< std::unique_ptr< const RexAgg > > & getAggExprs() const
const size_t inputCount() const
const std::vector< TargetMetaInfo > & getOutputMetainfo() const

+ Here is the call graph for this function:

std::pair<std::unordered_set<const RexInput*>, std::vector<std::shared_ptr<RexInput> > > anonymous_namespace{RelAlgExecutor.cpp}::get_used_inputs ( const RelProject project,
const Catalog_Namespace::Catalog cat 
)

Definition at line 963 of file RelAlgExecutor.cpp.

References anonymous_namespace{RelAlgExecutor.cpp}::RexUsedInputsVisitor::get_inputs_owned(), RelProject::getProjectAt(), i, RelProject::size(), and RexVisitorBase< T >::visit().

963  {
964  RexUsedInputsVisitor visitor(cat);
965  std::unordered_set<const RexInput*> used_inputs;
966  for (size_t i = 0; i < project->size(); ++i) {
967  const auto proj_inputs = visitor.visit(project->getProjectAt(i));
968  used_inputs.insert(proj_inputs.begin(), proj_inputs.end());
969  }
970  std::vector<std::shared_ptr<RexInput>> used_inputs_owned(visitor.get_inputs_owned());
971  return std::make_pair(used_inputs, used_inputs_owned);
972 }
size_t size() const override
const RexScalar * getProjectAt(const size_t idx) const

+ Here is the call graph for this function:

std::pair<std::unordered_set<const RexInput*>, std::vector<std::shared_ptr<RexInput> > > anonymous_namespace{RelAlgExecutor.cpp}::get_used_inputs ( const RelTableFunction table_func,
const Catalog_Namespace::Catalog cat 
)

Definition at line 975 of file RelAlgExecutor.cpp.

References anonymous_namespace{RelAlgExecutor.cpp}::RexUsedInputsVisitor::get_inputs_owned(), RelTableFunction::getTableFuncInputAt(), RelTableFunction::getTableFuncInputsSize(), i, and RexVisitorBase< T >::visit().

976  {
977  RexUsedInputsVisitor visitor(cat);
978  std::unordered_set<const RexInput*> used_inputs;
979  for (size_t i = 0; i < table_func->getTableFuncInputsSize(); ++i) {
980  const auto table_func_inputs = visitor.visit(table_func->getTableFuncInputAt(i));
981  used_inputs.insert(table_func_inputs.begin(), table_func_inputs.end());
982  }
983  std::vector<std::shared_ptr<RexInput>> used_inputs_owned(visitor.get_inputs_owned());
984  return std::make_pair(used_inputs, used_inputs_owned);
985 }
size_t getTableFuncInputsSize() const
const RexScalar * getTableFuncInputAt(const size_t idx) const

+ Here is the call graph for this function:

std::pair<std::unordered_set<const RexInput*>, std::vector<std::shared_ptr<RexInput> > > anonymous_namespace{RelAlgExecutor.cpp}::get_used_inputs ( const RelFilter filter,
const Catalog_Namespace::Catalog cat 
)

Definition at line 988 of file RelAlgExecutor.cpp.

References CHECK, get_data_sink(), and i.

988  {
989  std::unordered_set<const RexInput*> used_inputs;
990  std::vector<std::shared_ptr<RexInput>> used_inputs_owned;
991  const auto data_sink_node = get_data_sink(filter);
992  for (size_t nest_level = 0; nest_level < data_sink_node->inputCount(); ++nest_level) {
993  const auto source = data_sink_node->getInput(nest_level);
994  const auto scan_source = dynamic_cast<const RelScan*>(source);
995  if (scan_source) {
996  CHECK(source->getOutputMetainfo().empty());
997  for (size_t i = 0; i < scan_source->size(); ++i) {
998  auto synthesized_used_input = new RexInput(scan_source, i);
999  used_inputs_owned.emplace_back(synthesized_used_input);
1000  used_inputs.insert(synthesized_used_input);
1001  }
1002  } else {
1003  const auto& partial_in_metadata = source->getOutputMetainfo();
1004  for (size_t i = 0; i < partial_in_metadata.size(); ++i) {
1005  auto synthesized_used_input = new RexInput(source, i);
1006  used_inputs_owned.emplace_back(synthesized_used_input);
1007  used_inputs.insert(synthesized_used_input);
1008  }
1009  }
1010  }
1011  return std::make_pair(used_inputs, used_inputs_owned);
1012 }
#define CHECK(condition)
Definition: Logger.h:203
const RelAlgNode * get_data_sink(const RelAlgNode *ra_node)

+ Here is the call graph for this function:

std::pair<std::unordered_set<const RexInput*>, std::vector<std::shared_ptr<RexInput> > > anonymous_namespace{RelAlgExecutor.cpp}::get_used_inputs ( const RelLogicalUnion logical_union,
const Catalog_Namespace::Catalog  
)

Definition at line 1015 of file RelAlgExecutor.cpp.

References RelAlgNode::getInput(), i, RelAlgNode::inputCount(), and VLOG.

1015  {
1016  std::unordered_set<const RexInput*> used_inputs(logical_union->inputCount());
1017  std::vector<std::shared_ptr<RexInput>> used_inputs_owned;
1018  used_inputs_owned.reserve(logical_union->inputCount());
1019  VLOG(3) << "logical_union->inputCount()=" << logical_union->inputCount();
1020  auto const n_inputs = logical_union->inputCount();
1021  for (size_t nest_level = 0; nest_level < n_inputs; ++nest_level) {
1022  auto input = logical_union->getInput(nest_level);
1023  for (size_t i = 0; i < input->size(); ++i) {
1024  used_inputs_owned.emplace_back(std::make_shared<RexInput>(input, i));
1025  used_inputs.insert(used_inputs_owned.back().get());
1026  }
1027  }
1028  return std::make_pair(std::move(used_inputs), std::move(used_inputs_owned));
1029 }
const RelAlgNode * getInput(const size_t idx) const
const size_t inputCount() const
#define VLOG(n)
Definition: Logger.h:297

+ Here is the call graph for this function:

ErrorInfo anonymous_namespace{RelAlgExecutor.cpp}::getErrorDescription ( const int32_t  error_code)

Definition at line 3263 of file RelAlgExecutor.cpp.

References Executor::ERR_COLUMNAR_CONVERSION_NOT_SUPPORTED, Executor::ERR_DIV_BY_ZERO, Executor::ERR_GEOS, Executor::ERR_INTERRUPTED, Executor::ERR_OUT_OF_CPU_MEM, Executor::ERR_OUT_OF_GPU_MEM, Executor::ERR_OUT_OF_RENDER_MEM, Executor::ERR_OUT_OF_TIME, Executor::ERR_OVERFLOW_OR_UNDERFLOW, Executor::ERR_SINGLE_VALUE_FOUND_MULTIPLE_VALUES, Executor::ERR_STREAMING_TOP_N_NOT_SUPPORTED_IN_RENDER_QUERY, Executor::ERR_STRING_CONST_IN_RESULTSET, Executor::ERR_TOO_MANY_LITERALS, and Executor::ERR_UNSUPPORTED_SELF_JOIN.

Referenced by RelAlgExecutor::getErrorMessageFromCode().

3263  {
3264  switch (error_code) {
3265  case Executor::ERR_DIV_BY_ZERO:
3266  return {.code = "ERR_DIV_BY_ZERO", .description = "Division by zero"};
3267  case Executor::ERR_OUT_OF_GPU_MEM:
3268  return {.code = "ERR_OUT_OF_GPU_MEM",
3269  .description =
3270  "Query couldn't keep the entire working set of columns in GPU memory"};
3271  case Executor::ERR_UNSUPPORTED_SELF_JOIN:
3272  return {.code = "ERR_UNSUPPORTED_SELF_JOIN",
3273  .description = "Self joins not supported yet"};
3274  case Executor::ERR_OUT_OF_CPU_MEM:
3275  return {.code = "ERR_OUT_OF_CPU_MEM",
3276  .description = "Not enough host memory to execute the query"};
3277  case Executor::ERR_OVERFLOW_OR_UNDERFLOW:
3278  return {.code = "ERR_OVERFLOW_OR_UNDERFLOW",
3279  .description = "Overflow or underflow"};
3280  case Executor::ERR_OUT_OF_TIME:
3281  return {.code = "ERR_OUT_OF_TIME",
3282  .description = "Query execution has exceeded the time limit"};
3283  case Executor::ERR_INTERRUPTED:
3284  return {.code = "ERR_INTERRUPTED",
3285  .description = "Query execution has been interrupted"};
3286  case Executor::ERR_COLUMNAR_CONVERSION_NOT_SUPPORTED:
3287  return {
3288  .code = "ERR_COLUMNAR_CONVERSION_NOT_SUPPORTED",
3289  .description = "Columnar conversion not supported for variable length types"};
3290  case Executor::ERR_TOO_MANY_LITERALS:
3291  return {.code = "ERR_TOO_MANY_LITERALS",
3292  .description = "Too many literals in the query"};
3293  case Executor::ERR_STRING_CONST_IN_RESULTSET:
3294  return {.code = "ERR_STRING_CONST_IN_RESULTSET",
3295  .description =
3296  "NONE ENCODED String types are not supported as input result set."};
3297  case Executor::ERR_OUT_OF_RENDER_MEM:
3298  return {.code = "ERR_OUT_OF_RENDER_MEM",
3299  .description = "Not enough OpenGL memory to render the query results"};
3300  case Executor::ERR_STREAMING_TOP_N_NOT_SUPPORTED_IN_RENDER_QUERY:
3301  return {.code = "ERR_STREAMING_TOP_N_NOT_SUPPORTED_IN_RENDER_QUERY",
3302  .description = "Streaming-Top-N not supported in Render Query"};
3303  case Executor::ERR_SINGLE_VALUE_FOUND_MULTIPLE_VALUES:
3304  return {.code = "ERR_SINGLE_VALUE_FOUND_MULTIPLE_VALUES",
3305  .description = "Multiple distinct values encountered"};
3306  case Executor::ERR_GEOS:
3307  return {.code = "ERR_GEOS", .description = "ERR_GEOS"};
3308  default:
3309  return {.code = nullptr, .description = nullptr};
3310  }
3311 }
static const int32_t ERR_INTERRUPTED
Definition: Execute.h:1120
static const int32_t ERR_GEOS
Definition: Execute.h:1126
static const int32_t ERR_TOO_MANY_LITERALS
Definition: Execute.h:1122
static const int32_t ERR_STRING_CONST_IN_RESULTSET
Definition: Execute.h:1123
static const int32_t ERR_STREAMING_TOP_N_NOT_SUPPORTED_IN_RENDER_QUERY
Definition: Execute.h:1124
static const int32_t ERR_COLUMNAR_CONVERSION_NOT_SUPPORTED
Definition: Execute.h:1121
static const int32_t ERR_DIV_BY_ZERO
Definition: Execute.h:1112
static const int32_t ERR_OUT_OF_RENDER_MEM
Definition: Execute.h:1116
static const int32_t ERR_OVERFLOW_OR_UNDERFLOW
Definition: Execute.h:1118
static const int32_t ERR_OUT_OF_TIME
Definition: Execute.h:1119
static const int32_t ERR_UNSUPPORTED_SELF_JOIN
Definition: Execute.h:1115
static const int32_t ERR_SINGLE_VALUE_FOUND_MULTIPLE_VALUES
Definition: Execute.h:1125
static const int32_t ERR_OUT_OF_GPU_MEM
Definition: Execute.h:1113
static const int32_t ERR_OUT_OF_CPU_MEM
Definition: Execute.h:1117

+ Here is the caller graph for this function:

size_t anonymous_namespace{RelAlgExecutor.cpp}::groups_approx_upper_bound ( const std::vector< InputTableInfo > &  table_infos)

Upper-bound estimate for the number of groups. Not strictly correct and not tight, but if the tables involved are really small we shouldn't waste time doing the NDV estimation. We don't account for cross-joins and/or GROUP BY on unnested arrays, which is why this estimate isn't entirely reliable.

Definition at line 2728 of file RelAlgExecutor.cpp.

References CHECK.

Referenced by RelAlgExecutor::executeWorkUnit().

2728  {
2729  CHECK(!table_infos.empty());
2730  const auto& first_table = table_infos.front();
2731  size_t max_num_groups = first_table.info.getNumTuplesUpperBound();
2732  for (const auto& table_info : table_infos) {
2733  if (table_info.info.getNumTuplesUpperBound() > max_num_groups) {
2734  max_num_groups = table_info.info.getNumTuplesUpperBound();
2735  }
2736  }
2737  return std::max(max_num_groups, size_t(1));
2738 }
#define CHECK(condition)
Definition: Logger.h:203

+ Here is the caller graph for this function:

template<class T >
int64_t anonymous_namespace{RelAlgExecutor.cpp}::insert_one_dict_str ( T *  col_data,
const std::string &  columnName,
const SQLTypeInfo columnType,
const Analyzer::Constant col_cv,
const Catalog_Namespace::Catalog catalog 
)

Definition at line 2094 of file RelAlgExecutor.cpp.

References CHECK, logger::ERROR, SQLTypeInfo::get_comp_param(), Analyzer::Constant::get_constval(), Analyzer::Constant::get_is_null(), Catalog_Namespace::Catalog::getMetadataForDict(), inline_fixed_encoding_null_val(), LOG, Datum::stringval, and omnisci.dtypes::T.

Referenced by RelAlgExecutor::executeSimpleInsert(), and insert_one_dict_str().

2098  {
2099  if (col_cv->get_is_null()) {
2100  *col_data = inline_fixed_encoding_null_val(columnType);
2101  } else {
2102  const int dict_id = columnType.get_comp_param();
2103  const auto col_datum = col_cv->get_constval();
2104  const auto& str = *col_datum.stringval;
2105  const auto dd = catalog.getMetadataForDict(dict_id);
2106  CHECK(dd && dd->stringDict);
2107  int32_t str_id = dd->stringDict->getOrAdd(str);
2108  if (!dd->dictIsTemp) {
2109  const auto checkpoint_ok = dd->stringDict->checkpoint();
2110  if (!checkpoint_ok) {
2111  throw std::runtime_error("Failed to checkpoint dictionary for column " +
2112  columnName);
2113  }
2114  }
2115  const bool invalid = str_id > max_valid_int_value<T>();
2116  if (invalid || str_id == inline_int_null_value<int32_t>()) {
2117  if (invalid) {
2118  LOG(ERROR) << "Could not encode string: " << str
2119  << ", the encoded value doesn't fit in " << sizeof(T) * 8
2120  << " bits. Will store NULL instead.";
2121  }
2122  str_id = inline_fixed_encoding_null_val(columnType);
2123  }
2124  *col_data = str_id;
2125  }
2126  return *col_data;
2127 }
#define LOG(tag)
Definition: Logger.h:194
bool get_is_null() const
Definition: Analyzer.h:334
const DictDescriptor * getMetadataForDict(int dict_ref, bool loadDict=true) const
Definition: Catalog.cpp:1494
std::string * stringval
Definition: sqltypes.h:214
Datum get_constval() const
Definition: Analyzer.h:335
HOST DEVICE int get_comp_param() const
Definition: sqltypes.h:323
#define CHECK(condition)
Definition: Logger.h:203
int64_t inline_fixed_encoding_null_val(const SQL_TYPE_INFO &ti)

+ Here is the call graph for this function:

+ Here is the caller graph for this function:

template<class T >
int64_t anonymous_namespace{RelAlgExecutor.cpp}::insert_one_dict_str ( T *  col_data,
const ColumnDescriptor cd,
const Analyzer::Constant col_cv,
const Catalog_Namespace::Catalog catalog 
)

Definition at line 2130 of file RelAlgExecutor.cpp.

References ColumnDescriptor::columnName, ColumnDescriptor::columnType, and insert_one_dict_str().

2133  {
2134  return insert_one_dict_str(col_data, cd->columnName, cd->columnType, col_cv, catalog);
2135 }
int64_t insert_one_dict_str(T *col_data, const std::string &columnName, const SQLTypeInfo &columnType, const Analyzer::Constant *col_cv, const Catalog_Namespace::Catalog &catalog)
SQLTypeInfo columnType
std::string columnName

+ Here is the call graph for this function:

int64_t anonymous_namespace{RelAlgExecutor.cpp}::int_value_from_numbers_ptr ( const SQLTypeInfo type_info,
const int8_t *  data 
)

Definition at line 2159 of file RelAlgExecutor.cpp.

References CHECK, SQLTypeInfo::get_compression(), SQLTypeInfo::get_logical_size(), SQLTypeInfo::get_size(), SQLTypeInfo::get_type(), kBIGINT, kCHAR, kDATE, kENCODING_DICT, kINT, kSMALLINT, kTEXT, kTIME, kTIMESTAMP, kTINYINT, and kVARCHAR.

Referenced by get_shard_for_key().

2159  {
2160  size_t sz = 0;
2161  switch (type_info.get_type()) {
2162  case kTINYINT:
2163  case kSMALLINT:
2164  case kINT:
2165  case kBIGINT:
2166  case kTIMESTAMP:
2167  case kTIME:
2168  case kDATE:
2169  sz = type_info.get_logical_size();
2170  break;
2171  case kTEXT:
2172  case kVARCHAR:
2173  case kCHAR:
2174  CHECK(type_info.get_compression() == kENCODING_DICT);
2175  sz = type_info.get_size();
2176  break;
2177  default:
2178  CHECK(false) << "Unexpected sharding key datatype";
2179  }
2180 
2181  switch (sz) {
2182  case 1:
2183  return *(reinterpret_cast<const int8_t*>(data));
2184  case 2:
2185  return *(reinterpret_cast<const int16_t*>(data));
2186  case 4:
2187  return *(reinterpret_cast<const int32_t*>(data));
2188  case 8:
2189  return *(reinterpret_cast<const int64_t*>(data));
2190  default:
2191  CHECK(false);
2192  return 0;
2193  }
2194 }
HOST DEVICE int get_size() const
Definition: sqltypes.h:324
Definition: sqltypes.h:48
HOST DEVICE SQLTypes get_type() const
Definition: sqltypes.h:314
int get_logical_size() const
Definition: sqltypes.h:325
Definition: sqltypes.h:51
Definition: sqltypes.h:52
HOST DEVICE EncodingType get_compression() const
Definition: sqltypes.h:322
Definition: sqltypes.h:40
#define CHECK(condition)
Definition: Logger.h:203
Definition: sqltypes.h:44

+ Here is the call graph for this function:

+ Here is the caller graph for this function:

bool anonymous_namespace{RelAlgExecutor.cpp}::is_agg ( const Analyzer::Expr expr)

Definition at line 1438 of file RelAlgExecutor.cpp.

References Analyzer::AggExpr::get_aggtype(), kAVG, kMAX, kMIN, and kSUM.

Referenced by anonymous_namespace{RelAlgDagBuilder.cpp}::create_compound(), RelAlgExecutor::executeWorkUnit(), get_logical_type_for_expr(), and ResultSet::getSingleSlotTargetBitmap().

1438  {
1439  const auto agg_expr = dynamic_cast<const Analyzer::AggExpr*>(expr);
1440  if (agg_expr && agg_expr->get_contains_agg()) {
1441  auto agg_type = agg_expr->get_aggtype();
1442  if (agg_type == SQLAgg::kMIN || agg_type == SQLAgg::kMAX ||
1443  agg_type == SQLAgg::kSUM || agg_type == SQLAgg::kAVG) {
1444  return true;
1445  }
1446  }
1447  return false;
1448 }
Definition: sqldefs.h:73
Definition: sqldefs.h:75
SQLAgg get_aggtype() const
Definition: Analyzer.h:1095
Definition: sqldefs.h:74
Definition: sqldefs.h:72

+ Here is the call graph for this function:

+ Here is the caller graph for this function:

bool anonymous_namespace{RelAlgExecutor.cpp}::is_count_distinct ( const Analyzer::Expr expr)

Definition at line 1433 of file RelAlgExecutor.cpp.

References Analyzer::AggExpr::get_is_distinct().

Referenced by get_logical_type_for_expr().

1433  {
1434  const auto agg_expr = dynamic_cast<const Analyzer::AggExpr*>(expr);
1435  return agg_expr && agg_expr->get_is_distinct();
1436 }
bool get_is_distinct() const
Definition: Analyzer.h:1098

+ Here is the call graph for this function:

+ Here is the caller graph for this function:

bool anonymous_namespace{RelAlgExecutor.cpp}::is_window_execution_unit ( const RelAlgExecutionUnit ra_exe_unit)

Definition at line 1755 of file RelAlgExecutor.cpp.

References RelAlgExecutionUnit::target_exprs.

Referenced by RelAlgExecutor::executeWorkUnit().

1755  {
1756  return std::any_of(ra_exe_unit.target_exprs.begin(),
1757  ra_exe_unit.target_exprs.end(),
1758  [](const Analyzer::Expr* expr) {
1759  return dynamic_cast<const Analyzer::WindowFunction*>(expr);
1760  });
1761 }
std::vector< Analyzer::Expr * > target_exprs

+ Here is the caller graph for this function:

std::vector<JoinType> anonymous_namespace{RelAlgExecutor.cpp}::left_deep_join_types ( const RelLeftDeepInnerJoin left_deep_join)

Definition at line 3440 of file RelAlgExecutor.cpp.

References CHECK_GE, RelLeftDeepInnerJoin::getOuterCondition(), INNER, RelAlgNode::inputCount(), and LEFT.

Referenced by RelAlgExecutor::createCompoundWorkUnit(), RelAlgExecutor::createProjectWorkUnit(), and RelAlgExecutor::translateLeftDeepJoinFilter().

3440  {
3441  CHECK_GE(left_deep_join->inputCount(), size_t(2));
3442  std::vector<JoinType> join_types(left_deep_join->inputCount() - 1, JoinType::INNER);
3443  for (size_t nesting_level = 1; nesting_level <= left_deep_join->inputCount() - 1;
3444  ++nesting_level) {
3445  if (left_deep_join->getOuterCondition(nesting_level)) {
3446  join_types[nesting_level - 1] = JoinType::LEFT;
3447  }
3448  }
3449  return join_types;
3450 }
const RexScalar * getOuterCondition(const size_t nesting_level) const
#define CHECK_GE(x, y)
Definition: Logger.h:216
const size_t inputCount() const

+ Here is the call graph for this function:

+ Here is the caller graph for this function:

template<class QualsList >
bool anonymous_namespace{RelAlgExecutor.cpp}::list_contains_expression ( const QualsList &  haystack,
const std::shared_ptr< Analyzer::Expr > &  needle 
)

Definition at line 3621 of file RelAlgExecutor.cpp.

Referenced by reverse_logical_distribution().

3622  {
3623  for (const auto& qual : haystack) {
3624  if (*qual == *needle) {
3625  return true;
3626  }
3627  }
3628  return false;
3629 }

+ Here is the caller graph for this function:

bool anonymous_namespace{RelAlgExecutor.cpp}::node_is_aggregate ( const RelAlgNode ra)

Definition at line 58 of file RelAlgExecutor.cpp.

Referenced by RelAlgExecutor::executeRelAlgQuerySingleStep(), and RelAlgExecutor::executeSort().

58  {
59  const auto compound = dynamic_cast<const RelCompound*>(ra);
60  const auto aggregate = dynamic_cast<const RelAggregate*>(ra);
61  return ((compound && compound->isAggregate()) || aggregate);
62 }

+ Here is the caller graph for this function:

void anonymous_namespace{RelAlgExecutor.cpp}::prepare_foreign_table_for_execution ( const RelAlgNode ra_node,
const Catalog_Namespace::Catalog catalog 
)

Definition at line 145 of file RelAlgExecutor.cpp.

References prepare_string_dictionaries(), and set_parallelism_hints().

Referenced by RelAlgExecutor::executeRelAlgQueryNoRetry(), and RelAlgExecutor::executeRelAlgStep().

146  {
147  // Iterate through ra_node inputs for types that need to be loaded pre-execution
148  // If they do not have valid metadata, load them into CPU memory to generate
149  // the metadata and leave them ready to be used by the query
150  set_parallelism_hints(ra_node, catalog);
151  prepare_string_dictionaries(ra_node, catalog);
152 }
void prepare_string_dictionaries(const RelAlgNode &ra_node, const Catalog_Namespace::Catalog &catalog)
void set_parallelism_hints(const RelAlgNode &ra_node, const Catalog_Namespace::Catalog &catalog)

+ Here is the call graph for this function:

+ Here is the caller graph for this function:

void anonymous_namespace{RelAlgExecutor.cpp}::prepare_string_dictionaries ( const RelAlgNode ra_node,
const Catalog_Namespace::Catalog catalog 
)

Definition at line 110 of file RelAlgExecutor.cpp.

References CHECK, Data_Namespace::CPU_LEVEL, StorageType::FOREIGN_TABLE, get_physical_inputs(), Chunk_NS::Chunk::getChunk(), Catalog_Namespace::Catalog::getColumnIdBySpi(), Catalog_Namespace::Catalog::getDatabaseId(), Catalog_Namespace::Catalog::getDataMgr(), Catalog_Namespace::Catalog::getForeignTable(), Catalog_Namespace::Catalog::getMetadataForColumn(), Catalog_Namespace::Catalog::getMetadataForTable(), foreign_storage::is_metadata_placeholder(), and test_readcsv::table.

Referenced by prepare_foreign_table_for_execution().

111  {
112  for (const auto& physical_input : get_physical_inputs(&ra_node)) {
113  int table_id = physical_input.table_id;
114  auto table = catalog.getMetadataForTable(table_id, false);
115  if (table && table->storageType == StorageType::FOREIGN_TABLE) {
116  int col_id = catalog.getColumnIdBySpi(table_id, physical_input.col_id);
117  const auto col_desc = catalog.getMetadataForColumn(table_id, col_id);
118  auto foreign_table = catalog.getForeignTable(table_id);
119  if (col_desc->columnType.is_dict_encoded_type()) {
120  CHECK(foreign_table->fragmenter != nullptr);
121  for (const auto& fragment :
122  foreign_table->fragmenter->getFragmentsForQuery().fragments) {
123  ChunkKey chunk_key = {
124  catalog.getDatabaseId(), table_id, col_id, fragment.fragmentId};
125  const ChunkMetadataMap& metadata_map = fragment.getChunkMetadataMap();
126  CHECK(metadata_map.find(col_id) != metadata_map.end());
127  if (foreign_storage::is_metadata_placeholder(*(metadata_map.at(col_id)))) {
128  // When this goes out of scope it will stay in CPU cache but become
129  // evictable
130  std::shared_ptr<Chunk_NS::Chunk> chunk =
131  Chunk_NS::Chunk::getChunk(col_desc,
132  &(catalog.getDataMgr()),
133  chunk_key,
134  Data_Namespace::CPU_LEVEL,
135  0,
136  0,
137  0);
138  }
139  }
140  }
141  }
142  }
143 }
const foreign_storage::ForeignTable * getForeignTable(const std::string &tableName) const
Definition: Catalog.cpp:1448
std::vector< int > ChunkKey
Definition: types.h:37
Data_Namespace::DataMgr & getDataMgr() const
Definition: Catalog.h:223
std::map< int, std::shared_ptr< ChunkMetadata >> ChunkMetadataMap
const ColumnDescriptor * getMetadataForColumn(int tableId, const std::string &colName) const
int getDatabaseId() const
Definition: Catalog.h:277
bool is_metadata_placeholder(const ChunkMetadata &metadata)
static std::shared_ptr< Chunk > getChunk(const ColumnDescriptor *cd, DataMgr *data_mgr, const ChunkKey &key, const MemoryLevel mem_level, const int deviceId, const size_t num_bytes, const size_t num_elems)
Definition: Chunk.cpp:28
#define CHECK(condition)
Definition: Logger.h:203
const TableDescriptor * getMetadataForTable(const std::string &tableName, const bool populateFragmenter=true) const
Returns a pointer to a const TableDescriptor struct matching the provided tableName.
std::unordered_set< PhysicalInput > get_physical_inputs(const RelAlgNode *ra)
static constexpr char const * FOREIGN_TABLE
const int getColumnIdBySpi(const int tableId, const size_t spi) const
Definition: Catalog.cpp:1593

+ Here is the call graph for this function:

+ Here is the caller graph for this function:

std::shared_ptr<Analyzer::Expr> anonymous_namespace{RelAlgExecutor.cpp}::reverse_logical_distribution ( const std::shared_ptr< Analyzer::Expr > &  expr)

Definition at line 3634 of file RelAlgExecutor.cpp.

References build_logical_expression(), CHECK_GE, i, kAND, kONE, kOR, list_contains_expression(), Parser::OperExpr::normalize(), qual_to_conjunctive_form(), and qual_to_disjunctive_form().

Referenced by RelAlgExecutor::makeJoinQuals().

3635  {
3636  const auto expr_terms = qual_to_disjunctive_form(expr);
3637  CHECK_GE(expr_terms.size(), size_t(1));
3638  const auto& first_term = expr_terms.front();
3639  const auto first_term_factors = qual_to_conjunctive_form(first_term);
3640  std::vector<std::shared_ptr<Analyzer::Expr>> common_factors;
3641  // First, collect the conjunctive components common to all the disjunctive components.
3642  // Don't do it for simple qualifiers, we only care about expensive or join qualifiers.
3643  for (const auto& first_term_factor : first_term_factors.quals) {
3644  bool is_common =
3645  expr_terms.size() > 1; // Only report common factors for disjunction.
3646  for (size_t i = 1; i < expr_terms.size(); ++i) {
3647  const auto crt_term_factors = qual_to_conjunctive_form(expr_terms[i]);
3648  if (!list_contains_expression(crt_term_factors.quals, first_term_factor)) {
3649  is_common = false;
3650  break;
3651  }
3652  }
3653  if (is_common) {
3654  common_factors.push_back(first_term_factor);
3655  }
3656  }
3657  if (common_factors.empty()) {
3658  return expr;
3659  }
3660  // Now that the common expressions are known, collect the remaining expressions.
3661  std::vector<std::shared_ptr<Analyzer::Expr>> remaining_terms;
3662  for (const auto& term : expr_terms) {
3663  const auto term_cf = qual_to_conjunctive_form(term);
3664  std::vector<std::shared_ptr<Analyzer::Expr>> remaining_quals(
3665  term_cf.simple_quals.begin(), term_cf.simple_quals.end());
3666  for (const auto& qual : term_cf.quals) {
3667  if (!list_contains_expression(common_factors, qual)) {
3668  remaining_quals.push_back(qual);
3669  }
3670  }
3671  if (!remaining_quals.empty()) {
3672  remaining_terms.push_back(build_logical_expression(remaining_quals, kAND));
3673  }
3674  }
3675  // Reconstruct the expression with the transformation applied.
3676  const auto common_expr = build_logical_expression(common_factors, kAND);
3677  if (remaining_terms.empty()) {
3678  return common_expr;
3679  }
3680  const auto remaining_expr = build_logical_expression(remaining_terms, kOR);
3681  return Parser::OperExpr::normalize(kAND, kONE, common_expr, remaining_expr);
3682 }
Definition: sqldefs.h:38
#define CHECK_GE(x, y)
Definition: Logger.h:216
QualsConjunctiveForm qual_to_conjunctive_form(const std::shared_ptr< Analyzer::Expr > qual_expr)
bool list_contains_expression(const QualsList &haystack, const std::shared_ptr< Analyzer::Expr > &needle)
Definition: sqldefs.h:37
std::shared_ptr< Analyzer::Expr > build_logical_expression(const std::vector< std::shared_ptr< Analyzer::Expr >> &factors, const SQLOps sql_op)
static std::shared_ptr< Analyzer::Expr > normalize(const SQLOps optype, const SQLQualifier qual, std::shared_ptr< Analyzer::Expr > left_expr, std::shared_ptr< Analyzer::Expr > right_expr)
Definition: ParserNode.cpp:284
Definition: sqldefs.h:69
std::vector< std::shared_ptr< Analyzer::Expr > > qual_to_disjunctive_form(const std::shared_ptr< Analyzer::Expr > &qual_expr)

+ Here is the call graph for this function:

+ Here is the caller graph for this function:

std::list<std::shared_ptr<Analyzer::Expr> > anonymous_namespace{RelAlgExecutor.cpp}::rewrite_quals ( const std::list< std::shared_ptr< Analyzer::Expr >> &  quals)

Definition at line 3495 of file RelAlgExecutor.cpp.

References rewrite_expr().

Referenced by RelAlgExecutor::createCompoundWorkUnit().

3496  {
3497  std::list<std::shared_ptr<Analyzer::Expr>> rewritten_quals;
3498  for (const auto& qual : quals) {
3499  const auto rewritten_qual = rewrite_expr(qual.get());
3500  rewritten_quals.push_back(rewritten_qual ? rewritten_qual : qual);
3501  }
3502  return rewritten_quals;
3503 }
Analyzer::ExpressionPtr rewrite_expr(const Analyzer::Expr *expr)

+ Here is the call graph for this function:

+ Here is the caller graph for this function:

std::vector<const RexScalar*> anonymous_namespace{RelAlgExecutor.cpp}::rex_to_conjunctive_form ( const RexScalar qual_expr)

Definition at line 3594 of file RelAlgExecutor.cpp.

References CHECK, CHECK_GE, i, and kAND.

Referenced by RelAlgExecutor::makeJoinQuals().

3594  {
3595  CHECK(qual_expr);
3596  const auto bin_oper = dynamic_cast<const RexOperator*>(qual_expr);
3597  if (!bin_oper || bin_oper->getOperator() != kAND) {
3598  return {qual_expr};
3599  }
3600  CHECK_GE(bin_oper->size(), size_t(2));
3601  auto lhs_cf = rex_to_conjunctive_form(bin_oper->getOperand(0));
3602  for (size_t i = 1; i < bin_oper->size(); ++i) {
3603  const auto rhs_cf = rex_to_conjunctive_form(bin_oper->getOperand(i));
3604  lhs_cf.insert(lhs_cf.end(), rhs_cf.begin(), rhs_cf.end());
3605  }
3606  return lhs_cf;
3607 }
#define CHECK_GE(x, y)
Definition: Logger.h:216
std::vector< const RexScalar * > rex_to_conjunctive_form(const RexScalar *qual_expr)
Definition: sqldefs.h:37
#define CHECK(condition)
Definition: Logger.h:203

+ Here is the caller graph for this function:

const RexScalar* anonymous_namespace{RelAlgExecutor.cpp}::scalar_at ( const size_t  i,
const RelCompound compound 
)

Definition at line 1221 of file RelAlgExecutor.cpp.

References RelCompound::getScalarSource().

Referenced by translate_scalar_sources(), and translate_scalar_sources_for_update().

1221  {
1222  return compound->getScalarSource(i);
1223 }
const RexScalar * getScalarSource(const size_t i) const

+ Here is the call graph for this function:

+ Here is the caller graph for this function:

const RexScalar* anonymous_namespace{RelAlgExecutor.cpp}::scalar_at ( const size_t  i,
const RelProject project 
)

Definition at line 1225 of file RelAlgExecutor.cpp.

References RelProject::getProjectAt().

1225  {
1226  return project->getProjectAt(i);
1227 }
const RexScalar * getProjectAt(const size_t idx) const

+ Here is the call graph for this function:

const RexScalar* anonymous_namespace{RelAlgExecutor.cpp}::scalar_at ( const size_t  i,
const RelTableFunction table_func 
)

Definition at line 1229 of file RelAlgExecutor.cpp.

References RelTableFunction::getTableFuncInputAt().

1229  {
1230  return table_func->getTableFuncInputAt(i);
1231 }
const RexScalar * getTableFuncInputAt(const size_t idx) const

+ Here is the call graph for this function:

void anonymous_namespace{RelAlgExecutor.cpp}::set_parallelism_hints ( const RelAlgNode ra_node,
const Catalog_Namespace::Catalog catalog 
)

Definition at line 76 of file RelAlgExecutor.cpp.

References CHECK, Data_Namespace::CPU_LEVEL, StorageType::FOREIGN_TABLE, get_physical_inputs(), Catalog_Namespace::Catalog::getColumnIdBySpi(), Catalog_Namespace::Catalog::getDatabaseId(), Catalog_Namespace::Catalog::getDataMgr(), PersistentStorageMgr::getForeignStorageMgr(), Catalog_Namespace::Catalog::getForeignTable(), Catalog_Namespace::Catalog::getMetadataForColumn(), Catalog_Namespace::Catalog::getMetadataForTable(), Data_Namespace::DataMgr::getPersistentStorageMgr(), and test_readcsv::table.

Referenced by prepare_foreign_table_for_execution().

77  {
78  std::map<ChunkKey, std::set<foreign_storage::ForeignStorageMgr::ParallelismHint>>
79  parallelism_hints_per_table;
80  for (const auto& physical_input : get_physical_inputs(&ra_node)) {
81  int table_id = physical_input.table_id;
82  auto table = catalog.getMetadataForTable(table_id, false);
83  if (table && table->storageType == StorageType::FOREIGN_TABLE) {
84  int col_id = catalog.getColumnIdBySpi(table_id, physical_input.col_id);
85  const auto col_desc = catalog.getMetadataForColumn(table_id, col_id);
86  auto foreign_table = catalog.getForeignTable(table_id);
87  for (const auto& fragment :
88  foreign_table->fragmenter->getFragmentsForQuery().fragments) {
89  Chunk_NS::Chunk chunk{col_desc};
90  ChunkKey chunk_key = {
91  catalog.getDatabaseId(), table_id, col_id, fragment.fragmentId};
92  // do not include chunk hints that are in CPU memory
93  if (!chunk.isChunkOnDevice(
94  &catalog.getDataMgr(), chunk_key, Data_Namespace::CPU_LEVEL, 0)) {
95  parallelism_hints_per_table[{catalog.getDatabaseId(), table_id}].insert(
96  foreign_storage::ForeignStorageMgr::ParallelismHint{col_id,
97  fragment.fragmentId});
98  }
99  }
100  }
101  }
102  if (!parallelism_hints_per_table.empty()) {
103  auto foreign_storage_mgr =
104  catalog.getDataMgr().getPersistentStorageMgr()->getForeignStorageMgr();
105  CHECK(foreign_storage_mgr);
106  foreign_storage_mgr->setParallelismHints(parallelism_hints_per_table);
107  }
108 }
const foreign_storage::ForeignTable * getForeignTable(const std::string &tableName) const
Definition: Catalog.cpp:1448
std::vector< int > ChunkKey
Definition: types.h:37
Data_Namespace::DataMgr & getDataMgr() const
Definition: Catalog.h:223
std::pair< int, int > ParallelismHint
PersistentStorageMgr * getPersistentStorageMgr() const
Definition: DataMgr.cpp:571
foreign_storage::ForeignStorageMgr * getForeignStorageMgr() const
const ColumnDescriptor * getMetadataForColumn(int tableId, const std::string &colName) const
int getDatabaseId() const
Definition: Catalog.h:277
#define CHECK(condition)
Definition: Logger.h:203
const TableDescriptor * getMetadataForTable(const std::string &tableName, const bool populateFragmenter=true) const
Returns a pointer to a const TableDescriptor struct matching the provided tableName.
std::unordered_set< PhysicalInput > get_physical_inputs(const RelAlgNode *ra)
static constexpr char const * FOREIGN_TABLE
const int getColumnIdBySpi(const int tableId, const size_t spi) const
Definition: Catalog.cpp:1593

+ Here is the call graph for this function:

+ Here is the caller graph for this function:

std::shared_ptr<Analyzer::Expr> anonymous_namespace{RelAlgExecutor.cpp}::set_transient_dict ( const std::shared_ptr< Analyzer::Expr expr)

Definition at line 1233 of file RelAlgExecutor.cpp.

References kENCODING_DICT, kENCODING_NONE, and TRANSIENT_DICT_ID.

Referenced by set_transient_dict_maybe(), translate_groupby_exprs(), and translate_targets().

1234  {
1235  const auto& ti = expr->get_type_info();
1236  if (!ti.is_string() || ti.get_compression() != kENCODING_NONE) {
1237  return expr;
1238  }
1239  auto transient_dict_ti = ti;
1240  transient_dict_ti.set_compression(kENCODING_DICT);
1241  transient_dict_ti.set_comp_param(TRANSIENT_DICT_ID);
1242  transient_dict_ti.set_fixed_size();
1243  return expr->add_cast(transient_dict_ti);
1244 }
#define TRANSIENT_DICT_ID
Definition: sqltypes.h:253

+ Here is the caller graph for this function:

void anonymous_namespace{RelAlgExecutor.cpp}::set_transient_dict_maybe ( std::vector< std::shared_ptr< Analyzer::Expr >> &  scalar_sources,
const std::shared_ptr< Analyzer::Expr > &  expr 
)

Definition at line 1246 of file RelAlgExecutor.cpp.

References fold_expr(), and set_transient_dict().

Referenced by translate_scalar_sources(), and translate_scalar_sources_for_update().

1248  {
1249  try {
1250  scalar_sources.push_back(set_transient_dict(fold_expr(expr.get())));
1251  } catch (...) {
1252  scalar_sources.push_back(fold_expr(expr.get()));
1253  }
1254 }
std::shared_ptr< Analyzer::Expr > set_transient_dict(const std::shared_ptr< Analyzer::Expr > expr)
std::shared_ptr< Analyzer::Expr > fold_expr(const Analyzer::Expr *expr)

+ Here is the call graph for this function:

+ Here is the caller graph for this function:

std::vector<std::shared_ptr<Analyzer::Expr> > anonymous_namespace{RelAlgExecutor.cpp}::synthesize_inputs ( const RelAlgNode ra_node,
const size_t  nest_level,
const std::vector< TargetMetaInfo > &  in_metainfo,
const std::unordered_map< const RelAlgNode *, int > &  input_to_nest_level 
)

Definition at line 3755 of file RelAlgExecutor.cpp.

References CHECK, CHECK_GE, CHECK_LE, RelAlgNode::getInput(), RelAlgNode::inputCount(), and table_id_from_ra().

Referenced by RelAlgExecutor::createAggregateWorkUnit(), and get_inputs_meta().

3759  {
3760  CHECK_LE(size_t(1), ra_node->inputCount());
3761  CHECK_GE(size_t(2), ra_node->inputCount());
3762  const auto input = ra_node->getInput(nest_level);
3763  const auto it_rte_idx = input_to_nest_level.find(input);
3764  CHECK(it_rte_idx != input_to_nest_level.end());
3765  const int rte_idx = it_rte_idx->second;
3766  const int table_id = table_id_from_ra(input);
3767  std::vector<std::shared_ptr<Analyzer::Expr>> inputs;
3768  const auto scan_ra = dynamic_cast<const RelScan*>(input);
3769  int input_idx = 0;
3770  for (const auto& input_meta : in_metainfo) {
3771  inputs.push_back(
3772  std::make_shared<Analyzer::ColumnVar>(input_meta.get_type_info(),
3773  table_id,
3774  scan_ra ? input_idx + 1 : input_idx,
3775  rte_idx));
3776  ++input_idx;
3777  }
3778  return inputs;
3779 }
#define CHECK_GE(x, y)
Definition: Logger.h:216
const RelAlgNode * getInput(const size_t idx) const
#define CHECK_LE(x, y)
Definition: Logger.h:214
int table_id_from_ra(const RelAlgNode *ra_node)
#define CHECK(condition)
Definition: Logger.h:203
const size_t inputCount() const

+ Here is the call graph for this function:

+ Here is the caller graph for this function:

int anonymous_namespace{RelAlgExecutor.cpp}::table_id_from_ra ( const RelAlgNode ra_node)

Definition at line 1031 of file RelAlgExecutor.cpp.

References CHECK, RelAlgNode::getId(), and RelScan::getTableDescriptor().

Referenced by collect_used_input_desc(), get_input_desc_impl(), and synthesize_inputs().

1031  {
1032  const auto scan_ra = dynamic_cast<const RelScan*>(ra_node);
1033  if (scan_ra) {
1034  const auto td = scan_ra->getTableDescriptor();
1035  CHECK(td);
1036  return td->tableId;
1037  }
1038  return -ra_node->getId();
1039 }
unsigned getId() const
#define CHECK(condition)
Definition: Logger.h:203
const TableDescriptor * getTableDescriptor() const

+ Here is the call graph for this function:

+ Here is the caller graph for this function:

std::vector<std::shared_ptr<Analyzer::Expr> > anonymous_namespace{RelAlgExecutor.cpp}::target_exprs_for_union ( RelAlgNode const *  input_node)

Definition at line 3913 of file RelAlgExecutor.cpp.

References RelAlgNode::getId(), RelAlgNode::getOutputMetainfo(), i, shared::printContainer(), and VLOG.

Referenced by RelAlgExecutor::createUnionWorkUnit().

3914  {
3915  std::vector<TargetMetaInfo> const& tmis = input_node->getOutputMetainfo();
3916  VLOG(3) << "input_node->getOutputMetainfo()=" << shared::printContainer(tmis);
3917  const int negative_node_id = -input_node->getId();
3918  std::vector<std::shared_ptr<Analyzer::Expr>> target_exprs;
3919  target_exprs.reserve(tmis.size());
3920  for (size_t i = 0; i < tmis.size(); ++i) {
3921  target_exprs.push_back(std::make_shared<Analyzer::ColumnVar>(
3922  tmis[i].get_type_info(), negative_node_id, i, 0));
3923  }
3924  return target_exprs;
3925 }
PrintContainer< CONTAINER > printContainer(CONTAINER &container)
Definition: misc.h:80
#define VLOG(n)
Definition: Logger.h:297

+ Here is the call graph for this function:

+ Here is the caller graph for this function:

std::shared_ptr<Analyzer::Expr> anonymous_namespace{RelAlgExecutor.cpp}::transform_to_inner ( const Analyzer::Expr *  expr)

Definition at line 1850 of file RelAlgExecutor.cpp.

Referenced by RelAlgExecutor::computeWindow().

1850  {
1851  const auto tuple = dynamic_cast<const Analyzer::ExpressionTuple*>(expr);
1852  if (tuple) {
1853  std::vector<std::shared_ptr<Analyzer::Expr>> transformed_tuple;
1854  for (const auto& element : tuple->getTuple()) {
1855  transformed_tuple.push_back(transform_to_inner(element.get()));
1856  }
1857  return makeExpr<Analyzer::ExpressionTuple>(transformed_tuple);
1858  }
1859  const auto col = dynamic_cast<const Analyzer::ColumnVar*>(expr);
1860  if (!col) {
1861  throw std::runtime_error("Only columns supported in the window partition for now");
1862  }
1863  return makeExpr<Analyzer::ColumnVar>(
1864  col->get_type_info(), col->get_table_id(), col->get_column_id(), 1);
1865 }
std::shared_ptr< Analyzer::Expr > transform_to_inner(const Analyzer::Expr *expr)

+ Here is the caller graph for this function:

std::list<std::shared_ptr<Analyzer::Expr> > anonymous_namespace{RelAlgExecutor.cpp}::translate_groupby_exprs ( const RelCompound *  compound,
const std::vector< std::shared_ptr< Analyzer::Expr >> &  scalar_sources 
)

Definition at line 1329 of file RelAlgExecutor.cpp.

References RelCompound::getGroupByCount(), RelCompound::isAggregate(), and set_transient_dict().

Referenced by RelAlgExecutor::createAggregateWorkUnit(), and RelAlgExecutor::createCompoundWorkUnit().

1331  {
1332  if (!compound->isAggregate()) {
1333  return {nullptr};
1334  }
1335  std::list<std::shared_ptr<Analyzer::Expr>> groupby_exprs;
1336  for (size_t group_idx = 0; group_idx < compound->getGroupByCount(); ++group_idx) {
1337  groupby_exprs.push_back(set_transient_dict(scalar_sources[group_idx]));
1338  }
1339  return groupby_exprs;
1340 }
std::shared_ptr< Analyzer::Expr > set_transient_dict(const std::shared_ptr< Analyzer::Expr > expr)
const size_t getGroupByCount() const
bool isAggregate() const

+ Here is the call graph for this function:

+ Here is the caller graph for this function:

std::list<std::shared_ptr<Analyzer::Expr> > anonymous_namespace{RelAlgExecutor.cpp}::translate_groupby_exprs ( const RelAggregate aggregate,
const std::vector< std::shared_ptr< Analyzer::Expr >> &  scalar_sources 
)

Definition at line 1342 of file RelAlgExecutor.cpp.

References RelAggregate::getGroupByCount(), and set_transient_dict().

1344  {
1345  std::list<std::shared_ptr<Analyzer::Expr>> groupby_exprs;
1346  for (size_t group_idx = 0; group_idx < aggregate->getGroupByCount(); ++group_idx) {
1347  groupby_exprs.push_back(set_transient_dict(scalar_sources[group_idx]));
1348  }
1349  return groupby_exprs;
1350 }
const size_t getGroupByCount() const
std::shared_ptr< Analyzer::Expr > set_transient_dict(const std::shared_ptr< Analyzer::Expr > expr)

+ Here is the call graph for this function:

QualsConjunctiveForm anonymous_namespace{RelAlgExecutor.cpp}::translate_quals ( const RelCompound *  compound,
const RelAlgTranslator &  translator 
)

Definition at line 1352 of file RelAlgExecutor.cpp.

References fold_expr(), RelCompound::getFilterExpr(), qual_to_conjunctive_form(), and RelAlgTranslator::translateScalarRex().

Referenced by RelAlgExecutor::createCompoundWorkUnit().

1353  {
1354  const auto filter_rex = compound->getFilterExpr();
1355  const auto filter_expr =
1356  filter_rex ? translator.translateScalarRex(filter_rex) : nullptr;
1357  return filter_expr ? qual_to_conjunctive_form(fold_expr(filter_expr.get()))
1359 }
const RexScalar * getFilterExpr() const
std::shared_ptr< Analyzer::Expr > translateScalarRex(const RexScalar *rex) const
QualsConjunctiveForm qual_to_conjunctive_form(const std::shared_ptr< Analyzer::Expr > qual_expr)
std::shared_ptr< Analyzer::Expr > fold_expr(const Analyzer::Expr *expr)

+ Here is the call graph for this function:

+ Here is the caller graph for this function:

template<class RA >
std::vector<std::shared_ptr<Analyzer::Expr> > anonymous_namespace{RelAlgExecutor.cpp}::translate_scalar_sources ( const RA *  ra_node,
const RelAlgTranslator translator,
const ::ExecutorType  executor_type 
)

Definition at line 1266 of file RelAlgExecutor.cpp.

References cast_dict_to_none(), fold_expr(), get_scalar_sources_size(), i, Native, rewrite_array_elements(), rewrite_expr(), scalar_at(), set_transient_dict_maybe(), RelAlgTranslator::translateScalarRex(), and VLOG.

Referenced by RelAlgExecutor::createCompoundWorkUnit(), RelAlgExecutor::createProjectWorkUnit(), and RelAlgExecutor::createTableFunctionWorkUnit().

1269  {
1270  std::vector<std::shared_ptr<Analyzer::Expr>> scalar_sources;
1271  const size_t scalar_sources_size = get_scalar_sources_size(ra_node);
1272  VLOG(3) << "get_scalar_sources_size(" << ra_node->toString()
1273  << ") = " << scalar_sources_size;
1274  for (size_t i = 0; i < scalar_sources_size; ++i) {
1275  const auto scalar_rex = scalar_at(i, ra_node);
1276  if (dynamic_cast<const RexRef*>(scalar_rex)) {
1277  // RexRef are synthetic scalars we append at the end of the real ones
1278  // for the sake of taking memory ownership, no real work needed here.
1279  continue;
1280  }
1281 
1282  const auto scalar_expr =
1283  rewrite_array_elements(translator.translateScalarRex(scalar_rex).get());
1284  const auto rewritten_expr = rewrite_expr(scalar_expr.get());
1285  if (executor_type == ExecutorType::Native) {
1286  set_transient_dict_maybe(scalar_sources, rewritten_expr);
1287  } else {
1288  scalar_sources.push_back(cast_dict_to_none(fold_expr(rewritten_expr.get())));
1289  }
1290  }
1291 
1292  return scalar_sources;
1293 }
Analyzer::ExpressionPtr rewrite_array_elements(Analyzer::Expr const *expr)
std::shared_ptr< Analyzer::Expr > translateScalarRex(const RexScalar *rex) const
size_t get_scalar_sources_size(const RelCompound *compound)
Analyzer::ExpressionPtr rewrite_expr(const Analyzer::Expr *expr)
std::shared_ptr< Analyzer::Expr > cast_dict_to_none(const std::shared_ptr< Analyzer::Expr > &input)
const RexScalar * scalar_at(const size_t i, const RelCompound *compound)
#define VLOG(n)
Definition: Logger.h:297
std::shared_ptr< Analyzer::Expr > fold_expr(const Analyzer::Expr *expr)
void set_transient_dict_maybe(std::vector< std::shared_ptr< Analyzer::Expr >> &scalar_sources, const std::shared_ptr< Analyzer::Expr > &expr)

+ Here is the call graph for this function:

+ Here is the caller graph for this function:

template<class RA >
std::vector<std::shared_ptr<Analyzer::Expr> > anonymous_namespace{RelAlgExecutor.cpp}::translate_scalar_sources_for_update ( const RA *  ra_node,
const RelAlgTranslator translator,
int32_t  tableId,
const Catalog_Namespace::Catalog cat,
const ColumnNameList colNames,
size_t  starting_projection_column_idx 
)

Definition at line 1296 of file RelAlgExecutor.cpp.

References cat(), get_scalar_sources_size(), i, rewrite_array_elements(), rewrite_expr(), scalar_at(), set_transient_dict_maybe(), and RelAlgTranslator::translateScalarRex().

// Translate the scalar sources of an UPDATE projection. Sources with index in
// [starting_projection_column_idx, size - 1) are cast to the catalog type of
// the target column they will be written into (looked up via colNames); the
// remaining sources — including the final one — are translated unchanged.
// NOTE(review): presumably the excluded trailing source is a row-locator
// (offset) column rather than an updated value — confirm against callers.
1302  {
1303  std::vector<std::shared_ptr<Analyzer::Expr>> scalar_sources;
1304  for (size_t i = 0; i < get_scalar_sources_size(ra_node); ++i) {
1305  const auto scalar_rex = scalar_at(i, ra_node);
1306  if (dynamic_cast<const RexRef*>(scalar_rex)) {
1307  // RexRef are synthetic scalars we append at the end of the real ones
1308  // for the sake of taking memory ownership, no real work needed here.
1309  continue;
1310  }
1311 
1312  std::shared_ptr<Analyzer::Expr> translated_expr;
      // Updated-column expressions are cast to the destination column's type;
      // colNames is indexed relative to starting_projection_column_idx.
1313  if (i >= starting_projection_column_idx && i < get_scalar_sources_size(ra_node) - 1) {
1314  translated_expr = cast_to_column_type(translator.translateScalarRex(scalar_rex),
1315  tableId,
1316  cat,
1317  colNames[i - starting_projection_column_idx]);
1318  } else {
1319  translated_expr = translator.translateScalarRex(scalar_rex);
1320  }
      // Common post-processing: rewrite array element accesses, apply generic
      // expression rewrites, then maybe add a transient dictionary encode.
1321  const auto scalar_expr = rewrite_array_elements(translated_expr.get());
1322  const auto rewritten_expr = rewrite_expr(scalar_expr.get());
1323  set_transient_dict_maybe(scalar_sources, rewritten_expr);
1324  }
1325 
1326  return scalar_sources;
1327 }
Analyzer::ExpressionPtr rewrite_array_elements(Analyzer::Expr const *expr)
std::string cat(Ts &&...args)
std::shared_ptr< Analyzer::Expr > translateScalarRex(const RexScalar *rex) const
size_t get_scalar_sources_size(const RelCompound *compound)
Analyzer::ExpressionPtr rewrite_expr(const Analyzer::Expr *expr)
const RexScalar * scalar_at(const size_t i, const RelCompound *compound)
void set_transient_dict_maybe(std::vector< std::shared_ptr< Analyzer::Expr >> &scalar_sources, const std::shared_ptr< Analyzer::Expr > &expr)

+ Here is the call graph for this function:

std::vector<Analyzer::Expr*> anonymous_namespace{RelAlgExecutor.cpp}::translate_targets ( std::vector< std::shared_ptr< Analyzer::Expr >> &  target_exprs_owned,
const std::vector< std::shared_ptr< Analyzer::Expr >> &  scalar_sources,
const std::list< std::shared_ptr< Analyzer::Expr >> &  groupby_exprs,
const RelCompound *  compound,
const RelAlgTranslator &  translator,
const ExecutorType  executor_type 
)

Definition at line 1361 of file RelAlgExecutor.cpp.

References cast_dict_to_none(), CHECK, CHECK_GE, CHECK_LE, fold_expr(), RexRef::getIndex(), RelCompound::getTargetExpr(), i, Analyzer::Var::kGROUPBY, Native, rewrite_expr(), set_transient_dict(), RelCompound::size(), RelAlgTranslator::translateAggregateRex(), RelAlgTranslator::translateScalarRex(), and var_ref().

Referenced by RelAlgExecutor::createAggregateWorkUnit(), and RelAlgExecutor::createCompoundWorkUnit().

// Translate every target expression of a RelCompound into an Analyzer
// expression. Aggregate targets go through translateAggregateRex against the
// already-translated scalar sources; RexRef targets become Var references
// into the (1-based) group-by list; all other scalars are translated,
// rewritten and folded. Ownership of each result is recorded in
// target_exprs_owned while raw pointers are returned to the caller.
1367  {
1368  std::vector<Analyzer::Expr*> target_exprs;
1369  for (size_t i = 0; i < compound->size(); ++i) {
1370  const auto target_rex = compound->getTargetExpr(i);
1371  const auto target_rex_agg = dynamic_cast<const RexAgg*>(target_rex);
1372  std::shared_ptr<Analyzer::Expr> target_expr;
1373  if (target_rex_agg) {
1374  target_expr =
1375  RelAlgTranslator::translateAggregateRex(target_rex_agg, scalar_sources);
1376  } else {
1377  const auto target_rex_scalar = dynamic_cast<const RexScalar*>(target_rex);
1378  const auto target_rex_ref = dynamic_cast<const RexRef*>(target_rex_scalar);
1379  if (target_rex_ref) {
      // RexRef indices are 1-based; map the reference to the corresponding
      // group-by expression and emit a kGROUPBY Var for it.
1380  const auto ref_idx = target_rex_ref->getIndex();
1381  CHECK_GE(ref_idx, size_t(1));
1382  CHECK_LE(ref_idx, groupby_exprs.size());
1383  const auto groupby_expr = *std::next(groupby_exprs.begin(), ref_idx - 1);
1384  target_expr = var_ref(groupby_expr.get(), Analyzer::Var::kGROUPBY, ref_idx);
1385  } else {
1386  target_expr = translator.translateScalarRex(target_rex_scalar);
1387  auto rewritten_expr = rewrite_expr(target_expr.get());
1388  target_expr = fold_expr(rewritten_expr.get());
1389  if (executor_type == ExecutorType::Native) {
      // Transient dictionary encoding is best-effort: failures are
      // deliberately swallowed and the unencoded expression is kept.
1390  try {
1391  target_expr = set_transient_dict(target_expr);
1392  } catch (...) {
1393  // noop
1394  }
1395  } else {
1396  target_expr = cast_dict_to_none(target_expr);
1397  }
1398  }
1399  }
1400  CHECK(target_expr);
1401  target_exprs_owned.push_back(target_expr);
1402  target_exprs.push_back(target_expr.get());
1403  }
1404  return target_exprs;
1405 }
const Rex * getTargetExpr(const size_t i) const
size_t getIndex() const
std::shared_ptr< Analyzer::Expr > translateScalarRex(const RexScalar *rex) const
size_t size() const override
std::shared_ptr< Analyzer::Var > var_ref(const Analyzer::Expr *expr, const Analyzer::Var::WhichRow which_row, const int varno)
Definition: Analyzer.h:1786
#define CHECK_GE(x, y)
Definition: Logger.h:216
std::shared_ptr< Analyzer::Expr > set_transient_dict(const std::shared_ptr< Analyzer::Expr > expr)
Analyzer::ExpressionPtr rewrite_expr(const Analyzer::Expr *expr)
std::shared_ptr< Analyzer::Expr > cast_dict_to_none(const std::shared_ptr< Analyzer::Expr > &input)
#define CHECK_LE(x, y)
Definition: Logger.h:214
#define CHECK(condition)
Definition: Logger.h:203
std::shared_ptr< Analyzer::Expr > fold_expr(const Analyzer::Expr *expr)
static std::shared_ptr< Analyzer::Expr > translateAggregateRex(const RexAgg *rex, const std::vector< std::shared_ptr< Analyzer::Expr >> &scalar_sources)

+ Here is the call graph for this function:

+ Here is the caller graph for this function:

std::vector<Analyzer::Expr*> anonymous_namespace{RelAlgExecutor.cpp}::translate_targets ( std::vector< std::shared_ptr< Analyzer::Expr >> &  target_exprs_owned,
const std::vector< std::shared_ptr< Analyzer::Expr >> &  scalar_sources,
const std::list< std::shared_ptr< Analyzer::Expr >> &  groupby_exprs,
const RelAggregate aggregate,
const RelAlgTranslator translator 
)

Definition at line 1407 of file RelAlgExecutor.cpp.

References CHECK, fold_expr(), RelAggregate::getAggExprs(), Analyzer::Var::kGROUPBY, RelAlgTranslator::translateAggregateRex(), and var_ref().

1412  {
1413  std::vector<Analyzer::Expr*> target_exprs;
1414  size_t group_key_idx = 1;
1415  for (const auto& groupby_expr : groupby_exprs) {
1416  auto target_expr =
1417  var_ref(groupby_expr.get(), Analyzer::Var::kGROUPBY, group_key_idx++);
1418  target_exprs_owned.push_back(target_expr);
1419  target_exprs.push_back(target_expr.get());
1420  }
1421 
1422  for (const auto& target_rex_agg : aggregate->getAggExprs()) {
1423  auto target_expr =
1424  RelAlgTranslator::translateAggregateRex(target_rex_agg.get(), scalar_sources);
1425  CHECK(target_expr);
1426  target_expr = fold_expr(target_expr.get());
1427  target_exprs_owned.push_back(target_expr);
1428  target_exprs.push_back(target_expr.get());
1429  }
1430  return target_exprs;
1431 }
std::shared_ptr< Analyzer::Var > var_ref(const Analyzer::Expr *expr, const Analyzer::Var::WhichRow which_row, const int varno)
Definition: Analyzer.h:1786
const std::vector< std::unique_ptr< const RexAgg > > & getAggExprs() const
#define CHECK(condition)
Definition: Logger.h:203
std::shared_ptr< Analyzer::Expr > fold_expr(const Analyzer::Expr *expr)
static std::shared_ptr< Analyzer::Expr > translateAggregateRex(const RexAgg *rex, const std::vector< std::shared_ptr< Analyzer::Expr >> &scalar_sources)

+ Here is the call graph for this function: