OmniSciDB  29e35f4d58
anonymous_namespace{RelAlgExecutor.cpp} Namespace Reference

Classes

class  RexUsedInputsVisitor
 

Functions

bool node_is_aggregate (const RelAlgNode *ra)
 
std::unordered_set< PhysicalInput > get_physical_inputs (const Catalog_Namespace::Catalog &cat, const RelAlgNode *ra)
 
void check_sort_node_source_constraint (const RelSort *sort)
 
const RelAlgNode * get_data_sink (const RelAlgNode *ra_node)
 
std::pair< std::unordered_set< const RexInput * >, std::vector< std::shared_ptr< RexInput > > > get_used_inputs (const RelCompound *compound, const Catalog_Namespace::Catalog &cat)
 
std::pair< std::unordered_set< const RexInput * >, std::vector< std::shared_ptr< RexInput > > > get_used_inputs (const RelAggregate *aggregate, const Catalog_Namespace::Catalog &cat)
 
std::pair< std::unordered_set< const RexInput * >, std::vector< std::shared_ptr< RexInput > > > get_used_inputs (const RelProject *project, const Catalog_Namespace::Catalog &cat)
 
std::pair< std::unordered_set< const RexInput * >, std::vector< std::shared_ptr< RexInput > > > get_used_inputs (const RelTableFunction *table_func, const Catalog_Namespace::Catalog &cat)
 
std::pair< std::unordered_set< const RexInput * >, std::vector< std::shared_ptr< RexInput > > > get_used_inputs (const RelFilter *filter, const Catalog_Namespace::Catalog &cat)
 
int table_id_from_ra (const RelAlgNode *ra_node)
 
std::unordered_map< const RelAlgNode *, int > get_input_nest_levels (const RelAlgNode *ra_node, const std::vector< size_t > &input_permutation)
 
std::pair< std::unordered_set< const RexInput * >, std::vector< std::shared_ptr< RexInput > > > get_join_source_used_inputs (const RelAlgNode *ra_node, const Catalog_Namespace::Catalog &cat)
 
std::vector< const RelAlgNode * > get_non_join_sequence (const RelAlgNode *ra)
 
void collect_used_input_desc (std::vector< InputDescriptor > &input_descs, const Catalog_Namespace::Catalog &cat, std::unordered_set< std::shared_ptr< const InputColDescriptor >> &input_col_descs_unique, const RelAlgNode *ra_node, const std::unordered_set< const RexInput *> &source_used_inputs, const std::unordered_map< const RelAlgNode *, int > &input_to_nest_level)
 
template<class RA >
std::pair< std::vector< InputDescriptor >, std::list< std::shared_ptr< const InputColDescriptor > > > get_input_desc_impl (const RA *ra_node, const std::unordered_set< const RexInput *> &used_inputs, const std::unordered_map< const RelAlgNode *, int > &input_to_nest_level, const std::vector< size_t > &input_permutation, const Catalog_Namespace::Catalog &cat)
 
template<class RA >
std::tuple< std::vector< InputDescriptor >, std::list< std::shared_ptr< const InputColDescriptor > >, std::vector< std::shared_ptr< RexInput > > > get_input_desc (const RA *ra_node, const std::unordered_map< const RelAlgNode *, int > &input_to_nest_level, const std::vector< size_t > &input_permutation, const Catalog_Namespace::Catalog &cat)
 
size_t get_scalar_sources_size (const RelCompound *compound)
 
size_t get_scalar_sources_size (const RelProject *project)
 
size_t get_scalar_sources_size (const RelTableFunction *table_func)
 
const RexScalar * scalar_at (const size_t i, const RelCompound *compound)
 
const RexScalar * scalar_at (const size_t i, const RelProject *project)
 
const RexScalar * scalar_at (const size_t i, const RelTableFunction *table_func)
 
std::shared_ptr< Analyzer::Expr > set_transient_dict (const std::shared_ptr< Analyzer::Expr > expr)
 
void set_transient_dict_maybe (std::vector< std::shared_ptr< Analyzer::Expr >> &scalar_sources, const std::shared_ptr< Analyzer::Expr > &expr)
 
template<class RA >
std::vector< std::shared_ptr< Analyzer::Expr > > translate_scalar_sources (const RA *ra_node, const RelAlgTranslator &translator)
 
template<class RA >
std::vector< std::shared_ptr< Analyzer::Expr > > translate_scalar_sources_for_update (const RA *ra_node, const RelAlgTranslator &translator, int32_t tableId, const Catalog_Namespace::Catalog &cat, const ColumnNameList &colNames, size_t starting_projection_column_idx)
 
std::list< std::shared_ptr< Analyzer::Expr > > translate_groupby_exprs (const RelCompound *compound, const std::vector< std::shared_ptr< Analyzer::Expr >> &scalar_sources)
 
std::list< std::shared_ptr< Analyzer::Expr > > translate_groupby_exprs (const RelAggregate *aggregate, const std::vector< std::shared_ptr< Analyzer::Expr >> &scalar_sources)
 
QualsConjunctiveForm translate_quals (const RelCompound *compound, const RelAlgTranslator &translator)
 
std::vector< Analyzer::Expr * > translate_targets (std::vector< std::shared_ptr< Analyzer::Expr >> &target_exprs_owned, const std::vector< std::shared_ptr< Analyzer::Expr >> &scalar_sources, const std::list< std::shared_ptr< Analyzer::Expr >> &groupby_exprs, const RelCompound *compound, const RelAlgTranslator &translator)
 
std::vector< Analyzer::Expr * > translate_targets (std::vector< std::shared_ptr< Analyzer::Expr >> &target_exprs_owned, const std::vector< std::shared_ptr< Analyzer::Expr >> &scalar_sources, const std::list< std::shared_ptr< Analyzer::Expr >> &groupby_exprs, const RelAggregate *aggregate, const RelAlgTranslator &translator)
 
bool is_count_distinct (const Analyzer::Expr *expr)
 
bool is_agg (const Analyzer::Expr *expr)
 
SQLTypeInfo get_logical_type_for_expr (const Analyzer::Expr &expr)
 
template<class RA >
std::vector< TargetMetaInfo > get_targets_meta (const RA *ra_node, const std::vector< Analyzer::Expr *> &target_exprs)
 
bool is_window_execution_unit (const RelAlgExecutionUnit &ra_exe_unit)
 
std::shared_ptr< Analyzer::Expr > transform_to_inner (const Analyzer::Expr *expr)
 
std::list< Analyzer::OrderEntry > get_order_entries (const RelSort *sort)
 
size_t get_scan_limit (const RelAlgNode *ra, const size_t limit)
 
bool first_oe_is_desc (const std::list< Analyzer::OrderEntry > &order_entries)
 
size_t groups_approx_upper_bound (const std::vector< InputTableInfo > &table_infos)
 
bool compute_output_buffer_size (const RelAlgExecutionUnit &ra_exe_unit)
 
bool exe_unit_has_quals (const RelAlgExecutionUnit ra_exe_unit)
 
RelAlgExecutionUnit decide_approx_count_distinct_implementation (const RelAlgExecutionUnit &ra_exe_unit_in, const std::vector< InputTableInfo > &table_infos, const Executor *executor, const ExecutorDeviceType device_type_in, std::vector< std::shared_ptr< Analyzer::Expr >> &target_exprs_owned)
 
void build_render_targets (RenderInfo &render_info, const std::vector< Analyzer::Expr *> &work_unit_target_exprs, const std::vector< TargetMetaInfo > &targets_meta)
 
bool can_use_bump_allocator (const RelAlgExecutionUnit &ra_exe_unit, const CompilationOptions &co, const ExecutionOptions &eo)
 
JoinType get_join_type (const RelAlgNode *ra)
 
std::unique_ptr< const RexOperator > get_bitwise_equals (const RexScalar *scalar)
 
std::unique_ptr< const RexOperator > get_bitwise_equals_conjunction (const RexScalar *scalar)
 
std::vector< JoinType > left_deep_join_types (const RelLeftDeepInnerJoin *left_deep_join)
 
template<class RA >
std::vector< size_t > do_table_reordering (std::vector< InputDescriptor > &input_descs, std::list< std::shared_ptr< const InputColDescriptor >> &input_col_descs, const JoinQualsPerNestingLevel &left_deep_join_quals, std::unordered_map< const RelAlgNode *, int > &input_to_nest_level, const RA *node, const std::vector< InputTableInfo > &query_infos, const Executor *executor)
 
std::vector< size_t > get_left_deep_join_input_sizes (const RelLeftDeepInnerJoin *left_deep_join)
 
std::list< std::shared_ptr< Analyzer::Expr > > rewrite_quals (const std::list< std::shared_ptr< Analyzer::Expr >> &quals)
 
std::vector< const RexScalar * > rex_to_conjunctive_form (const RexScalar *qual_expr)
 
std::shared_ptr< Analyzer::Expr > build_logical_expression (const std::vector< std::shared_ptr< Analyzer::Expr >> &factors, const SQLOps sql_op)
 
template<class QualsList >
bool list_contains_expression (const QualsList &haystack, const std::shared_ptr< Analyzer::Expr > &needle)
 
std::shared_ptr< Analyzer::Expr > reverse_logical_distribution (const std::shared_ptr< Analyzer::Expr > &expr)
 
std::vector< std::shared_ptr< Analyzer::Expr > > synthesize_inputs (const RelAlgNode *ra_node, const size_t nest_level, const std::vector< TargetMetaInfo > &in_metainfo, const std::unordered_map< const RelAlgNode *, int > &input_to_nest_level)
 
std::pair< std::vector< TargetMetaInfo >, std::vector< std::shared_ptr< Analyzer::Expr > > > get_inputs_meta (const RelFilter *filter, const RelAlgTranslator &translator, const std::vector< std::shared_ptr< RexInput >> &inputs_owned, const std::unordered_map< const RelAlgNode *, int > &input_to_nest_level)
 

Function Documentation

◆ build_logical_expression()

std::shared_ptr<Analyzer::Expr> anonymous_namespace{RelAlgExecutor.cpp}::build_logical_expression ( const std::vector< std::shared_ptr< Analyzer::Expr >> &  factors,
const SQLOps  sql_op 
)

Definition at line 2524 of file RelAlgExecutor.cpp.

References CHECK, kONE, and Parser::OperExpr::normalize().

Referenced by reverse_logical_distribution().

2526  {
2527  CHECK(!factors.empty());
2528  auto acc = factors.front();
2529  for (size_t i = 1; i < factors.size(); ++i) {
2530  acc = Parser::OperExpr::normalize(sql_op, kONE, acc, factors[i]);
2531  }
2532  return acc;
2533 }
static std::shared_ptr< Analyzer::Expr > normalize(const SQLOps optype, const SQLQualifier qual, std::shared_ptr< Analyzer::Expr > left_expr, std::shared_ptr< Analyzer::Expr > right_expr)
Definition: ParserNode.cpp:261
Definition: sqldefs.h:69
#define CHECK(condition)
Definition: Logger.h:193
+ Here is the call graph for this function:
+ Here is the caller graph for this function:

◆ build_render_targets()

void anonymous_namespace{RelAlgExecutor.cpp}::build_render_targets ( RenderInfo render_info,
const std::vector< Analyzer::Expr *> &  work_unit_target_exprs,
const std::vector< TargetMetaInfo > &  targets_meta 
)

Definition at line 1833 of file RelAlgExecutor.cpp.

References CHECK_EQ, and RenderInfo::targets.

Referenced by RelAlgExecutor::executeWorkUnit().

1835  {
1836  CHECK_EQ(work_unit_target_exprs.size(), targets_meta.size());
1837  render_info.targets.clear();
1838  for (size_t i = 0; i < targets_meta.size(); ++i) {
1839  render_info.targets.emplace_back(std::make_shared<Analyzer::TargetEntry>(
1840  targets_meta[i].get_resname(),
1841  work_unit_target_exprs[i]->get_shared_ptr(),
1842  false));
1843  }
1844 }
#define CHECK_EQ(x, y)
Definition: Logger.h:201
std::vector< std::shared_ptr< Analyzer::TargetEntry > > targets
Definition: RenderInfo.h:38
+ Here is the caller graph for this function:

◆ can_use_bump_allocator()

bool anonymous_namespace{RelAlgExecutor.cpp}::can_use_bump_allocator ( const RelAlgExecutionUnit ra_exe_unit,
const CompilationOptions co,
const ExecutionOptions eo 
)
inline

Definition at line 1846 of file RelAlgExecutor.cpp.

References CompilationOptions::device_type_, g_enable_bump_allocator, GPU, SortInfo::order_entries, ExecutionOptions::output_columnar_hint, and RelAlgExecutionUnit::sort_info.

Referenced by RelAlgExecutor::executeWorkUnit().

 1848  {
 1849  return g_enable_bump_allocator && co.device_type_ == ExecutorDeviceType::GPU &&
 1850  !eo.output_columnar_hint && ra_exe_unit.sort_info.order_entries.empty();
 1851 }
const std::list< Analyzer::OrderEntry > order_entries
const SortInfo sort_info
bool g_enable_bump_allocator
Definition: Execute.cpp:99
const bool output_columnar_hint
ExecutorDeviceType device_type_
+ Here is the caller graph for this function:

◆ check_sort_node_source_constraint()

void anonymous_namespace{RelAlgExecutor.cpp}::check_sort_node_source_constraint ( const RelSort sort)
inline

Definition at line 185 of file RelAlgExecutor.cpp.

References CHECK_EQ, RelAlgNode::getInput(), and RelAlgNode::inputCount().

Referenced by RelAlgExecutor::executeRelAlgQuerySingleStep(), and RelAlgExecutor::executeSort().

185  {
186  CHECK_EQ(size_t(1), sort->inputCount());
187  const auto source = sort->getInput(0);
188  if (dynamic_cast<const RelSort*>(source)) {
189  throw std::runtime_error("Sort node not supported as input to another sort");
190  }
191 }
#define CHECK_EQ(x, y)
Definition: Logger.h:201
const size_t inputCount() const
const RelAlgNode * getInput(const size_t idx) const
+ Here is the call graph for this function:
+ Here is the caller graph for this function:

◆ collect_used_input_desc()

void anonymous_namespace{RelAlgExecutor.cpp}::collect_used_input_desc ( std::vector< InputDescriptor > &  input_descs,
const Catalog_Namespace::Catalog cat,
std::unordered_set< std::shared_ptr< const InputColDescriptor >> &  input_col_descs_unique,
const RelAlgNode ra_node,
const std::unordered_set< const RexInput *> &  source_used_inputs,
const std::unordered_map< const RelAlgNode *, int > &  input_to_nest_level 
)

Definition at line 685 of file RelAlgExecutor.cpp.

References get_data_sink(), get_non_join_sequence(), Catalog_Namespace::Catalog::getColumnIdBySpi(), and table_id_from_ra().

Referenced by get_input_desc_impl().

691  {
692  std::unordered_set<InputDescriptor> input_descs_unique(input_descs.begin(),
693  input_descs.end());
694  const auto non_join_src_seq = get_non_join_sequence(get_data_sink(ra_node));
695  std::unordered_map<const RelAlgNode*, int> non_join_to_nest_level;
696  for (const auto node : non_join_src_seq) {
697  non_join_to_nest_level.insert(std::make_pair(node, non_join_to_nest_level.size()));
698  }
699  for (const auto used_input : source_used_inputs) {
700  const auto input_ra = used_input->getSourceNode();
701  const int table_id = table_id_from_ra(input_ra);
702  const auto col_id = used_input->getIndex();
703  auto it = input_to_nest_level.find(input_ra);
704  if (it == input_to_nest_level.end()) {
705  throw std::runtime_error("Bushy joins not supported");
706  }
707  const int input_desc = it->second;
708  input_col_descs_unique.insert(std::make_shared<const InputColDescriptor>(
709  dynamic_cast<const RelScan*>(input_ra)
710  ? cat.getColumnIdBySpi(table_id, col_id + 1)
711  : col_id,
712  table_id,
713  input_desc));
714  }
715 }
int table_id_from_ra(const RelAlgNode *ra_node)
const int getColumnIdBySpi(const int tableId, const size_t spi) const
Definition: Catalog.cpp:1459
const RelAlgNode * get_data_sink(const RelAlgNode *ra_node)
std::vector< const RelAlgNode * > get_non_join_sequence(const RelAlgNode *ra)
+ Here is the call graph for this function:
+ Here is the caller graph for this function:

◆ compute_output_buffer_size()

bool anonymous_namespace{RelAlgExecutor.cpp}::compute_output_buffer_size ( const RelAlgExecutionUnit ra_exe_unit)

Determines whether a query needs to compute the size of its output buffer. Returns true for projection queries with no LIMIT or a LIMIT that exceeds the high scan limit threshold (meaning it would be cheaper to compute the number of rows passing or use the bump allocator than allocate the current scan limit per GPU)

Definition at line 1745 of file RelAlgExecutor.cpp.

References RelAlgExecutionUnit::groupby_exprs, Executor::high_scan_limit, RelAlgExecutionUnit::scan_limit, and RelAlgExecutionUnit::target_exprs.

Referenced by RelAlgExecutor::executeWorkUnit().

1745  {
1746  for (const auto target_expr : ra_exe_unit.target_exprs) {
1747  if (dynamic_cast<const Analyzer::AggExpr*>(target_expr)) {
1748  return false;
1749  }
1750  }
1751  if (ra_exe_unit.groupby_exprs.size() == 1 && !ra_exe_unit.groupby_exprs.front() &&
1752  (!ra_exe_unit.scan_limit || ra_exe_unit.scan_limit > Executor::high_scan_limit)) {
1753  return true;
1754  }
1755  return false;
1756 }
std::vector< Analyzer::Expr * > target_exprs
const std::list< std::shared_ptr< Analyzer::Expr > > groupby_exprs
static const size_t high_scan_limit
Definition: Execute.h:409
+ Here is the caller graph for this function:

◆ decide_approx_count_distinct_implementation()

RelAlgExecutionUnit anonymous_namespace{RelAlgExecutor.cpp}::decide_approx_count_distinct_implementation ( const RelAlgExecutionUnit ra_exe_unit_in,
const std::vector< InputTableInfo > &  table_infos,
const Executor executor,
const ExecutorDeviceType  device_type_in,
std::vector< std::shared_ptr< Analyzer::Expr >> &  target_exprs_owned 
)

Definition at line 1763 of file RelAlgExecutor.cpp.

References Bitmap, CHECK, CHECK_GE, g_bigint_count, g_cluster, g_hll_precision_bits, get_agg_type(), get_count_distinct_sub_bitmap_count(), get_target_info(), getExpressionRange(), GPU, hll_size_for_rate(), Integer, kAPPROX_COUNT_DISTINCT, kCOUNT, kENCODING_DICT, kINT, and RelAlgExecutionUnit::target_exprs.

Referenced by RelAlgExecutor::executeWorkUnit(), and RelAlgExecutor::handleOutOfMemoryRetry().

1768  {
1769  RelAlgExecutionUnit ra_exe_unit = ra_exe_unit_in;
1770  for (size_t i = 0; i < ra_exe_unit.target_exprs.size(); ++i) {
1771  const auto target_expr = ra_exe_unit.target_exprs[i];
1772  const auto agg_info = get_target_info(target_expr, g_bigint_count);
1773  if (agg_info.agg_kind != kAPPROX_COUNT_DISTINCT) {
1774  continue;
1775  }
1776  CHECK(dynamic_cast<const Analyzer::AggExpr*>(target_expr));
1777  const auto arg = static_cast<Analyzer::AggExpr*>(target_expr)->get_own_arg();
1778  CHECK(arg);
1779  const auto& arg_ti = arg->get_type_info();
1780  // Avoid calling getExpressionRange for variable length types (string and array),
1781  // it'd trigger an assertion since that API expects to be called only for types
1782  // for which the notion of range is well-defined. A bit of a kludge, but the
1783  // logic to reject these types anyway is at lower levels in the stack and not
1784  // really worth pulling into a separate function for now.
1785  if (!(arg_ti.is_number() || arg_ti.is_boolean() || arg_ti.is_time() ||
1786  (arg_ti.is_string() && arg_ti.get_compression() == kENCODING_DICT))) {
1787  continue;
1788  }
1789  const auto arg_range = getExpressionRange(arg.get(), table_infos, executor);
1790  if (arg_range.getType() != ExpressionRangeType::Integer) {
1791  continue;
1792  }
1793  // When running distributed, the threshold for using the precise implementation
1794  // must be consistent across all leaves, otherwise we could have a mix of precise
1795  // and approximate bitmaps and we cannot aggregate them.
1796  const auto device_type = g_cluster ? ExecutorDeviceType::GPU : device_type_in;
1797  const auto bitmap_sz_bits = arg_range.getIntMax() - arg_range.getIntMin() + 1;
1798  const auto sub_bitmap_count =
1799  get_count_distinct_sub_bitmap_count(bitmap_sz_bits, ra_exe_unit, device_type);
1800  int64_t approx_bitmap_sz_bits{0};
1801  const auto error_rate =
1802  static_cast<Analyzer::AggExpr*>(target_expr)->get_error_rate();
1803  if (error_rate) {
1804  CHECK(error_rate->get_type_info().get_type() == kINT);
1805  CHECK_GE(error_rate->get_constval().intval, 1);
1806  approx_bitmap_sz_bits = hll_size_for_rate(error_rate->get_constval().intval);
1807  } else {
1808  approx_bitmap_sz_bits = g_hll_precision_bits;
1809  }
1810  CountDistinctDescriptor approx_count_distinct_desc{CountDistinctImplType::Bitmap,
1811  arg_range.getIntMin(),
1812  approx_bitmap_sz_bits,
1813  true,
1814  device_type,
1815  sub_bitmap_count};
1816  CountDistinctDescriptor precise_count_distinct_desc{CountDistinctImplType::Bitmap,
1817  arg_range.getIntMin(),
1818  bitmap_sz_bits,
1819  false,
1820  device_type,
1821  sub_bitmap_count};
1822  if (approx_count_distinct_desc.bitmapPaddedSizeBytes() >=
1823  precise_count_distinct_desc.bitmapPaddedSizeBytes()) {
1824  auto precise_count_distinct = makeExpr<Analyzer::AggExpr>(
1825  get_agg_type(kCOUNT, arg.get()), kCOUNT, arg, true, nullptr);
1826  target_exprs_owned.push_back(precise_count_distinct);
1827  ra_exe_unit.target_exprs[i] = precise_count_distinct.get();
1828  }
1829  }
1830  return ra_exe_unit;
1831 }
std::vector< Analyzer::Expr * > target_exprs
int hll_size_for_rate(const int err_percent)
Definition: HyperLogLog.h:115
bool g_cluster
TargetInfo get_target_info(const PointerType target_expr, const bool bigint_count)
Definition: TargetInfo.h:66
#define CHECK_GE(x, y)
Definition: Logger.h:206
SQLTypeInfo get_agg_type(const SQLAgg agg_kind, const Analyzer::Expr *arg_expr)
int g_hll_precision_bits
size_t get_count_distinct_sub_bitmap_count(const size_t bitmap_sz_bits, const RelAlgExecutionUnit &ra_exe_unit, const ExecutorDeviceType device_type)
bool g_bigint_count
ExpressionRange getExpressionRange(const Analyzer::BinOper *expr, const std::vector< InputTableInfo > &query_infos, const Executor *, boost::optional< std::list< std::shared_ptr< Analyzer::Expr >>> simple_quals)
Definition: sqldefs.h:76
#define CHECK(condition)
Definition: Logger.h:193
Definition: sqltypes.h:48
+ Here is the call graph for this function:
+ Here is the caller graph for this function:

◆ do_table_reordering()

template<class RA >
std::vector<size_t> anonymous_namespace{RelAlgExecutor.cpp}::do_table_reordering ( std::vector< InputDescriptor > &  input_descs,
std::list< std::shared_ptr< const InputColDescriptor >> &  input_col_descs,
const JoinQualsPerNestingLevel left_deep_join_quals,
std::unordered_map< const RelAlgNode *, int > &  input_to_nest_level,
const RA *  node,
const std::vector< InputTableInfo > &  query_infos,
const Executor executor 
)

Definition at line 2375 of file RelAlgExecutor.cpp.

References CHECK, g_cluster, get_input_desc(), get_input_nest_levels(), get_node_input_permutation(), and table_is_replicated().

Referenced by RelAlgExecutor::createCompoundWorkUnit(), and RelAlgExecutor::createProjectWorkUnit().

2382  {
2383  if (g_cluster) {
2384  // Disable table reordering in distributed mode. The aggregator does not have enough
2385  // information to break ties
2386  return {};
2387  }
2388  const auto& cat = *executor->getCatalog();
2389  for (const auto& table_info : query_infos) {
2390  if (table_info.table_id < 0) {
2391  continue;
2392  }
2393  const auto td = cat.getMetadataForTable(table_info.table_id);
2394  CHECK(td);
2395  if (table_is_replicated(td)) {
2396  return {};
2397  }
2398  }
2399  const auto input_permutation =
2400  get_node_input_permutation(left_deep_join_quals, query_infos, executor);
2401  input_to_nest_level = get_input_nest_levels(node, input_permutation);
2402  std::tie(input_descs, input_col_descs, std::ignore) =
2403  get_input_desc(node, input_to_nest_level, input_permutation, cat);
2404  return input_permutation;
2405 }
std::unordered_map< const RelAlgNode *, int > get_input_nest_levels(const RelAlgNode *ra_node, const std::vector< size_t > &input_permutation)
bool g_cluster
std::vector< node_t > get_node_input_permutation(const JoinQualsPerNestingLevel &left_deep_join_quals, const std::vector< InputTableInfo > &table_infos, const Executor *executor)
bool table_is_replicated(const TableDescriptor *td)
#define CHECK(condition)
Definition: Logger.h:193
std::tuple< std::vector< InputDescriptor >, std::list< std::shared_ptr< const InputColDescriptor > >, std::vector< std::shared_ptr< RexInput > > > get_input_desc(const RA *ra_node, const std::unordered_map< const RelAlgNode *, int > &input_to_nest_level, const std::vector< size_t > &input_permutation, const Catalog_Namespace::Catalog &cat)
+ Here is the call graph for this function:
+ Here is the caller graph for this function:

◆ exe_unit_has_quals()

bool anonymous_namespace{RelAlgExecutor.cpp}::exe_unit_has_quals ( const RelAlgExecutionUnit  ra_exe_unit)
inline

Definition at line 1758 of file RelAlgExecutor.cpp.

References RelAlgExecutionUnit::join_quals, RelAlgExecutionUnit::quals, and RelAlgExecutionUnit::simple_quals.

Referenced by RelAlgExecutor::executeWorkUnit().

1758  {
1759  return !(ra_exe_unit.quals.empty() && ra_exe_unit.join_quals.empty() &&
1760  ra_exe_unit.simple_quals.empty());
1761 }
const JoinQualsPerNestingLevel join_quals
std::list< std::shared_ptr< Analyzer::Expr > > quals
std::list< std::shared_ptr< Analyzer::Expr > > simple_quals
+ Here is the caller graph for this function:

◆ first_oe_is_desc()

bool anonymous_namespace{RelAlgExecutor.cpp}::first_oe_is_desc ( const std::list< Analyzer::OrderEntry > &  order_entries)

Definition at line 1552 of file RelAlgExecutor.cpp.

Referenced by RelAlgExecutor::createSortInputWorkUnit(), and RelAlgExecutor::executeSort().

1552  {
1553  return !order_entries.empty() && order_entries.front().is_desc;
1554 }
+ Here is the caller graph for this function:

◆ get_bitwise_equals()

std::unique_ptr<const RexOperator> anonymous_namespace{RelAlgExecutor.cpp}::get_bitwise_equals ( const RexScalar scalar)

Definition at line 2294 of file RelAlgExecutor.cpp.

References CHECK_EQ, kAND, kBW_EQ, kEQ, kISNULL, kOR, and RexVisitorBase< T >::visit().

Referenced by get_bitwise_equals_conjunction().

2294  {
2295  const auto condition = dynamic_cast<const RexOperator*>(scalar);
2296  if (!condition || condition->getOperator() != kOR || condition->size() != 2) {
2297  return nullptr;
2298  }
2299  const auto equi_join_condition =
2300  dynamic_cast<const RexOperator*>(condition->getOperand(0));
2301  if (!equi_join_condition || equi_join_condition->getOperator() != kEQ) {
2302  return nullptr;
2303  }
2304  const auto both_are_null_condition =
2305  dynamic_cast<const RexOperator*>(condition->getOperand(1));
2306  if (!both_are_null_condition || both_are_null_condition->getOperator() != kAND ||
2307  both_are_null_condition->size() != 2) {
2308  return nullptr;
2309  }
2310  const auto lhs_is_null =
2311  dynamic_cast<const RexOperator*>(both_are_null_condition->getOperand(0));
2312  const auto rhs_is_null =
2313  dynamic_cast<const RexOperator*>(both_are_null_condition->getOperand(1));
2314  if (!lhs_is_null || !rhs_is_null || lhs_is_null->getOperator() != kISNULL ||
2315  rhs_is_null->getOperator() != kISNULL) {
2316  return nullptr;
2317  }
2318  CHECK_EQ(size_t(1), lhs_is_null->size());
2319  CHECK_EQ(size_t(1), rhs_is_null->size());
2320  CHECK_EQ(size_t(2), equi_join_condition->size());
2321  const auto eq_lhs = dynamic_cast<const RexInput*>(equi_join_condition->getOperand(0));
2322  const auto eq_rhs = dynamic_cast<const RexInput*>(equi_join_condition->getOperand(1));
2323  const auto is_null_lhs = dynamic_cast<const RexInput*>(lhs_is_null->getOperand(0));
2324  const auto is_null_rhs = dynamic_cast<const RexInput*>(rhs_is_null->getOperand(0));
2325  if (!eq_lhs || !eq_rhs || !is_null_lhs || !is_null_rhs) {
2326  return nullptr;
2327  }
2328  std::vector<std::unique_ptr<const RexScalar>> eq_operands;
2329  if (*eq_lhs == *is_null_lhs && *eq_rhs == *is_null_rhs) {
2330  RexDeepCopyVisitor deep_copy_visitor;
2331  auto lhs_op_copy = deep_copy_visitor.visit(equi_join_condition->getOperand(0));
2332  auto rhs_op_copy = deep_copy_visitor.visit(equi_join_condition->getOperand(1));
2333  eq_operands.emplace_back(lhs_op_copy.release());
2334  eq_operands.emplace_back(rhs_op_copy.release());
2335  return boost::make_unique<const RexOperator>(
2336  kBW_EQ, eq_operands, equi_join_condition->getType());
2337  }
2338  return nullptr;
2339 }
#define CHECK_EQ(x, y)
Definition: Logger.h:201
Definition: sqldefs.h:38
Definition: sqldefs.h:30
Definition: sqldefs.h:37
virtual T visit(const RexScalar *rex_scalar) const
Definition: RexVisitor.h:27
Definition: sqldefs.h:31
+ Here is the call graph for this function:
+ Here is the caller graph for this function:

◆ get_bitwise_equals_conjunction()

std::unique_ptr<const RexOperator> anonymous_namespace{RelAlgExecutor.cpp}::get_bitwise_equals_conjunction ( const RexScalar scalar)

Definition at line 2341 of file RelAlgExecutor.cpp.

References CHECK_GE, get_bitwise_equals(), and kAND.

Referenced by RelAlgExecutor::makeJoinQuals().

2342  {
2343  const auto condition = dynamic_cast<const RexOperator*>(scalar);
2344  if (condition && condition->getOperator() == kAND) {
2345  CHECK_GE(condition->size(), size_t(2));
2346  auto acc = get_bitwise_equals(condition->getOperand(0));
2347  if (!acc) {
2348  return nullptr;
2349  }
2350  for (size_t i = 1; i < condition->size(); ++i) {
2351  std::vector<std::unique_ptr<const RexScalar>> and_operands;
2352  and_operands.emplace_back(std::move(acc));
2353  and_operands.emplace_back(get_bitwise_equals_conjunction(condition->getOperand(i)));
2354  acc =
2355  boost::make_unique<const RexOperator>(kAND, and_operands, condition->getType());
2356  }
2357  return acc;
2358  }
2359  return get_bitwise_equals(scalar);
2360 }
std::unique_ptr< const RexOperator > get_bitwise_equals_conjunction(const RexScalar *scalar)
#define CHECK_GE(x, y)
Definition: Logger.h:206
Definition: sqldefs.h:37
std::unique_ptr< const RexOperator > get_bitwise_equals(const RexScalar *scalar)
+ Here is the call graph for this function:
+ Here is the caller graph for this function:

◆ get_data_sink()

const RelAlgNode* anonymous_namespace{RelAlgExecutor.cpp}::get_data_sink ( const RelAlgNode ra_node)

Definition at line 501 of file RelAlgExecutor.cpp.

References CHECK_EQ, RelAlgNode::getInput(), RelAlgNode::inputCount(), and join().

Referenced by collect_used_input_desc(), get_input_desc_impl(), get_input_nest_levels(), get_inputs_meta(), get_join_source_used_inputs(), get_join_type(), and get_used_inputs().

501  {
502  if (auto join = dynamic_cast<const RelJoin*>(ra_node)) {
503  CHECK_EQ(size_t(2), join->inputCount());
504  return join;
505  }
506  CHECK_EQ(size_t(1), ra_node->inputCount());
507  auto only_src = ra_node->getInput(0);
508  const bool is_join = dynamic_cast<const RelJoin*>(only_src) ||
509  dynamic_cast<const RelLeftDeepInnerJoin*>(only_src);
510  return is_join ? only_src : ra_node;
511 }
#define CHECK_EQ(x, y)
Definition: Logger.h:201
std::string join(T const &container, std::string const &delim)
const size_t inputCount() const
const RelAlgNode * getInput(const size_t idx) const
+ Here is the call graph for this function:
+ Here is the caller graph for this function:

◆ get_input_desc()

template<class RA >
std::tuple<std::vector<InputDescriptor>, std::list<std::shared_ptr<const InputColDescriptor> >, std::vector<std::shared_ptr<RexInput> > > anonymous_namespace{RelAlgExecutor.cpp}::get_input_desc ( const RA *  ra_node,
const std::unordered_map< const RelAlgNode *, int > &  input_to_nest_level,
const std::vector< size_t > &  input_permutation,
const Catalog_Namespace::Catalog cat 
)

Definition at line 778 of file RelAlgExecutor.cpp.

References get_input_desc_impl(), and get_used_inputs().

Referenced by RelAlgExecutor::createAggregateWorkUnit(), RelAlgExecutor::createCompoundWorkUnit(), RelAlgExecutor::createFilterWorkUnit(), RelAlgExecutor::createProjectWorkUnit(), RelAlgExecutor::createTableFunctionWorkUnit(), and do_table_reordering().

781  {
782  std::unordered_set<const RexInput*> used_inputs;
783  std::vector<std::shared_ptr<RexInput>> used_inputs_owned;
784  std::tie(used_inputs, used_inputs_owned) = get_used_inputs(ra_node, cat);
785  auto input_desc_pair = get_input_desc_impl(
786  ra_node, used_inputs, input_to_nest_level, input_permutation, cat);
787  return std::make_tuple(
788  input_desc_pair.first, input_desc_pair.second, used_inputs_owned);
789 }
std::pair< std::vector< InputDescriptor >, std::list< std::shared_ptr< const InputColDescriptor > > > get_input_desc_impl(const RA *ra_node, const std::unordered_set< const RexInput *> &used_inputs, const std::unordered_map< const RelAlgNode *, int > &input_to_nest_level, const std::vector< size_t > &input_permutation, const Catalog_Namespace::Catalog &cat)
std::pair< std::unordered_set< const RexInput * >, std::vector< std::shared_ptr< RexInput > > > get_used_inputs(const RelFilter *filter, const Catalog_Namespace::Catalog &cat)
+ Here is the call graph for this function:
+ Here is the caller graph for this function:

◆ get_input_desc_impl()

template<class RA >
std::pair<std::vector<InputDescriptor>, std::list<std::shared_ptr<const InputColDescriptor> > > anonymous_namespace{RelAlgExecutor.cpp}::get_input_desc_impl ( const RA *  ra_node,
const std::unordered_set< const RexInput *> &  used_inputs,
const std::unordered_map< const RelAlgNode *, int > &  input_to_nest_level,
const std::vector< size_t > &  input_permutation,
const Catalog_Namespace::Catalog &  cat 
)

Definition at line 720 of file RelAlgExecutor.cpp.

References collect_used_input_desc(), get_data_sink(), get_join_source_used_inputs(), InputDescriptor::getNestLevel(), and table_id_from_ra().

Referenced by get_input_desc().

724  {
725  std::vector<InputDescriptor> input_descs;
726  const auto data_sink_node = get_data_sink(ra_node);
727  for (size_t input_idx = 0; input_idx < data_sink_node->inputCount(); ++input_idx) {
728  const auto input_node_idx =
729  input_permutation.empty() ? input_idx : input_permutation[input_idx];
730  const auto input_ra = data_sink_node->getInput(input_node_idx);
731  const int table_id = table_id_from_ra(input_ra);
732  input_descs.emplace_back(table_id, input_idx);
733  }
734  std::sort(input_descs.begin(),
735  input_descs.end(),
736  [](const InputDescriptor& lhs, const InputDescriptor& rhs) {
737  return lhs.getNestLevel() < rhs.getNestLevel();
738  });
739  std::unordered_set<std::shared_ptr<const InputColDescriptor>> input_col_descs_unique;
740  collect_used_input_desc(input_descs,
741  cat,
742  input_col_descs_unique,
743  ra_node,
744  used_inputs,
745  input_to_nest_level);
746  std::unordered_set<const RexInput*> join_source_used_inputs;
747  std::vector<std::shared_ptr<RexInput>> join_source_used_inputs_owned;
748  std::tie(join_source_used_inputs, join_source_used_inputs_owned) =
749  get_join_source_used_inputs(ra_node, cat);
750  collect_used_input_desc(input_descs,
751  cat,
752  input_col_descs_unique,
753  ra_node,
754  join_source_used_inputs,
755  input_to_nest_level);
756  std::vector<std::shared_ptr<const InputColDescriptor>> input_col_descs(
757  input_col_descs_unique.begin(), input_col_descs_unique.end());
758 
759  std::sort(
760  input_col_descs.begin(),
761  input_col_descs.end(),
762  [](std::shared_ptr<const InputColDescriptor> const& lhs,
763  std::shared_ptr<const InputColDescriptor> const& rhs) {
764  if (lhs->getScanDesc().getNestLevel() == rhs->getScanDesc().getNestLevel()) {
765  return lhs->getColId() < rhs->getColId();
766  }
767  return lhs->getScanDesc().getNestLevel() < rhs->getScanDesc().getNestLevel();
768  });
769  return {input_descs,
770  std::list<std::shared_ptr<const InputColDescriptor>>(input_col_descs.begin(),
771  input_col_descs.end())};
772 }
int getNestLevel() const
std::pair< std::unordered_set< const RexInput * >, std::vector< std::shared_ptr< RexInput > > > get_join_source_used_inputs(const RelAlgNode *ra_node, const Catalog_Namespace::Catalog &cat)
int table_id_from_ra(const RelAlgNode *ra_node)
void collect_used_input_desc(std::vector< InputDescriptor > &input_descs, const Catalog_Namespace::Catalog &cat, std::unordered_set< std::shared_ptr< const InputColDescriptor >> &input_col_descs_unique, const RelAlgNode *ra_node, const std::unordered_set< const RexInput *> &source_used_inputs, const std::unordered_map< const RelAlgNode *, int > &input_to_nest_level)
const RelAlgNode * get_data_sink(const RelAlgNode *ra_node)
+ Here is the call graph for this function:
+ Here is the caller graph for this function:

◆ get_input_nest_levels()

std::unordered_map<const RelAlgNode*, int> anonymous_namespace{RelAlgExecutor.cpp}::get_input_nest_levels ( const RelAlgNode *  ra_node,
const std::vector< size_t > &  input_permutation 
)

Definition at line 616 of file RelAlgExecutor.cpp.

References CHECK, get_data_sink(), logger::INFO, and LOG_IF.

Referenced by RelAlgExecutor::createAggregateWorkUnit(), RelAlgExecutor::createCompoundWorkUnit(), RelAlgExecutor::createFilterWorkUnit(), RelAlgExecutor::createProjectWorkUnit(), RelAlgExecutor::createTableFunctionWorkUnit(), and do_table_reordering().

618  {
619  const auto data_sink_node = get_data_sink(ra_node);
620  std::unordered_map<const RelAlgNode*, int> input_to_nest_level;
621  for (size_t input_idx = 0; input_idx < data_sink_node->inputCount(); ++input_idx) {
622  const auto input_node_idx =
623  input_permutation.empty() ? input_idx : input_permutation[input_idx];
624  const auto input_ra = data_sink_node->getInput(input_node_idx);
625  const auto it_ok = input_to_nest_level.emplace(input_ra, input_idx);
626  CHECK(it_ok.second);
627  LOG_IF(INFO, !input_permutation.empty())
628  << "Assigned input " << input_ra->toString() << " to nest level " << input_idx;
629  }
630  return input_to_nest_level;
631 }
#define LOG_IF(severity, condition)
Definition: Logger.h:279
#define CHECK(condition)
Definition: Logger.h:193
const RelAlgNode * get_data_sink(const RelAlgNode *ra_node)
+ Here is the call graph for this function:
+ Here is the caller graph for this function:

◆ get_inputs_meta()

std::pair<std::vector<TargetMetaInfo>, std::vector<std::shared_ptr<Analyzer::Expr> > > anonymous_namespace{RelAlgExecutor.cpp}::get_inputs_meta ( const RelFilter *  filter,
const RelAlgTranslator &  translator,
const std::vector< std::shared_ptr< RexInput >> &  inputs_owned,
const std::unordered_map< const RelAlgNode *, int > &  input_to_nest_level 
)

Definition at line 2900 of file RelAlgExecutor.cpp.

References CHECK, get_data_sink(), get_exprs_not_owned(), get_targets_meta(), synthesize_inputs(), and RelAlgTranslator::translateScalarRex().

Referenced by RelAlgExecutor::createFilterWorkUnit().

2903  {
2904  std::vector<TargetMetaInfo> in_metainfo;
2905  std::vector<std::shared_ptr<Analyzer::Expr>> exprs_owned;
2906  const auto data_sink_node = get_data_sink(filter);
2907  auto input_it = inputs_owned.begin();
2908  for (size_t nest_level = 0; nest_level < data_sink_node->inputCount(); ++nest_level) {
2909  const auto source = data_sink_node->getInput(nest_level);
2910  const auto scan_source = dynamic_cast<const RelScan*>(source);
2911  if (scan_source) {
2912  CHECK(source->getOutputMetainfo().empty());
2913  std::vector<std::shared_ptr<Analyzer::Expr>> scalar_sources_owned;
2914  for (size_t i = 0; i < scan_source->size(); ++i, ++input_it) {
2915  scalar_sources_owned.push_back(translator.translateScalarRex(input_it->get()));
2916  }
2917  const auto source_metadata =
2918  get_targets_meta(scan_source, get_exprs_not_owned(scalar_sources_owned));
2919  in_metainfo.insert(
2920  in_metainfo.end(), source_metadata.begin(), source_metadata.end());
2921  exprs_owned.insert(
2922  exprs_owned.end(), scalar_sources_owned.begin(), scalar_sources_owned.end());
2923  } else {
2924  const auto& source_metadata = source->getOutputMetainfo();
2925  input_it += source_metadata.size();
2926  in_metainfo.insert(
2927  in_metainfo.end(), source_metadata.begin(), source_metadata.end());
2928  const auto scalar_sources_owned = synthesize_inputs(
2929  data_sink_node, nest_level, source_metadata, input_to_nest_level);
2930  exprs_owned.insert(
2931  exprs_owned.end(), scalar_sources_owned.begin(), scalar_sources_owned.end());
2932  }
2933  }
2934  return std::make_pair(in_metainfo, exprs_owned);
2935 }
std::vector< std::shared_ptr< Analyzer::Expr > > synthesize_inputs(const RelAlgNode *ra_node, const size_t nest_level, const std::vector< TargetMetaInfo > &in_metainfo, const std::unordered_map< const RelAlgNode *, int > &input_to_nest_level)
std::vector< TargetMetaInfo > get_targets_meta(const RA *ra_node, const std::vector< Analyzer::Expr *> &target_exprs)
std::vector< Analyzer::Expr * > get_exprs_not_owned(const std::vector< std::shared_ptr< Analyzer::Expr >> &exprs)
Definition: Execute.h:213
std::shared_ptr< Analyzer::Expr > translateScalarRex(const RexScalar *rex) const
#define CHECK(condition)
Definition: Logger.h:193
const RelAlgNode * get_data_sink(const RelAlgNode *ra_node)
+ Here is the call graph for this function:
+ Here is the caller graph for this function:

◆ get_join_source_used_inputs()

std::pair<std::unordered_set<const RexInput*>, std::vector<std::shared_ptr<RexInput> > > anonymous_namespace{RelAlgExecutor.cpp}::get_join_source_used_inputs ( const RelAlgNode *  ra_node,
const Catalog_Namespace::Catalog &  cat 
)

Definition at line 634 of file RelAlgExecutor.cpp.

References CHECK_EQ, CHECK_GE, get_data_sink(), anonymous_namespace{RelAlgExecutor.cpp}::RexUsedInputsVisitor::get_inputs_owned(), RelAlgNode::inputCount(), join(), run_benchmark_import::result, and RexVisitorBase< T >::visit().

Referenced by get_input_desc_impl().

635  {
636  const auto data_sink_node = get_data_sink(ra_node);
637  if (auto join = dynamic_cast<const RelJoin*>(data_sink_node)) {
638  CHECK_EQ(join->inputCount(), 2u);
639  const auto condition = join->getCondition();
640  RexUsedInputsVisitor visitor(cat);
641  auto condition_inputs = visitor.visit(condition);
642  std::vector<std::shared_ptr<RexInput>> condition_inputs_owned(
643  visitor.get_inputs_owned());
644  return std::make_pair(condition_inputs, condition_inputs_owned);
645  }
646 
647  if (auto left_deep_join = dynamic_cast<const RelLeftDeepInnerJoin*>(data_sink_node)) {
648  CHECK_GE(left_deep_join->inputCount(), 2u);
649  const auto condition = left_deep_join->getInnerCondition();
650  RexUsedInputsVisitor visitor(cat);
651  auto result = visitor.visit(condition);
652  for (size_t nesting_level = 1; nesting_level <= left_deep_join->inputCount() - 1;
653  ++nesting_level) {
654  const auto outer_condition = left_deep_join->getOuterCondition(nesting_level);
655  if (outer_condition) {
656  const auto outer_result = visitor.visit(outer_condition);
657  result.insert(outer_result.begin(), outer_result.end());
658  }
659  }
660  std::vector<std::shared_ptr<RexInput>> used_inputs_owned(visitor.get_inputs_owned());
661  return std::make_pair(result, used_inputs_owned);
662  }
663 
664  CHECK_EQ(ra_node->inputCount(), 1u);
665  return std::make_pair(std::unordered_set<const RexInput*>{},
666  std::vector<std::shared_ptr<RexInput>>{});
667 }
#define CHECK_EQ(x, y)
Definition: Logger.h:201
std::string join(T const &container, std::string const &delim)
#define CHECK_GE(x, y)
Definition: Logger.h:206
const size_t inputCount() const
const RelAlgNode * get_data_sink(const RelAlgNode *ra_node)
+ Here is the call graph for this function:
+ Here is the caller graph for this function:

◆ get_join_type()

JoinType anonymous_namespace{RelAlgExecutor.cpp}::get_join_type ( const RelAlgNode *  ra)

Definition at line 2282 of file RelAlgExecutor.cpp.

References get_data_sink(), INNER, INVALID, and join().

Referenced by RelAlgExecutor::createAggregateWorkUnit(), RelAlgExecutor::createCompoundWorkUnit(), RelAlgExecutor::createFilterWorkUnit(), and RelAlgExecutor::createProjectWorkUnit().

2282  {
2283  auto sink = get_data_sink(ra);
2284  if (auto join = dynamic_cast<const RelJoin*>(sink)) {
2285  return join->getJoinType();
2286  }
2287  if (dynamic_cast<const RelLeftDeepInnerJoin*>(sink)) {
2288  return JoinType::INNER;
2289  }
2290 
2291  return JoinType::INVALID;
2292 }
std::string join(T const &container, std::string const &delim)
const RelAlgNode * get_data_sink(const RelAlgNode *ra_node)
+ Here is the call graph for this function:
+ Here is the caller graph for this function:

◆ get_left_deep_join_input_sizes()

std::vector<size_t> anonymous_namespace{RelAlgExecutor.cpp}::get_left_deep_join_input_sizes ( const RelLeftDeepInnerJoin *  left_deep_join)

Definition at line 2407 of file RelAlgExecutor.cpp.

References get_node_output(), RelAlgNode::getInput(), and RelAlgNode::inputCount().

Referenced by RelAlgExecutor::createCompoundWorkUnit(), and RelAlgExecutor::createProjectWorkUnit().

2408  {
2409  std::vector<size_t> input_sizes;
2410  for (size_t i = 0; i < left_deep_join->inputCount(); ++i) {
2411  const auto inputs = get_node_output(left_deep_join->getInput(i));
2412  input_sizes.push_back(inputs.size());
2413  }
2414  return input_sizes;
2415 }
const size_t inputCount() const
RANodeOutput get_node_output(const RelAlgNode *ra_node)
const RelAlgNode * getInput(const size_t idx) const
+ Here is the call graph for this function:
+ Here is the caller graph for this function:

◆ get_logical_type_for_expr()

SQLTypeInfo anonymous_namespace{RelAlgExecutor.cpp}::get_logical_type_for_expr ( const Analyzer::Expr &  expr)
inline

Definition at line 1010 of file RelAlgExecutor.cpp.

References get_logical_type_info(), get_nullable_logical_type_info(), Analyzer::Expr::get_type_info(), is_agg(), is_count_distinct(), and kBIGINT.

Referenced by get_targets_meta().

1010  {
1011  if (is_count_distinct(&expr)) {
1012  return SQLTypeInfo(kBIGINT, false);
1013  } else if (is_agg(&expr)) {
1014  return get_nullable_logical_type_info(expr.get_type_info());
1015  }
1016  return get_logical_type_info(expr.get_type_info());
1017 }
bool is_agg(const Analyzer::Expr *expr)
SQLTypeInfo get_nullable_logical_type_info(const SQLTypeInfo &type_info)
Definition: sqltypes.h:884
SQLTypeInfo get_logical_type_info(const SQLTypeInfo &type_info)
Definition: sqltypes.h:869
bool is_count_distinct(const Analyzer::Expr *expr)
SQLTypeInfoCore< ArrayContextTypeSizer, ExecutorTypePackaging, DateTimeFacilities > SQLTypeInfo
Definition: sqltypes.h:852
const SQLTypeInfo & get_type_info() const
Definition: Analyzer.h:78
+ Here is the call graph for this function:
+ Here is the caller graph for this function:

◆ get_non_join_sequence()

std::vector<const RelAlgNode*> anonymous_namespace{RelAlgExecutor.cpp}::get_non_join_sequence ( const RelAlgNode *  ra)

Definition at line 669 of file RelAlgExecutor.cpp.

References CHECK_EQ, and join().

Referenced by collect_used_input_desc().

669  {
670  std::vector<const RelAlgNode*> seq;
671  for (auto join = dynamic_cast<const RelJoin*>(ra); join;
672  join = static_cast<const RelJoin*>(join->getInput(0))) {
673  CHECK_EQ(size_t(2), join->inputCount());
674  seq.emplace_back(join->getInput(1));
675  auto lhs = join->getInput(0);
676  if (!dynamic_cast<const RelJoin*>(lhs)) {
677  seq.emplace_back(lhs);
678  break;
679  }
680  }
681  std::reverse(seq.begin(), seq.end());
682  return seq;
683 }
#define CHECK_EQ(x, y)
Definition: Logger.h:201
std::string join(T const &container, std::string const &delim)
+ Here is the call graph for this function:
+ Here is the caller graph for this function:

◆ get_order_entries()

std::list<Analyzer::OrderEntry> anonymous_namespace{RelAlgExecutor.cpp}::get_order_entries ( const RelSort *  sort)

Definition at line 1532 of file RelAlgExecutor.cpp.

References RelSort::collationCount(), Descending, First, RelSort::getCollation(), and run_benchmark_import::result.

Referenced by RelAlgExecutor::createSortInputWorkUnit(), and RelAlgExecutor::executeSort().

1532  {
1533  std::list<Analyzer::OrderEntry> result;
1534  for (size_t i = 0; i < sort->collationCount(); ++i) {
1535  const auto sort_field = sort->getCollation(i);
1536  result.emplace_back(sort_field.getField() + 1,
1537  sort_field.getSortDir() == SortDirection::Descending,
1538  sort_field.getNullsPosition() == NullSortedPosition::First);
1539  }
1540  return result;
1541 }
size_t collationCount() const
SortField getCollation(const size_t i) const
+ Here is the call graph for this function:
+ Here is the caller graph for this function:

◆ get_physical_inputs()

std::unordered_set<PhysicalInput> anonymous_namespace{RelAlgExecutor.cpp}::get_physical_inputs ( const Catalog_Namespace::Catalog &  cat,
const RelAlgNode *  ra 
)

Definition at line 53 of file RelAlgExecutor.cpp.

References Catalog_Namespace::Catalog::getColumnIdBySpi().

Referenced by RelAlgExecutor::computeColRangesCache(), RelAlgExecutor::computeStringDictionaryGenerations(), and RelAlgExecutor::executeRelAlgQueryNoRetry().

55  {
56  auto phys_inputs = get_physical_inputs(ra);
57  std::unordered_set<PhysicalInput> phys_inputs2;
58  for (auto& phi : phys_inputs) {
59  phys_inputs2.insert(
60  PhysicalInput{cat.getColumnIdBySpi(phi.table_id, phi.col_id), phi.table_id});
61  }
62  return phys_inputs2;
63 }
std::unordered_set< PhysicalInput > get_physical_inputs(const Catalog_Namespace::Catalog &cat, const RelAlgNode *ra)
const int getColumnIdBySpi(const int tableId, const size_t spi) const
Definition: Catalog.cpp:1459
+ Here is the call graph for this function:
+ Here is the caller graph for this function:

◆ get_scalar_sources_size() [1/3]

size_t anonymous_namespace{RelAlgExecutor.cpp}::get_scalar_sources_size ( const RelCompound *  compound)

Definition at line 791 of file RelAlgExecutor.cpp.

References RelCompound::getScalarSourcesSize().

791  {
792  return compound->getScalarSourcesSize();
793 }
const size_t getScalarSourcesSize() const
+ Here is the call graph for this function:

◆ get_scalar_sources_size() [2/3]

size_t anonymous_namespace{RelAlgExecutor.cpp}::get_scalar_sources_size ( const RelProject *  project)

Definition at line 795 of file RelAlgExecutor.cpp.

References RelProject::size().

795  {
796  return project->size();
797 }
size_t size() const override
+ Here is the call graph for this function:

◆ get_scalar_sources_size() [3/3]

size_t anonymous_namespace{RelAlgExecutor.cpp}::get_scalar_sources_size ( const RelTableFunction *  table_func)

Definition at line 799 of file RelAlgExecutor.cpp.

References RelTableFunction::getTableFuncInputsSize().

Referenced by translate_scalar_sources(), and translate_scalar_sources_for_update().

799  {
800  return table_func->getTableFuncInputsSize();
801 }
size_t getTableFuncInputsSize() const
+ Here is the call graph for this function:
+ Here is the caller graph for this function:

◆ get_scan_limit()

size_t anonymous_namespace{RelAlgExecutor.cpp}::get_scan_limit ( const RelAlgNode *  ra,
const size_t  limit 
)

Definition at line 1543 of file RelAlgExecutor.cpp.

Referenced by RelAlgExecutor::createSortInputWorkUnit().

1543  {
1544  const auto aggregate = dynamic_cast<const RelAggregate*>(ra);
1545  if (aggregate) {
1546  return 0;
1547  }
1548  const auto compound = dynamic_cast<const RelCompound*>(ra);
1549  return (compound && compound->isAggregate()) ? 0 : limit;
1550 }
+ Here is the caller graph for this function:

◆ get_targets_meta()

template<class RA >
std::vector<TargetMetaInfo> anonymous_namespace{RelAlgExecutor.cpp}::get_targets_meta ( const RA *  ra_node,
const std::vector< Analyzer::Expr *> &  target_exprs 
)

Definition at line 1020 of file RelAlgExecutor.cpp.

References CHECK, and get_logical_type_for_expr().

Referenced by RelAlgExecutor::createAggregateWorkUnit(), RelAlgExecutor::createCompoundWorkUnit(), RelAlgExecutor::createProjectWorkUnit(), RelAlgExecutor::createTableFunctionWorkUnit(), and get_inputs_meta().

1022  {
1023  std::vector<TargetMetaInfo> targets_meta;
1024  for (size_t i = 0; i < ra_node->size(); ++i) {
1025  CHECK(target_exprs[i]);
1026  // TODO(alex): remove the count distinct type fixup.
1027  targets_meta.emplace_back(ra_node->getFieldName(i),
1028  get_logical_type_for_expr(*target_exprs[i]),
1029  target_exprs[i]->get_type_info());
1030  }
1031  return targets_meta;
1032 }
SQLTypeInfo get_logical_type_for_expr(const Analyzer::Expr &expr)
#define CHECK(condition)
Definition: Logger.h:193
+ Here is the call graph for this function:
+ Here is the caller graph for this function:

◆ get_used_inputs() [1/5]

std::pair<std::unordered_set<const RexInput*>, std::vector<std::shared_ptr<RexInput> > > anonymous_namespace{RelAlgExecutor.cpp}::get_used_inputs ( const RelCompound *  compound,
const Catalog_Namespace::Catalog &  cat 
)

Definition at line 514 of file RelAlgExecutor.cpp.

References anonymous_namespace{RelAlgExecutor.cpp}::RexUsedInputsVisitor::get_inputs_owned(), RelCompound::getFilterExpr(), RelCompound::getScalarSource(), RelCompound::getScalarSourcesSize(), and RexVisitorBase< T >::visit().

514  {
515  RexUsedInputsVisitor visitor(cat);
516  const auto filter_expr = compound->getFilterExpr();
517  std::unordered_set<const RexInput*> used_inputs =
518  filter_expr ? visitor.visit(filter_expr) : std::unordered_set<const RexInput*>{};
519  const auto sources_size = compound->getScalarSourcesSize();
520  for (size_t i = 0; i < sources_size; ++i) {
521  const auto source_inputs = visitor.visit(compound->getScalarSource(i));
522  used_inputs.insert(source_inputs.begin(), source_inputs.end());
523  }
524  std::vector<std::shared_ptr<RexInput>> used_inputs_owned(visitor.get_inputs_owned());
525  return std::make_pair(used_inputs, used_inputs_owned);
526 }
const size_t getScalarSourcesSize() const
const RexScalar * getScalarSource(const size_t i) const
const RexScalar * getFilterExpr() const
+ Here is the call graph for this function:

◆ get_used_inputs() [2/5]

std::pair<std::unordered_set<const RexInput*>, std::vector<std::shared_ptr<RexInput> > > anonymous_namespace{RelAlgExecutor.cpp}::get_used_inputs ( const RelAggregate *  aggregate,
const Catalog_Namespace::Catalog &  cat 
)

Definition at line 529 of file RelAlgExecutor.cpp.

References CHECK_EQ, CHECK_GE, RelAggregate::getAggExprs(), RelAggregate::getGroupByCount(), RelAlgNode::getInput(), RelAlgNode::getOutputMetainfo(), and RelAlgNode::inputCount().

529  {
530  CHECK_EQ(size_t(1), aggregate->inputCount());
531  std::unordered_set<const RexInput*> used_inputs;
532  std::vector<std::shared_ptr<RexInput>> used_inputs_owned;
533  const auto source = aggregate->getInput(0);
534  const auto& in_metainfo = source->getOutputMetainfo();
535  const auto group_count = aggregate->getGroupByCount();
536  CHECK_GE(in_metainfo.size(), group_count);
537  for (size_t i = 0; i < group_count; ++i) {
538  auto synthesized_used_input = new RexInput(source, i);
539  used_inputs_owned.emplace_back(synthesized_used_input);
540  used_inputs.insert(synthesized_used_input);
541  }
542  for (const auto& agg_expr : aggregate->getAggExprs()) {
543  for (size_t i = 0; i < agg_expr->size(); ++i) {
544  const auto operand_idx = agg_expr->getOperand(i);
545  CHECK_GE(in_metainfo.size(), static_cast<size_t>(operand_idx));
546  auto synthesized_used_input = new RexInput(source, operand_idx);
547  used_inputs_owned.emplace_back(synthesized_used_input);
548  used_inputs.insert(synthesized_used_input);
549  }
550  }
551  return std::make_pair(used_inputs, used_inputs_owned);
552 }
#define CHECK_EQ(x, y)
Definition: Logger.h:201
#define CHECK_GE(x, y)
Definition: Logger.h:206
const std::vector< TargetMetaInfo > & getOutputMetainfo() const
const size_t getGroupByCount() const
const size_t inputCount() const
const RelAlgNode * getInput(const size_t idx) const
const std::vector< std::unique_ptr< const RexAgg > > & getAggExprs() const
+ Here is the call graph for this function:

◆ get_used_inputs() [3/5]

std::pair<std::unordered_set<const RexInput*>, std::vector<std::shared_ptr<RexInput> > > anonymous_namespace{RelAlgExecutor.cpp}::get_used_inputs ( const RelProject *  project,
const Catalog_Namespace::Catalog &  cat 
)

Definition at line 555 of file RelAlgExecutor.cpp.

References anonymous_namespace{RelAlgExecutor.cpp}::RexUsedInputsVisitor::get_inputs_owned(), RelProject::getProjectAt(), RelProject::size(), and RexVisitorBase< T >::visit().

555  {
556  RexUsedInputsVisitor visitor(cat);
557  std::unordered_set<const RexInput*> used_inputs;
558  for (size_t i = 0; i < project->size(); ++i) {
559  const auto proj_inputs = visitor.visit(project->getProjectAt(i));
560  used_inputs.insert(proj_inputs.begin(), proj_inputs.end());
561  }
562  std::vector<std::shared_ptr<RexInput>> used_inputs_owned(visitor.get_inputs_owned());
563  return std::make_pair(used_inputs, used_inputs_owned);
564 }
size_t size() const override
const RexScalar * getProjectAt(const size_t idx) const
+ Here is the call graph for this function:

◆ get_used_inputs() [4/5]

std::pair<std::unordered_set<const RexInput*>, std::vector<std::shared_ptr<RexInput> > > anonymous_namespace{RelAlgExecutor.cpp}::get_used_inputs ( const RelTableFunction *  table_func,
const Catalog_Namespace::Catalog &  cat 
)

Definition at line 567 of file RelAlgExecutor.cpp.

References anonymous_namespace{RelAlgExecutor.cpp}::RexUsedInputsVisitor::get_inputs_owned(), RelTableFunction::getTableFuncInputAt(), RelTableFunction::getTableFuncInputsSize(), and RexVisitorBase< T >::visit().

568  {
569  RexUsedInputsVisitor visitor(cat);
570  std::unordered_set<const RexInput*> used_inputs;
571  for (size_t i = 0; i < table_func->getTableFuncInputsSize(); ++i) {
572  const auto table_func_inputs = visitor.visit(table_func->getTableFuncInputAt(i));
573  used_inputs.insert(table_func_inputs.begin(), table_func_inputs.end());
574  }
575  std::vector<std::shared_ptr<RexInput>> used_inputs_owned(visitor.get_inputs_owned());
576  return std::make_pair(used_inputs, used_inputs_owned);
577 }
const RexScalar * getTableFuncInputAt(const size_t idx) const
size_t getTableFuncInputsSize() const
+ Here is the call graph for this function:

◆ get_used_inputs() [5/5]

std::pair<std::unordered_set<const RexInput*>, std::vector<std::shared_ptr<RexInput> > > anonymous_namespace{RelAlgExecutor.cpp}::get_used_inputs ( const RelFilter *  filter,
const Catalog_Namespace::Catalog &  cat 
)

Definition at line 580 of file RelAlgExecutor.cpp.

References CHECK, and get_data_sink().

Referenced by get_input_desc().

580  {
581  std::unordered_set<const RexInput*> used_inputs;
582  std::vector<std::shared_ptr<RexInput>> used_inputs_owned;
583  const auto data_sink_node = get_data_sink(filter);
584  for (size_t nest_level = 0; nest_level < data_sink_node->inputCount(); ++nest_level) {
585  const auto source = data_sink_node->getInput(nest_level);
586  const auto scan_source = dynamic_cast<const RelScan*>(source);
587  if (scan_source) {
588  CHECK(source->getOutputMetainfo().empty());
589  for (size_t i = 0; i < scan_source->size(); ++i) {
590  auto synthesized_used_input = new RexInput(scan_source, i);
591  used_inputs_owned.emplace_back(synthesized_used_input);
592  used_inputs.insert(synthesized_used_input);
593  }
594  } else {
595  const auto& partial_in_metadata = source->getOutputMetainfo();
596  for (size_t i = 0; i < partial_in_metadata.size(); ++i) {
597  auto synthesized_used_input = new RexInput(source, i);
598  used_inputs_owned.emplace_back(synthesized_used_input);
599  used_inputs.insert(synthesized_used_input);
600  }
601  }
602  }
603  return std::make_pair(used_inputs, used_inputs_owned);
604 }
#define CHECK(condition)
Definition: Logger.h:193
const RelAlgNode * get_data_sink(const RelAlgNode *ra_node)
+ Here is the call graph for this function:
+ Here is the caller graph for this function:

◆ groups_approx_upper_bound()

size_t anonymous_namespace{RelAlgExecutor.cpp}::groups_approx_upper_bound ( const std::vector< InputTableInfo > &  table_infos)

Upper bound estimation for the number of groups. Not strictly correct and not tight, but if the tables involved are really small we shouldn't waste time doing the NDV estimation. We don't account for cross-joins and / or group by unnested array, which is the reason this estimation isn't entirely reliable.

Definition at line 1727 of file RelAlgExecutor.cpp.

References CHECK.

Referenced by RelAlgExecutor::executeWorkUnit().

1727  {
1728  CHECK(!table_infos.empty());
1729  const auto& first_table = table_infos.front();
1730  size_t max_num_groups = first_table.info.getNumTuplesUpperBound();
1731  for (const auto& table_info : table_infos) {
1732  if (table_info.info.getNumTuplesUpperBound() > max_num_groups) {
1733  max_num_groups = table_info.info.getNumTuplesUpperBound();
1734  }
1735  }
1736  return std::max(max_num_groups, size_t(1));
1737 }
#define CHECK(condition)
Definition: Logger.h:193
+ Here is the caller graph for this function:

◆ is_agg()

bool anonymous_namespace{RelAlgExecutor.cpp}::is_agg ( const Analyzer::Expr *  expr)

Definition at line 998 of file RelAlgExecutor.cpp.

References Analyzer::AggExpr::get_aggtype(), kAVG, kMAX, kMIN, and kSUM.

Referenced by anonymous_namespace{RelAlgDagBuilder.cpp}::create_compound(), RelAlgExecutor::executeWorkUnit(), get_logical_type_for_expr(), ResultSet::getSingleSlotTargetBitmap(), and Planner::Optimizer::optimize_aggs().

998  {
999  const auto agg_expr = dynamic_cast<const Analyzer::AggExpr*>(expr);
1000  if (agg_expr && agg_expr->get_contains_agg()) {
1001  auto agg_type = agg_expr->get_aggtype();
1002  if (agg_type == SQLAgg::kMIN || agg_type == SQLAgg::kMAX ||
1003  agg_type == SQLAgg::kSUM || agg_type == SQLAgg::kAVG) {
1004  return true;
1005  }
1006  }
1007  return false;
1008 }
Definition: sqldefs.h:73
Definition: sqldefs.h:75
SQLAgg get_aggtype() const
Definition: Analyzer.h:1044
Definition: sqldefs.h:74
Definition: sqldefs.h:72
+ Here is the call graph for this function:
+ Here is the caller graph for this function:

◆ is_count_distinct()

bool anonymous_namespace{RelAlgExecutor.cpp}::is_count_distinct ( const Analyzer::Expr *  expr)

Definition at line 993 of file RelAlgExecutor.cpp.

References Analyzer::AggExpr::get_is_distinct().

Referenced by get_logical_type_for_expr().

993  {
994  const auto agg_expr = dynamic_cast<const Analyzer::AggExpr*>(expr);
995  return agg_expr && agg_expr->get_is_distinct();
996 }
bool get_is_distinct() const
Definition: Analyzer.h:1047
+ Here is the call graph for this function:
+ Here is the caller graph for this function:

◆ is_window_execution_unit()

bool anonymous_namespace{RelAlgExecutor.cpp}::is_window_execution_unit ( const RelAlgExecutionUnit &  ra_exe_unit)

Definition at line 1256 of file RelAlgExecutor.cpp.

References RelAlgExecutionUnit::target_exprs.

Referenced by RelAlgExecutor::executeWorkUnit().

1256  {
1257  return std::any_of(ra_exe_unit.target_exprs.begin(),
1258  ra_exe_unit.target_exprs.end(),
1259  [](const Analyzer::Expr* expr) {
1260  return dynamic_cast<const Analyzer::WindowFunction*>(expr);
1261  });
1262 }
std::vector< Analyzer::Expr * > target_exprs
+ Here is the caller graph for this function:

◆ left_deep_join_types()

std::vector<JoinType> anonymous_namespace{RelAlgExecutor.cpp}::left_deep_join_types ( const RelLeftDeepInnerJoin *  left_deep_join)

Definition at line 2362 of file RelAlgExecutor.cpp.

References CHECK_GE, RelLeftDeepInnerJoin::getOuterCondition(), INNER, RelAlgNode::inputCount(), and LEFT.

Referenced by RelAlgExecutor::createCompoundWorkUnit(), RelAlgExecutor::createProjectWorkUnit(), and RelAlgExecutor::translateLeftDeepJoinFilter().

2362  {
2363  CHECK_GE(left_deep_join->inputCount(), size_t(2));
2364  std::vector<JoinType> join_types(left_deep_join->inputCount() - 1, JoinType::INNER);
2365  for (size_t nesting_level = 1; nesting_level <= left_deep_join->inputCount() - 1;
2366  ++nesting_level) {
2367  if (left_deep_join->getOuterCondition(nesting_level)) {
2368  join_types[nesting_level - 1] = JoinType::LEFT;
2369  }
2370  }
2371  return join_types;
2372 }
#define CHECK_GE(x, y)
Definition: Logger.h:206
const RexScalar * getOuterCondition(const size_t nesting_level) const
const size_t inputCount() const
+ Here is the call graph for this function:
+ Here is the caller graph for this function:

◆ list_contains_expression()

template<class QualsList >
bool anonymous_namespace{RelAlgExecutor.cpp}::list_contains_expression ( const QualsList &  haystack,
const std::shared_ptr< Analyzer::Expr > &  needle 
)

Definition at line 2536 of file RelAlgExecutor.cpp.

Referenced by reverse_logical_distribution().

2537  {
2538  for (const auto& qual : haystack) {
2539  if (*qual == *needle) {
2540  return true;
2541  }
2542  }
2543  return false;
2544 }
+ Here is the caller graph for this function:

◆ node_is_aggregate()

bool anonymous_namespace{RelAlgExecutor.cpp}::node_is_aggregate ( const RelAlgNode ra)

Definition at line 47 of file RelAlgExecutor.cpp.

Referenced by RelAlgExecutor::executeRelAlgQuerySingleStep(), and RelAlgExecutor::executeSort().

47  {
48  const auto compound = dynamic_cast<const RelCompound*>(ra);
49  const auto aggregate = dynamic_cast<const RelAggregate*>(ra);
50  return ((compound && compound->isAggregate()) || aggregate);
51 }
+ Here is the caller graph for this function:

◆ reverse_logical_distribution()

std::shared_ptr<Analyzer::Expr> anonymous_namespace{RelAlgExecutor.cpp}::reverse_logical_distribution ( const std::shared_ptr< Analyzer::Expr > &  expr)

Definition at line 2549 of file RelAlgExecutor.cpp.

References build_logical_expression(), CHECK_GE, kAND, kONE, kOR, list_contains_expression(), Parser::OperExpr::normalize(), qual_to_conjunctive_form(), and qual_to_disjunctive_form().

Referenced by RelAlgExecutor::makeJoinQuals().

// Applies reverse distribution of AND over OR: rewrites a disjunction of
// conjunctions such as (A AND x) OR (A AND y) into A AND (x OR y), pulling
// the factors common to every disjunct out front. Returns the input
// expression unchanged when no common factor exists.
2550  {
2551  const auto expr_terms = qual_to_disjunctive_form(expr);
2552  CHECK_GE(expr_terms.size(), size_t(1));
2553  const auto& first_term = expr_terms.front();
2554  const auto first_term_factors = qual_to_conjunctive_form(first_term);
2555  std::vector<std::shared_ptr<Analyzer::Expr>> common_factors;
2556  // First, collect the conjunctive components common to all the disjunctive components.
2557  // Don't do it for simple qualifiers, we only care about expensive or join qualifiers.
2558  for (const auto& first_term_factor : first_term_factors.quals) {
2559  bool is_common =
2560  expr_terms.size() > 1; // Only report common factors for disjunction.
// A factor of the first disjunct is common only if it appears (by value)
// in every other disjunct's conjunctive form as well.
2561  for (size_t i = 1; i < expr_terms.size(); ++i) {
2562  const auto crt_term_factors = qual_to_conjunctive_form(expr_terms[i]);
2563  if (!list_contains_expression(crt_term_factors.quals, first_term_factor)) {
2564  is_common = false;
2565  break;
2566  }
2567  }
2568  if (is_common) {
2569  common_factors.push_back(first_term_factor);
2570  }
2571  }
// Nothing to factor out: hand back the original expression untouched.
2572  if (common_factors.empty()) {
2573  return expr;
2574  }
2575  // Now that the common expressions are known, collect the remaining expressions.
2576  std::vector<std::shared_ptr<Analyzer::Expr>> remaining_terms;
2577  for (const auto& term : expr_terms) {
2578  const auto term_cf = qual_to_conjunctive_form(term);
// Simple qualifiers were deliberately excluded from factoring above, so
// they always stay with their own disjunct here.
2579  std::vector<std::shared_ptr<Analyzer::Expr>> remaining_quals(
2580  term_cf.simple_quals.begin(), term_cf.simple_quals.end());
2581  for (const auto& qual : term_cf.quals) {
2582  if (!list_contains_expression(common_factors, qual)) {
2583  remaining_quals.push_back(qual);
2584  }
2585  }
// A disjunct may consist solely of common factors, in which case it
// contributes nothing to the residual OR.
2586  if (!remaining_quals.empty()) {
2587  remaining_terms.push_back(build_logical_expression(remaining_quals, kAND));
2588  }
2589  }
2590  // Reconstruct the expression with the transformation applied.
2591  const auto common_expr = build_logical_expression(common_factors, kAND);
2592  if (remaining_terms.empty()) {
2593  return common_expr;
2594  }
2595  const auto remaining_expr = build_logical_expression(remaining_terms, kOR);
2596  return Parser::OperExpr::normalize(kAND, kONE, common_expr, remaining_expr);
2597 }
Definition: sqldefs.h:38
#define CHECK_GE(x, y)
Definition: Logger.h:206
QualsConjunctiveForm qual_to_conjunctive_form(const std::shared_ptr< Analyzer::Expr > qual_expr)
bool list_contains_expression(const QualsList &haystack, const std::shared_ptr< Analyzer::Expr > &needle)
Definition: sqldefs.h:37
std::shared_ptr< Analyzer::Expr > build_logical_expression(const std::vector< std::shared_ptr< Analyzer::Expr >> &factors, const SQLOps sql_op)
static std::shared_ptr< Analyzer::Expr > normalize(const SQLOps optype, const SQLQualifier qual, std::shared_ptr< Analyzer::Expr > left_expr, std::shared_ptr< Analyzer::Expr > right_expr)
Definition: ParserNode.cpp:261
Definition: sqldefs.h:69
std::vector< std::shared_ptr< Analyzer::Expr > > qual_to_disjunctive_form(const std::shared_ptr< Analyzer::Expr > &qual_expr)
+ Here is the call graph for this function:
+ Here is the caller graph for this function:

◆ rewrite_quals()

std::list<std::shared_ptr<Analyzer::Expr> > anonymous_namespace{RelAlgExecutor.cpp}::rewrite_quals ( const std::list< std::shared_ptr< Analyzer::Expr >> &  quals)

Definition at line 2417 of file RelAlgExecutor.cpp.

References rewrite_expr().

Referenced by RelAlgExecutor::createCompoundWorkUnit().

2418  {
2419  std::list<std::shared_ptr<Analyzer::Expr>> rewritten_quals;
2420  for (const auto& qual : quals) {
2421  const auto rewritten_qual = rewrite_expr(qual.get());
2422  rewritten_quals.push_back(rewritten_qual ? rewritten_qual : qual);
2423  }
2424  return rewritten_quals;
2425 }
Analyzer::ExpressionPtr rewrite_expr(const Analyzer::Expr *expr)
+ Here is the call graph for this function:
+ Here is the caller graph for this function:

◆ rex_to_conjunctive_form()

std::vector<const RexScalar*> anonymous_namespace{RelAlgExecutor.cpp}::rex_to_conjunctive_form ( const RexScalar qual_expr)

Definition at line 2509 of file RelAlgExecutor.cpp.

References CHECK, CHECK_GE, and kAND.

Referenced by RelAlgExecutor::makeJoinQuals().

2509  {
2510  CHECK(qual_expr);
2511  const auto bin_oper = dynamic_cast<const RexOperator*>(qual_expr);
2512  if (!bin_oper || bin_oper->getOperator() != kAND) {
2513  return {qual_expr};
2514  }
2515  CHECK_GE(bin_oper->size(), size_t(2));
2516  auto lhs_cf = rex_to_conjunctive_form(bin_oper->getOperand(0));
2517  for (size_t i = 1; i < bin_oper->size(); ++i) {
2518  const auto rhs_cf = rex_to_conjunctive_form(bin_oper->getOperand(i));
2519  lhs_cf.insert(lhs_cf.end(), rhs_cf.begin(), rhs_cf.end());
2520  }
2521  return lhs_cf;
2522 }
#define CHECK_GE(x, y)
Definition: Logger.h:206
std::vector< const RexScalar * > rex_to_conjunctive_form(const RexScalar *qual_expr)
Definition: sqldefs.h:37
#define CHECK(condition)
Definition: Logger.h:193
+ Here is the caller graph for this function:

◆ scalar_at() [1/3]

const RexScalar* anonymous_namespace{RelAlgExecutor.cpp}::scalar_at ( const size_t  i,
const RelCompound compound 
)

Definition at line 803 of file RelAlgExecutor.cpp.

References RelCompound::getScalarSource().

803  {
804  return compound->getScalarSource(i);
805 }
const RexScalar * getScalarSource(const size_t i) const
+ Here is the call graph for this function:

◆ scalar_at() [2/3]

const RexScalar* anonymous_namespace{RelAlgExecutor.cpp}::scalar_at ( const size_t  i,
const RelProject project 
)

Definition at line 807 of file RelAlgExecutor.cpp.

References RelProject::getProjectAt().

807  {
808  return project->getProjectAt(i);
809 }
const RexScalar * getProjectAt(const size_t idx) const
+ Here is the call graph for this function:

◆ scalar_at() [3/3]

const RexScalar* anonymous_namespace{RelAlgExecutor.cpp}::scalar_at ( const size_t  i,
const RelTableFunction table_func 
)

Definition at line 811 of file RelAlgExecutor.cpp.

References RelTableFunction::getTableFuncInputAt().

Referenced by translate_scalar_sources(), and translate_scalar_sources_for_update().

811  {
812  return table_func->getTableFuncInputAt(i);
813 }
const RexScalar * getTableFuncInputAt(const size_t idx) const
+ Here is the call graph for this function:
+ Here is the caller graph for this function:

◆ set_transient_dict()

std::shared_ptr<Analyzer::Expr> anonymous_namespace{RelAlgExecutor.cpp}::set_transient_dict ( const std::shared_ptr< Analyzer::Expr expr)

Definition at line 815 of file RelAlgExecutor.cpp.

References kENCODING_DICT, kENCODING_NONE, and TRANSIENT_DICT_ID.

Referenced by set_transient_dict_maybe(), translate_groupby_exprs(), and translate_targets().

816  {
817  const auto& ti = expr->get_type_info();
818  if (!ti.is_string() || ti.get_compression() != kENCODING_NONE) {
819  return expr;
820  }
821  auto transient_dict_ti = ti;
822  transient_dict_ti.set_compression(kENCODING_DICT);
823  transient_dict_ti.set_comp_param(TRANSIENT_DICT_ID);
824  transient_dict_ti.set_fixed_size();
825  return expr->add_cast(transient_dict_ti);
826 }
#define TRANSIENT_DICT_ID
Definition: sqltypes.h:189
+ Here is the caller graph for this function:

◆ set_transient_dict_maybe()

void anonymous_namespace{RelAlgExecutor.cpp}::set_transient_dict_maybe ( std::vector< std::shared_ptr< Analyzer::Expr >> &  scalar_sources,
const std::shared_ptr< Analyzer::Expr > &  expr 
)

Definition at line 828 of file RelAlgExecutor.cpp.

References fold_expr(), and set_transient_dict().

Referenced by translate_scalar_sources(), and translate_scalar_sources_for_update().

830  {
831  try {
832  scalar_sources.push_back(set_transient_dict(fold_expr(expr.get())));
833  } catch (...) {
834  scalar_sources.push_back(fold_expr(expr.get()));
835  }
836 }
std::shared_ptr< Analyzer::Expr > set_transient_dict(const std::shared_ptr< Analyzer::Expr > expr)
std::shared_ptr< Analyzer::Expr > fold_expr(const Analyzer::Expr *expr)
+ Here is the call graph for this function:
+ Here is the caller graph for this function:

◆ synthesize_inputs()

std::vector<std::shared_ptr<Analyzer::Expr> > anonymous_namespace{RelAlgExecutor.cpp}::synthesize_inputs ( const RelAlgNode ra_node,
const size_t  nest_level,
const std::vector< TargetMetaInfo > &  in_metainfo,
const std::unordered_map< const RelAlgNode *, int > &  input_to_nest_level 
)

Definition at line 2676 of file RelAlgExecutor.cpp.

References CHECK, CHECK_GE, CHECK_LE, RelAlgNode::getInput(), RelAlgNode::inputCount(), and table_id_from_ra().

Referenced by RelAlgExecutor::createAggregateWorkUnit(), and get_inputs_meta().

2680  {
2681  CHECK_LE(size_t(1), ra_node->inputCount());
2682  CHECK_GE(size_t(2), ra_node->inputCount());
2683  const auto input = ra_node->getInput(nest_level);
2684  const auto it_rte_idx = input_to_nest_level.find(input);
2685  CHECK(it_rte_idx != input_to_nest_level.end());
2686  const int rte_idx = it_rte_idx->second;
2687  const int table_id = table_id_from_ra(input);
2688  std::vector<std::shared_ptr<Analyzer::Expr>> inputs;
2689  const auto scan_ra = dynamic_cast<const RelScan*>(input);
2690  int input_idx = 0;
2691  for (const auto& input_meta : in_metainfo) {
2692  inputs.push_back(
2693  std::make_shared<Analyzer::ColumnVar>(input_meta.get_type_info(),
2694  table_id,
2695  scan_ra ? input_idx + 1 : input_idx,
2696  rte_idx));
2697  ++input_idx;
2698  }
2699  return inputs;
2700 }
#define CHECK_GE(x, y)
Definition: Logger.h:206
#define CHECK_LE(x, y)
Definition: Logger.h:204
const size_t inputCount() const
int table_id_from_ra(const RelAlgNode *ra_node)
#define CHECK(condition)
Definition: Logger.h:193
const RelAlgNode * getInput(const size_t idx) const
+ Here is the call graph for this function:
+ Here is the caller graph for this function:

◆ table_id_from_ra()

int anonymous_namespace{RelAlgExecutor.cpp}::table_id_from_ra ( const RelAlgNode ra_node)

Definition at line 606 of file RelAlgExecutor.cpp.

References CHECK, RelAlgNode::getId(), and RelScan::getTableDescriptor().

Referenced by collect_used_input_desc(), get_input_desc_impl(), and synthesize_inputs().

606  {
607  const auto scan_ra = dynamic_cast<const RelScan*>(ra_node);
608  if (scan_ra) {
609  const auto td = scan_ra->getTableDescriptor();
610  CHECK(td);
611  return td->tableId;
612  }
613  return -ra_node->getId();
614 }
const TableDescriptor * getTableDescriptor() const
unsigned getId() const
#define CHECK(condition)
Definition: Logger.h:193
+ Here is the call graph for this function:
+ Here is the caller graph for this function:

◆ transform_to_inner()

std::shared_ptr<Analyzer::Expr> anonymous_namespace{RelAlgExecutor.cpp}::transform_to_inner ( const Analyzer::Expr expr)

Definition at line 1345 of file RelAlgExecutor.cpp.

Referenced by RelAlgExecutor::computeWindow().

1345  {
1346  const auto tuple = dynamic_cast<const Analyzer::ExpressionTuple*>(expr);
1347  if (tuple) {
1348  std::vector<std::shared_ptr<Analyzer::Expr>> transformed_tuple;
1349  for (const auto& element : tuple->getTuple()) {
1350  transformed_tuple.push_back(transform_to_inner(element.get()));
1351  }
1352  return makeExpr<Analyzer::ExpressionTuple>(transformed_tuple);
1353  }
1354  const auto col = dynamic_cast<const Analyzer::ColumnVar*>(expr);
1355  if (!col) {
1356  throw std::runtime_error("Only columns supported in the window partition for now");
1357  }
1358  return makeExpr<Analyzer::ColumnVar>(
1359  col->get_type_info(), col->get_table_id(), col->get_column_id(), 1);
1360 }
std::shared_ptr< Analyzer::Expr > transform_to_inner(const Analyzer::Expr *expr)
+ Here is the caller graph for this function:

◆ translate_groupby_exprs() [1/2]

std::list<std::shared_ptr<Analyzer::Expr> > anonymous_namespace{RelAlgExecutor.cpp}::translate_groupby_exprs ( const RelCompound compound,
const std::vector< std::shared_ptr< Analyzer::Expr >> &  scalar_sources 
)

Definition at line 894 of file RelAlgExecutor.cpp.

References RelCompound::getGroupByCount(), RelCompound::isAggregate(), and set_transient_dict().

896  {
897  if (!compound->isAggregate()) {
898  return {nullptr};
899  }
900  std::list<std::shared_ptr<Analyzer::Expr>> groupby_exprs;
901  for (size_t group_idx = 0; group_idx < compound->getGroupByCount(); ++group_idx) {
902  groupby_exprs.push_back(set_transient_dict(scalar_sources[group_idx]));
903  }
904  return groupby_exprs;
905 }
const size_t getGroupByCount() const
std::shared_ptr< Analyzer::Expr > set_transient_dict(const std::shared_ptr< Analyzer::Expr > expr)
bool isAggregate() const
+ Here is the call graph for this function:

◆ translate_groupby_exprs() [2/2]

std::list<std::shared_ptr<Analyzer::Expr> > anonymous_namespace{RelAlgExecutor.cpp}::translate_groupby_exprs ( const RelAggregate aggregate,
const std::vector< std::shared_ptr< Analyzer::Expr >> &  scalar_sources 
)

Definition at line 907 of file RelAlgExecutor.cpp.

References RelAggregate::getGroupByCount(), and set_transient_dict().

Referenced by RelAlgExecutor::createAggregateWorkUnit(), and RelAlgExecutor::createCompoundWorkUnit().

909  {
910  std::list<std::shared_ptr<Analyzer::Expr>> groupby_exprs;
911  for (size_t group_idx = 0; group_idx < aggregate->getGroupByCount(); ++group_idx) {
912  groupby_exprs.push_back(set_transient_dict(scalar_sources[group_idx]));
913  }
914  return groupby_exprs;
915 }
std::shared_ptr< Analyzer::Expr > set_transient_dict(const std::shared_ptr< Analyzer::Expr > expr)
const size_t getGroupByCount() const
+ Here is the call graph for this function:
+ Here is the caller graph for this function:

◆ translate_quals()

QualsConjunctiveForm anonymous_namespace{RelAlgExecutor.cpp}::translate_quals ( const RelCompound compound,
const RelAlgTranslator translator 
)

Definition at line 917 of file RelAlgExecutor.cpp.

References fold_expr(), RelCompound::getFilterExpr(), qual_to_conjunctive_form(), and RelAlgTranslator::translateScalarRex().

Referenced by RelAlgExecutor::createCompoundWorkUnit().

918  {
919  const auto filter_rex = compound->getFilterExpr();
920  const auto filter_expr =
921  filter_rex ? translator.translateScalarRex(filter_rex) : nullptr;
922  return filter_expr ? qual_to_conjunctive_form(fold_expr(filter_expr.get()))
924 }
QualsConjunctiveForm qual_to_conjunctive_form(const std::shared_ptr< Analyzer::Expr > qual_expr)
std::shared_ptr< Analyzer::Expr > translateScalarRex(const RexScalar *rex) const
const RexScalar * getFilterExpr() const
std::shared_ptr< Analyzer::Expr > fold_expr(const Analyzer::Expr *expr)
+ Here is the call graph for this function:
+ Here is the caller graph for this function:

◆ translate_scalar_sources()

template<class RA >
std::vector<std::shared_ptr<Analyzer::Expr> > anonymous_namespace{RelAlgExecutor.cpp}::translate_scalar_sources ( const RA *  ra_node,
const RelAlgTranslator translator 
)

Definition at line 839 of file RelAlgExecutor.cpp.

References get_scalar_sources_size(), rewrite_array_elements(), rewrite_expr(), scalar_at(), set_transient_dict_maybe(), and RelAlgTranslator::translateScalarRex().

Referenced by RelAlgExecutor::createCompoundWorkUnit(), RelAlgExecutor::createProjectWorkUnit(), and RelAlgExecutor::createTableFunctionWorkUnit().

841  {
842  std::vector<std::shared_ptr<Analyzer::Expr>> scalar_sources;
843  for (size_t i = 0; i < get_scalar_sources_size(ra_node); ++i) {
844  const auto scalar_rex = scalar_at(i, ra_node);
845  if (dynamic_cast<const RexRef*>(scalar_rex)) {
846  // RexRef are synthetic scalars we append at the end of the real ones
847  // for the sake of taking memory ownership, no real work needed here.
848  continue;
849  }
850 
851  const auto scalar_expr =
852  rewrite_array_elements(translator.translateScalarRex(scalar_rex).get());
853  const auto rewritten_expr = rewrite_expr(scalar_expr.get());
854  set_transient_dict_maybe(scalar_sources, rewritten_expr);
855  }
856 
857  return scalar_sources;
858 }
Analyzer::ExpressionPtr rewrite_array_elements(Analyzer::Expr const *expr)
Analyzer::ExpressionPtr rewrite_expr(const Analyzer::Expr *expr)
std::shared_ptr< Analyzer::Expr > translateScalarRex(const RexScalar *rex) const
const RexScalar * scalar_at(const size_t i, const RelTableFunction *table_func)
size_t get_scalar_sources_size(const RelTableFunction *table_func)
void set_transient_dict_maybe(std::vector< std::shared_ptr< Analyzer::Expr >> &scalar_sources, const std::shared_ptr< Analyzer::Expr > &expr)
+ Here is the call graph for this function:
+ Here is the caller graph for this function:

◆ translate_scalar_sources_for_update()

template<class RA >
std::vector<std::shared_ptr<Analyzer::Expr> > anonymous_namespace{RelAlgExecutor.cpp}::translate_scalar_sources_for_update ( const RA *  ra_node,
const RelAlgTranslator translator,
int32_t  tableId,
const Catalog_Namespace::Catalog cat,
const ColumnNameList colNames,
size_t  starting_projection_column_idx 
)

Definition at line 861 of file RelAlgExecutor.cpp.

References get_scalar_sources_size(), rewrite_array_elements(), rewrite_expr(), scalar_at(), set_transient_dict_maybe(), and RelAlgTranslator::translateScalarRex().

// Translates the scalar sources of an update projection. Projection
// columns from `starting_projection_column_idx` onward are cast to the
// type of the target column named in `colNames`, except the last source
// (presumably a synthetic offset/rowid column appended by the update
// machinery — TODO confirm against the caller).
867  {
868  std::vector<std::shared_ptr<Analyzer::Expr>> scalar_sources;
869  for (size_t i = 0; i < get_scalar_sources_size(ra_node); ++i) {
870  const auto scalar_rex = scalar_at(i, ra_node);
871  if (dynamic_cast<const RexRef*>(scalar_rex)) {
872  // RexRef are synthetic scalars we append at the end of the real ones
873  // for the sake of taking memory ownership, no real work needed here.
874  continue;
875  }
876 
877  std::shared_ptr<Analyzer::Expr> translated_expr;
// Only sources inside the update window (and not the final one) are cast
// to the corresponding target column's type.
878  if (i >= starting_projection_column_idx && i < get_scalar_sources_size(ra_node) - 1) {
879  translated_expr = cast_to_column_type(translator.translateScalarRex(scalar_rex),
880  tableId,
881  cat,
882  colNames[i - starting_projection_column_idx]);
883  } else {
884  translated_expr = translator.translateScalarRex(scalar_rex);
885  }
// Same post-processing pipeline as translate_scalar_sources(): array
// element rewrite, generic rewrite, then fold + optional transient dict.
886  const auto scalar_expr = rewrite_array_elements(translated_expr.get());
887  const auto rewritten_expr = rewrite_expr(scalar_expr.get());
888  set_transient_dict_maybe(scalar_sources, rewritten_expr);
889  }
890 
891  return scalar_sources;
892 }
Analyzer::ExpressionPtr rewrite_array_elements(Analyzer::Expr const *expr)
Analyzer::ExpressionPtr rewrite_expr(const Analyzer::Expr *expr)
std::shared_ptr< Analyzer::Expr > translateScalarRex(const RexScalar *rex) const
const RexScalar * scalar_at(const size_t i, const RelTableFunction *table_func)
size_t get_scalar_sources_size(const RelTableFunction *table_func)
void set_transient_dict_maybe(std::vector< std::shared_ptr< Analyzer::Expr >> &scalar_sources, const std::shared_ptr< Analyzer::Expr > &expr)
+ Here is the call graph for this function:

◆ translate_targets() [1/2]

std::vector<Analyzer::Expr*> anonymous_namespace{RelAlgExecutor.cpp}::translate_targets ( std::vector< std::shared_ptr< Analyzer::Expr >> &  target_exprs_owned,
const std::vector< std::shared_ptr< Analyzer::Expr >> &  scalar_sources,
const std::list< std::shared_ptr< Analyzer::Expr >> &  groupby_exprs,
const RelCompound compound,
const RelAlgTranslator translator 
)

Definition at line 926 of file RelAlgExecutor.cpp.

References CHECK, CHECK_GE, CHECK_LE, fold_expr(), RexRef::getIndex(), RelCompound::getTargetExpr(), Analyzer::Var::kGROUPBY, rewrite_expr(), set_transient_dict(), RelCompound::size(), RelAlgTranslator::translateAggregateRex(), RelAlgTranslator::translateScalarRex(), and var_ref().

// Translates each target of a RelCompound into an owned Analyzer
// expression plus a raw-pointer view. Targets are either aggregates
// (RexAgg), references to group-by keys (RexRef, 1-based), or plain
// scalar expressions.
931  {
932  std::vector<Analyzer::Expr*> target_exprs;
933  for (size_t i = 0; i < compound->size(); ++i) {
934  const auto target_rex = compound->getTargetExpr(i);
935  const auto target_rex_agg = dynamic_cast<const RexAgg*>(target_rex);
936  std::shared_ptr<Analyzer::Expr> target_expr;
937  if (target_rex_agg) {
938  target_expr =
939  RelAlgTranslator::translateAggregateRex(target_rex_agg, scalar_sources);
940  } else {
941  const auto target_rex_scalar = dynamic_cast<const RexScalar*>(target_rex);
942  const auto target_rex_ref = dynamic_cast<const RexRef*>(target_rex_scalar);
943  if (target_rex_ref) {
// RexRef indices are 1-based into the group-by list; turn the reference
// into a Var over the corresponding group-by expression.
944  const auto ref_idx = target_rex_ref->getIndex();
945  CHECK_GE(ref_idx, size_t(1));
946  CHECK_LE(ref_idx, groupby_exprs.size());
947  const auto groupby_expr = *std::next(groupby_exprs.begin(), ref_idx - 1);
948  target_expr = var_ref(groupby_expr.get(), Analyzer::Var::kGROUPBY, ref_idx);
949  } else {
// Plain scalar target: translate, rewrite, fold, then best-effort
// transient dictionary cast (failure deliberately leaves the folded
// expression as-is).
950  target_expr = translator.translateScalarRex(target_rex_scalar);
951  auto rewritten_expr = rewrite_expr(target_expr.get());
952  target_expr = fold_expr(rewritten_expr.get());
953  try {
954  target_expr = set_transient_dict(target_expr);
955  } catch (...) {
956  // noop
957  }
958  }
959  }
960  CHECK(target_expr);
// Ownership lives in target_exprs_owned; target_exprs holds raw views.
961  target_exprs_owned.push_back(target_expr);
962  target_exprs.push_back(target_expr.get());
963  }
964  return target_exprs;
965 }
size_t size() const override
const Rex * getTargetExpr(const size_t i) const
std::shared_ptr< Analyzer::Var > var_ref(const Analyzer::Expr *expr, const Analyzer::Var::WhichRow which_row, const int varno)
Definition: Analyzer.h:1589
#define CHECK_GE(x, y)
Definition: Logger.h:206
std::shared_ptr< Analyzer::Expr > set_transient_dict(const std::shared_ptr< Analyzer::Expr > expr)
Analyzer::ExpressionPtr rewrite_expr(const Analyzer::Expr *expr)
size_t getIndex() const
std::shared_ptr< Analyzer::Expr > translateScalarRex(const RexScalar *rex) const
#define CHECK_LE(x, y)
Definition: Logger.h:204
#define CHECK(condition)
Definition: Logger.h:193
std::shared_ptr< Analyzer::Expr > fold_expr(const Analyzer::Expr *expr)
static std::shared_ptr< Analyzer::Expr > translateAggregateRex(const RexAgg *rex, const std::vector< std::shared_ptr< Analyzer::Expr >> &scalar_sources)
+ Here is the call graph for this function:

◆ translate_targets() [2/2]

std::vector<Analyzer::Expr*> anonymous_namespace{RelAlgExecutor.cpp}::translate_targets ( std::vector< std::shared_ptr< Analyzer::Expr >> &  target_exprs_owned,
const std::vector< std::shared_ptr< Analyzer::Expr >> &  scalar_sources,
const std::list< std::shared_ptr< Analyzer::Expr >> &  groupby_exprs,
const RelAggregate aggregate,
const RelAlgTranslator translator 
)

Definition at line 967 of file RelAlgExecutor.cpp.

References CHECK, fold_expr(), RelAggregate::getAggExprs(), Analyzer::Var::kGROUPBY, RelAlgTranslator::translateAggregateRex(), and var_ref().

Referenced by RelAlgExecutor::createAggregateWorkUnit(), and RelAlgExecutor::createCompoundWorkUnit().

972  {
973  std::vector<Analyzer::Expr*> target_exprs;
974  size_t group_key_idx = 0;
975  for (const auto& groupby_expr : groupby_exprs) {
976  auto target_expr =
977  var_ref(groupby_expr.get(), Analyzer::Var::kGROUPBY, group_key_idx++);
978  target_exprs_owned.push_back(target_expr);
979  target_exprs.push_back(target_expr.get());
980  }
981 
982  for (const auto& target_rex_agg : aggregate->getAggExprs()) {
983  auto target_expr =
984  RelAlgTranslator::translateAggregateRex(target_rex_agg.get(), scalar_sources);
985  CHECK(target_expr);
986  target_expr = fold_expr(target_expr.get());
987  target_exprs_owned.push_back(target_expr);
988  target_exprs.push_back(target_expr.get());
989  }
990  return target_exprs;
991 }
std::shared_ptr< Analyzer::Var > var_ref(const Analyzer::Expr *expr, const Analyzer::Var::WhichRow which_row, const int varno)
Definition: Analyzer.h:1589
#define CHECK(condition)
Definition: Logger.h:193
const std::vector< std::unique_ptr< const RexAgg > > & getAggExprs() const
std::shared_ptr< Analyzer::Expr > fold_expr(const Analyzer::Expr *expr)
static std::shared_ptr< Analyzer::Expr > translateAggregateRex(const RexAgg *rex, const std::vector< std::shared_ptr< Analyzer::Expr >> &scalar_sources)
+ Here is the call graph for this function:
+ Here is the caller graph for this function: