OmniSciDB  a667adc9c8
 All Classes Namespaces Files Functions Variables Typedefs Enumerations Enumerator Friends Macros Pages
anonymous_namespace{RelAlgExecutor.cpp} Namespace Reference

Classes

class  RexUsedInputsVisitor
 
struct  ErrorInfo
 

Functions

bool node_is_aggregate (const RelAlgNode *ra)
 
std::unordered_set< PhysicalInput > get_physical_inputs (const Catalog_Namespace::Catalog &cat, const RelAlgNode *ra)
 
void set_parallelism_hints (const RelAlgNode &ra_node, const Catalog_Namespace::Catalog &catalog)
 
void prepare_string_dictionaries (const RelAlgNode &ra_node, const Catalog_Namespace::Catalog &catalog)
 
void prepare_foreign_table_for_execution (const RelAlgNode &ra_node, const Catalog_Namespace::Catalog &catalog)
 
void check_sort_node_source_constraint (const RelSort *sort)
 
const RelAlgNode * get_data_sink (const RelAlgNode *ra_node)
 
std::pair< std::unordered_set
< const RexInput * >
, std::vector< std::shared_ptr
< RexInput > > > 
get_used_inputs (const RelCompound *compound, const Catalog_Namespace::Catalog &cat)
 
std::pair< std::unordered_set
< const RexInput * >
, std::vector< std::shared_ptr
< RexInput > > > 
get_used_inputs (const RelAggregate *aggregate, const Catalog_Namespace::Catalog &cat)
 
std::pair< std::unordered_set
< const RexInput * >
, std::vector< std::shared_ptr
< RexInput > > > 
get_used_inputs (const RelProject *project, const Catalog_Namespace::Catalog &cat)
 
std::pair< std::unordered_set
< const RexInput * >
, std::vector< std::shared_ptr
< RexInput > > > 
get_used_inputs (const RelTableFunction *table_func, const Catalog_Namespace::Catalog &cat)
 
std::pair< std::unordered_set
< const RexInput * >
, std::vector< std::shared_ptr
< RexInput > > > 
get_used_inputs (const RelFilter *filter, const Catalog_Namespace::Catalog &cat)
 
std::pair< std::unordered_set
< const RexInput * >
, std::vector< std::shared_ptr
< RexInput > > > 
get_used_inputs (const RelLogicalUnion *logical_union, const Catalog_Namespace::Catalog &)
 
int table_id_from_ra (const RelAlgNode *ra_node)
 
std::unordered_map< const
RelAlgNode *, int > 
get_input_nest_levels (const RelAlgNode *ra_node, const std::vector< size_t > &input_permutation)
 
std::pair< std::unordered_set
< const RexInput * >
, std::vector< std::shared_ptr
< RexInput > > > 
get_join_source_used_inputs (const RelAlgNode *ra_node, const Catalog_Namespace::Catalog &cat)
 
void collect_used_input_desc (std::vector< InputDescriptor > &input_descs, const Catalog_Namespace::Catalog &cat, std::unordered_set< std::shared_ptr< const InputColDescriptor >> &input_col_descs_unique, const RelAlgNode *ra_node, const std::unordered_set< const RexInput * > &source_used_inputs, const std::unordered_map< const RelAlgNode *, int > &input_to_nest_level)
 
template<class RA >
std::pair< std::vector
< InputDescriptor >, std::list
< std::shared_ptr< const
InputColDescriptor > > > 
get_input_desc_impl (const RA *ra_node, const std::unordered_set< const RexInput * > &used_inputs, const std::unordered_map< const RelAlgNode *, int > &input_to_nest_level, const std::vector< size_t > &input_permutation, const Catalog_Namespace::Catalog &cat)
 
template<class RA >
std::tuple< std::vector
< InputDescriptor >, std::list
< std::shared_ptr< const
InputColDescriptor >
>, std::vector
< std::shared_ptr< RexInput > > > 
get_input_desc (const RA *ra_node, const std::unordered_map< const RelAlgNode *, int > &input_to_nest_level, const std::vector< size_t > &input_permutation, const Catalog_Namespace::Catalog &cat)
 
size_t get_scalar_sources_size (const RelCompound *compound)
 
size_t get_scalar_sources_size (const RelProject *project)
 
size_t get_scalar_sources_size (const RelTableFunction *table_func)
 
const RexScalar * scalar_at (const size_t i, const RelCompound *compound)
 
const RexScalar * scalar_at (const size_t i, const RelProject *project)
 
const RexScalar * scalar_at (const size_t i, const RelTableFunction *table_func)
 
std::shared_ptr< Analyzer::Expr > set_transient_dict (const std::shared_ptr< Analyzer::Expr > expr)
 
void set_transient_dict_maybe (std::vector< std::shared_ptr< Analyzer::Expr >> &scalar_sources, const std::shared_ptr< Analyzer::Expr > &expr)
 
std::shared_ptr< Analyzer::Expr > cast_dict_to_none (const std::shared_ptr< Analyzer::Expr > &input)
 
template<class RA >
std::vector< std::shared_ptr
< Analyzer::Expr > > 
translate_scalar_sources (const RA *ra_node, const RelAlgTranslator &translator, const ::ExecutorType executor_type)
 
template<class RA >
std::vector< std::shared_ptr
< Analyzer::Expr > > 
translate_scalar_sources_for_update (const RA *ra_node, const RelAlgTranslator &translator, int32_t tableId, const Catalog_Namespace::Catalog &cat, const ColumnNameList &colNames, size_t starting_projection_column_idx)
 
std::list< std::shared_ptr
< Analyzer::Expr > > 
translate_groupby_exprs (const RelCompound *compound, const std::vector< std::shared_ptr< Analyzer::Expr >> &scalar_sources)
 
std::list< std::shared_ptr
< Analyzer::Expr > > 
translate_groupby_exprs (const RelAggregate *aggregate, const std::vector< std::shared_ptr< Analyzer::Expr >> &scalar_sources)
 
QualsConjunctiveForm translate_quals (const RelCompound *compound, const RelAlgTranslator &translator)
 
std::vector< Analyzer::Expr * > translate_targets (std::vector< std::shared_ptr< Analyzer::Expr >> &target_exprs_owned, const std::vector< std::shared_ptr< Analyzer::Expr >> &scalar_sources, const std::list< std::shared_ptr< Analyzer::Expr >> &groupby_exprs, const RelCompound *compound, const RelAlgTranslator &translator, const ExecutorType executor_type)
 
std::vector< Analyzer::Expr * > translate_targets (std::vector< std::shared_ptr< Analyzer::Expr >> &target_exprs_owned, const std::vector< std::shared_ptr< Analyzer::Expr >> &scalar_sources, const std::list< std::shared_ptr< Analyzer::Expr >> &groupby_exprs, const RelAggregate *aggregate, const RelAlgTranslator &translator)
 
bool is_count_distinct (const Analyzer::Expr *expr)
 
bool is_agg (const Analyzer::Expr *expr)
 
SQLTypeInfo get_logical_type_for_expr (const Analyzer::Expr &expr)
 
template<class RA >
std::vector< TargetMetaInfo > get_targets_meta (const RA *ra_node, const std::vector< Analyzer::Expr * > &target_exprs)
 
template<>
std::vector< TargetMetaInfo > get_targets_meta (const RelFilter *filter, const std::vector< Analyzer::Expr * > &target_exprs)
 
bool is_window_execution_unit (const RelAlgExecutionUnit &ra_exe_unit)
 
std::shared_ptr< Analyzer::Expr > transform_to_inner (const Analyzer::Expr *expr)
 
template<class T >
int64_t insert_one_dict_str (T *col_data, const std::string &columnName, const SQLTypeInfo &columnType, const Analyzer::Constant *col_cv, const Catalog_Namespace::Catalog &catalog)
 
template<class T >
int64_t insert_one_dict_str (T *col_data, const ColumnDescriptor *cd, const Analyzer::Constant *col_cv, const Catalog_Namespace::Catalog &catalog)
 
int64_t int_value_from_numbers_ptr (const SQLTypeInfo &type_info, const int8_t *data)
 
const TableDescriptor * get_shard_for_key (const TableDescriptor *td, const Catalog_Namespace::Catalog &cat, const Fragmenter_Namespace::InsertData &data)
 
std::list< Analyzer::OrderEntry > get_order_entries (const RelSort *sort)
 
size_t get_scan_limit (const RelAlgNode *ra, const size_t limit)
 
bool first_oe_is_desc (const std::list< Analyzer::OrderEntry > &order_entries)
 
size_t groups_approx_upper_bound (const std::vector< InputTableInfo > &table_infos)
 
bool compute_output_buffer_size (const RelAlgExecutionUnit &ra_exe_unit)
 
bool exe_unit_has_quals (const RelAlgExecutionUnit ra_exe_unit)
 
RelAlgExecutionUnit decide_approx_count_distinct_implementation (const RelAlgExecutionUnit &ra_exe_unit_in, const std::vector< InputTableInfo > &table_infos, const Executor *executor, const ExecutorDeviceType device_type_in, std::vector< std::shared_ptr< Analyzer::Expr >> &target_exprs_owned)
 
void build_render_targets (RenderInfo &render_info, const std::vector< Analyzer::Expr * > &work_unit_target_exprs, const std::vector< TargetMetaInfo > &targets_meta)
 
bool can_use_bump_allocator (const RelAlgExecutionUnit &ra_exe_unit, const CompilationOptions &co, const ExecutionOptions &eo)
 
ErrorInfo getErrorDescription (const int32_t error_code)
 
JoinType get_join_type (const RelAlgNode *ra)
 
std::unique_ptr< const
RexOperator > 
get_bitwise_equals (const RexScalar *scalar)
 
std::unique_ptr< const
RexOperator > 
get_bitwise_equals_conjunction (const RexScalar *scalar)
 
std::vector< JoinType > left_deep_join_types (const RelLeftDeepInnerJoin *left_deep_join)
 
template<class RA >
std::vector< size_t > do_table_reordering (std::vector< InputDescriptor > &input_descs, std::list< std::shared_ptr< const InputColDescriptor >> &input_col_descs, const JoinQualsPerNestingLevel &left_deep_join_quals, std::unordered_map< const RelAlgNode *, int > &input_to_nest_level, const RA *node, const std::vector< InputTableInfo > &query_infos, const Executor *executor)
 
std::vector< size_t > get_left_deep_join_input_sizes (const RelLeftDeepInnerJoin *left_deep_join)
 
std::list< std::shared_ptr
< Analyzer::Expr > > 
rewrite_quals (const std::list< std::shared_ptr< Analyzer::Expr >> &quals)
 
std::vector< const RexScalar * > rex_to_conjunctive_form (const RexScalar *qual_expr)
 
std::shared_ptr< Analyzer::Expr > build_logical_expression (const std::vector< std::shared_ptr< Analyzer::Expr >> &factors, const SQLOps sql_op)
 
template<class QualsList >
bool list_contains_expression (const QualsList &haystack, const std::shared_ptr< Analyzer::Expr > &needle)
 
std::shared_ptr< Analyzer::Expr > reverse_logical_distribution (const std::shared_ptr< Analyzer::Expr > &expr)
 
std::vector< std::shared_ptr
< Analyzer::Expr > > 
synthesize_inputs (const RelAlgNode *ra_node, const size_t nest_level, const std::vector< TargetMetaInfo > &in_metainfo, const std::unordered_map< const RelAlgNode *, int > &input_to_nest_level)
 
std::vector< std::shared_ptr
< Analyzer::Expr > > 
target_exprs_for_union (RelAlgNode const *input_node)
 
std::pair< std::vector
< TargetMetaInfo >
, std::vector< std::shared_ptr
< Analyzer::Expr > > > 
get_inputs_meta (const RelFilter *filter, const RelAlgTranslator &translator, const std::vector< std::shared_ptr< RexInput >> &inputs_owned, const std::unordered_map< const RelAlgNode *, int > &input_to_nest_level)
 

Function Documentation

std::shared_ptr<Analyzer::Expr> anonymous_namespace{RelAlgExecutor.cpp}::build_logical_expression ( const std::vector< std::shared_ptr< Analyzer::Expr >> &  factors,
const SQLOps  sql_op 
)

Definition at line 3597 of file RelAlgExecutor.cpp.

References CHECK, i, kONE, and Parser::OperExpr::normalize().

Referenced by reverse_logical_distribution().

3599  {
3600  CHECK(!factors.empty());
3601  auto acc = factors.front();
3602  for (size_t i = 1; i < factors.size(); ++i) {
3603  acc = Parser::OperExpr::normalize(sql_op, kONE, acc, factors[i]);
3604  }
3605  return acc;
3606 }
static std::shared_ptr< Analyzer::Expr > normalize(const SQLOps optype, const SQLQualifier qual, std::shared_ptr< Analyzer::Expr > left_expr, std::shared_ptr< Analyzer::Expr > right_expr)
Definition: ParserNode.cpp:283
Definition: sqldefs.h:69
#define CHECK(condition)
Definition: Logger.h:197

+ Here is the call graph for this function:

+ Here is the caller graph for this function:

void anonymous_namespace{RelAlgExecutor.cpp}::build_render_targets ( RenderInfo &  render_info,
const std::vector< Analyzer::Expr * > &  work_unit_target_exprs,
const std::vector< TargetMetaInfo > &  targets_meta 
)

Definition at line 2822 of file RelAlgExecutor.cpp.

References CHECK_EQ, i, and RenderInfo::targets.

Referenced by RelAlgExecutor::executeWorkUnit().

2824  {
2825  CHECK_EQ(work_unit_target_exprs.size(), targets_meta.size());
2826  render_info.targets.clear();
2827  for (size_t i = 0; i < targets_meta.size(); ++i) {
2828  render_info.targets.emplace_back(std::make_shared<Analyzer::TargetEntry>(
2829  targets_meta[i].get_resname(),
2830  work_unit_target_exprs[i]->get_shared_ptr(),
2831  false));
2832  }
2833 }
#define CHECK_EQ(x, y)
Definition: Logger.h:205
std::vector< std::shared_ptr< Analyzer::TargetEntry > > targets
Definition: RenderInfo.h:37

+ Here is the caller graph for this function:

bool anonymous_namespace{RelAlgExecutor.cpp}::can_use_bump_allocator ( const RelAlgExecutionUnit &  ra_exe_unit,
const CompilationOptions &  co,
const ExecutionOptions &  eo 
)
inline

Definition at line 2835 of file RelAlgExecutor.cpp.

References CompilationOptions::device_type, g_enable_bump_allocator, GPU, SortInfo::order_entries, ExecutionOptions::output_columnar_hint, and RelAlgExecutionUnit::sort_info.

Referenced by RelAlgExecutor::executeWorkUnit().

2837  {
2838  return g_enable_bump_allocator && co.device_type == ExecutorDeviceType::GPU &&
2839  !eo.output_columnar_hint && ra_exe_unit.sort_info.order_entries.empty();
2840 }
const std::list< Analyzer::OrderEntry > order_entries
const SortInfo sort_info
ExecutorDeviceType device_type
bool g_enable_bump_allocator
Definition: Execute.cpp:109

+ Here is the caller graph for this function:

std::shared_ptr<Analyzer::Expr> anonymous_namespace{RelAlgExecutor.cpp}::cast_dict_to_none ( const std::shared_ptr< Analyzer::Expr > &  input)

Definition at line 1240 of file RelAlgExecutor.cpp.

References kENCODING_DICT, and kTEXT.

Referenced by translate_scalar_sources(), and translate_targets().

1241  {
1242  const auto& input_ti = input->get_type_info();
1243  if (input_ti.is_string() && input_ti.get_compression() == kENCODING_DICT) {
1244  return input->add_cast(SQLTypeInfo(kTEXT, input_ti.get_notnull()));
1245  }
1246  return input;
1247 }
Definition: sqltypes.h:51

+ Here is the caller graph for this function:

void anonymous_namespace{RelAlgExecutor.cpp}::check_sort_node_source_constraint ( const RelSort *  sort)
inline

Definition at line 489 of file RelAlgExecutor.cpp.

References CHECK_EQ, RelAlgNode::getInput(), and RelAlgNode::inputCount().

Referenced by RelAlgExecutor::executeRelAlgQuerySingleStep(), and RelAlgExecutor::executeSort().

489  {
490  CHECK_EQ(size_t(1), sort->inputCount());
491  const auto source = sort->getInput(0);
492  if (dynamic_cast<const RelSort*>(source)) {
493  throw std::runtime_error("Sort node not supported as input to another sort");
494  }
495 }
#define CHECK_EQ(x, y)
Definition: Logger.h:205
const RelAlgNode * getInput(const size_t idx) const
const size_t inputCount() const

+ Here is the call graph for this function:

+ Here is the caller graph for this function:

void anonymous_namespace{RelAlgExecutor.cpp}::collect_used_input_desc ( std::vector< InputDescriptor > &  input_descs,
const Catalog_Namespace::Catalog &  cat,
std::unordered_set< std::shared_ptr< const InputColDescriptor >> &  input_col_descs_unique,
const RelAlgNode *  ra_node,
const std::unordered_set< const RexInput * > &  source_used_inputs,
const std::unordered_map< const RelAlgNode *, int > &  input_to_nest_level 
)

Definition at line 1088 of file RelAlgExecutor.cpp.

References Catalog_Namespace::Catalog::getColumnIdBySpi(), table_id_from_ra(), RelAlgNode::toString(), and VLOG.

Referenced by get_input_desc_impl().

1094  {
1095  VLOG(3) << "ra_node=" << ra_node->toString()
1096  << " input_col_descs_unique.size()=" << input_col_descs_unique.size()
1097  << " source_used_inputs.size()=" << source_used_inputs.size();
1098  for (const auto used_input : source_used_inputs) {
1099  const auto input_ra = used_input->getSourceNode();
1100  const int table_id = table_id_from_ra(input_ra);
1101  const auto col_id = used_input->getIndex();
1102  auto it = input_to_nest_level.find(input_ra);
1103  if (it != input_to_nest_level.end()) {
1104  const int input_desc = it->second;
1105  input_col_descs_unique.insert(std::make_shared<const InputColDescriptor>(
1106  dynamic_cast<const RelScan*>(input_ra)
1107  ? cat.getColumnIdBySpi(table_id, col_id + 1)
1108  : col_id,
1109  table_id,
1110  input_desc));
1111  } else if (!dynamic_cast<const RelLogicalUnion*>(ra_node)) {
1112  throw std::runtime_error("Bushy joins not supported");
1113  }
1114  }
1115 }
int table_id_from_ra(const RelAlgNode *ra_node)
virtual std::string toString() const =0
#define VLOG(n)
Definition: Logger.h:291
const int getColumnIdBySpi(const int tableId, const size_t spi) const
Definition: Catalog.cpp:1543

+ Here is the call graph for this function:

+ Here is the caller graph for this function:

bool anonymous_namespace{RelAlgExecutor.cpp}::compute_output_buffer_size ( const RelAlgExecutionUnit &  ra_exe_unit)

Determines whether a query needs to compute the size of its output buffer. Returns true for projection queries with no LIMIT or a LIMIT that exceeds the high scan limit threshold (meaning it would be cheaper to compute the number of rows passing or use the bump allocator than allocate the current scan limit per GPU)

Definition at line 2734 of file RelAlgExecutor.cpp.

References RelAlgExecutionUnit::groupby_exprs, Executor::high_scan_limit, RelAlgExecutionUnit::scan_limit, and RelAlgExecutionUnit::target_exprs.

Referenced by RelAlgExecutor::executeWorkUnit().

2734  {
2735  for (const auto target_expr : ra_exe_unit.target_exprs) {
2736  if (dynamic_cast<const Analyzer::AggExpr*>(target_expr)) {
2737  return false;
2738  }
2739  }
2740  if (ra_exe_unit.groupby_exprs.size() == 1 && !ra_exe_unit.groupby_exprs.front() &&
2741  (!ra_exe_unit.scan_limit || ra_exe_unit.scan_limit > Executor::high_scan_limit)) {
2742  return true;
2743  }
2744  return false;
2745 }
std::vector< Analyzer::Expr * > target_exprs
const std::list< std::shared_ptr< Analyzer::Expr > > groupby_exprs
static const size_t high_scan_limit
Definition: Execute.h:453

+ Here is the caller graph for this function:

RelAlgExecutionUnit anonymous_namespace{RelAlgExecutor.cpp}::decide_approx_count_distinct_implementation ( const RelAlgExecutionUnit &  ra_exe_unit_in,
const std::vector< InputTableInfo > &  table_infos,
const Executor *  executor,
const ExecutorDeviceType  device_type_in,
std::vector< std::shared_ptr< Analyzer::Expr >> &  target_exprs_owned 
)

Definition at line 2752 of file RelAlgExecutor.cpp.

References Bitmap, CHECK, CHECK_GE, g_bigint_count, g_cluster, g_hll_precision_bits, get_agg_type(), get_count_distinct_sub_bitmap_count(), get_target_info(), getExpressionRange(), GPU, hll_size_for_rate(), i, Integer, kAPPROX_COUNT_DISTINCT, kCOUNT, kENCODING_DICT, kINT, and RelAlgExecutionUnit::target_exprs.

Referenced by RelAlgExecutor::executeWorkUnit(), and RelAlgExecutor::handleOutOfMemoryRetry().

2757  {
2758  RelAlgExecutionUnit ra_exe_unit = ra_exe_unit_in;
2759  for (size_t i = 0; i < ra_exe_unit.target_exprs.size(); ++i) {
2760  const auto target_expr = ra_exe_unit.target_exprs[i];
2761  const auto agg_info = get_target_info(target_expr, g_bigint_count);
2762  if (agg_info.agg_kind != kAPPROX_COUNT_DISTINCT) {
2763  continue;
2764  }
2765  CHECK(dynamic_cast<const Analyzer::AggExpr*>(target_expr));
2766  const auto arg = static_cast<Analyzer::AggExpr*>(target_expr)->get_own_arg();
2767  CHECK(arg);
2768  const auto& arg_ti = arg->get_type_info();
2769  // Avoid calling getExpressionRange for variable length types (string and array),
2770  // it'd trigger an assertion since that API expects to be called only for types
2771  // for which the notion of range is well-defined. A bit of a kludge, but the
2772  // logic to reject these types anyway is at lower levels in the stack and not
2773  // really worth pulling into a separate function for now.
2774  if (!(arg_ti.is_number() || arg_ti.is_boolean() || arg_ti.is_time() ||
2775  (arg_ti.is_string() && arg_ti.get_compression() == kENCODING_DICT))) {
2776  continue;
2777  }
2778  const auto arg_range = getExpressionRange(arg.get(), table_infos, executor);
2779  if (arg_range.getType() != ExpressionRangeType::Integer) {
2780  continue;
2781  }
2782  // When running distributed, the threshold for using the precise implementation
2783  // must be consistent across all leaves, otherwise we could have a mix of precise
2784  // and approximate bitmaps and we cannot aggregate them.
2785  const auto device_type = g_cluster ? ExecutorDeviceType::GPU : device_type_in;
2786  const auto bitmap_sz_bits = arg_range.getIntMax() - arg_range.getIntMin() + 1;
2787  const auto sub_bitmap_count =
2788  get_count_distinct_sub_bitmap_count(bitmap_sz_bits, ra_exe_unit, device_type);
2789  int64_t approx_bitmap_sz_bits{0};
2790  const auto error_rate =
2791  static_cast<Analyzer::AggExpr*>(target_expr)->get_error_rate();
2792  if (error_rate) {
2793  CHECK(error_rate->get_type_info().get_type() == kINT);
2794  CHECK_GE(error_rate->get_constval().intval, 1);
2795  approx_bitmap_sz_bits = hll_size_for_rate(error_rate->get_constval().intval);
2796  } else {
2797  approx_bitmap_sz_bits = g_hll_precision_bits;
2798  }
2799  CountDistinctDescriptor approx_count_distinct_desc{CountDistinctImplType::Bitmap,
2800  arg_range.getIntMin(),
2801  approx_bitmap_sz_bits,
2802  true,
2803  device_type,
2804  sub_bitmap_count};
2805  CountDistinctDescriptor precise_count_distinct_desc{CountDistinctImplType::Bitmap,
2806  arg_range.getIntMin(),
2807  bitmap_sz_bits,
2808  false,
2809  device_type,
2810  sub_bitmap_count};
2811  if (approx_count_distinct_desc.bitmapPaddedSizeBytes() >=
2812  precise_count_distinct_desc.bitmapPaddedSizeBytes()) {
2813  auto precise_count_distinct = makeExpr<Analyzer::AggExpr>(
2814  get_agg_type(kCOUNT, arg.get()), kCOUNT, arg, true, nullptr);
2815  target_exprs_owned.push_back(precise_count_distinct);
2816  ra_exe_unit.target_exprs[i] = precise_count_distinct.get();
2817  }
2818  }
2819  return ra_exe_unit;
2820 }
std::vector< Analyzer::Expr * > target_exprs
int hll_size_for_rate(const int err_percent)
Definition: HyperLogLog.h:115
TargetInfo get_target_info(const PointerType target_expr, const bool bigint_count)
Definition: TargetInfo.h:79
#define CHECK_GE(x, y)
Definition: Logger.h:210
SQLTypeInfo get_agg_type(const SQLAgg agg_kind, const Analyzer::Expr *arg_expr)
int g_hll_precision_bits
size_t get_count_distinct_sub_bitmap_count(const size_t bitmap_sz_bits, const RelAlgExecutionUnit &ra_exe_unit, const ExecutorDeviceType device_type)
bool g_bigint_count
ExpressionRange getExpressionRange(const Analyzer::BinOper *expr, const std::vector< InputTableInfo > &query_infos, const Executor *, boost::optional< std::list< std::shared_ptr< Analyzer::Expr >>> simple_quals)
Definition: sqldefs.h:76
#define CHECK(condition)
Definition: Logger.h:197
bool g_cluster
Definition: sqltypes.h:44

+ Here is the call graph for this function:

+ Here is the caller graph for this function:

template<class RA >
std::vector<size_t> anonymous_namespace{RelAlgExecutor.cpp}::do_table_reordering ( std::vector< InputDescriptor > &  input_descs,
std::list< std::shared_ptr< const InputColDescriptor >> &  input_col_descs,
const JoinQualsPerNestingLevel &  left_deep_join_quals,
std::unordered_map< const RelAlgNode *, int > &  input_to_nest_level,
const RA *  node,
const std::vector< InputTableInfo > &  query_infos,
const Executor *  executor 
)

Definition at line 3441 of file RelAlgExecutor.cpp.

References cat(), CHECK, g_cluster, get_input_desc(), get_input_nest_levels(), get_node_input_permutation(), and table_is_replicated().

Referenced by RelAlgExecutor::createCompoundWorkUnit(), and RelAlgExecutor::createProjectWorkUnit().

3448  {
3449  if (g_cluster) {
3450  // Disable table reordering in distributed mode. The aggregator does not have enough
3451  // information to break ties
3452  return {};
3453  }
3454  const auto& cat = *executor->getCatalog();
3455  for (const auto& table_info : query_infos) {
3456  if (table_info.table_id < 0) {
3457  continue;
3458  }
3459  const auto td = cat.getMetadataForTable(table_info.table_id);
3460  CHECK(td);
3461  if (table_is_replicated(td)) {
3462  return {};
3463  }
3464  }
3465  const auto input_permutation =
3466  get_node_input_permutation(left_deep_join_quals, query_infos, executor);
3467  input_to_nest_level = get_input_nest_levels(node, input_permutation);
3468  std::tie(input_descs, input_col_descs, std::ignore) =
3469  get_input_desc(node, input_to_nest_level, input_permutation, cat);
3470  return input_permutation;
3471 }
std::unordered_map< const RelAlgNode *, int > get_input_nest_levels(const RelAlgNode *ra_node, const std::vector< size_t > &input_permutation)
std::string cat(Ts &&...args)
std::vector< node_t > get_node_input_permutation(const JoinQualsPerNestingLevel &left_deep_join_quals, const std::vector< InputTableInfo > &table_infos, const Executor *executor)
bool table_is_replicated(const TableDescriptor *td)
#define CHECK(condition)
Definition: Logger.h:197
bool g_cluster
std::tuple< std::vector< InputDescriptor >, std::list< std::shared_ptr< const InputColDescriptor > >, std::vector< std::shared_ptr< RexInput > > > get_input_desc(const RA *ra_node, const std::unordered_map< const RelAlgNode *, int > &input_to_nest_level, const std::vector< size_t > &input_permutation, const Catalog_Namespace::Catalog &cat)

+ Here is the call graph for this function:

+ Here is the caller graph for this function:

bool anonymous_namespace{RelAlgExecutor.cpp}::exe_unit_has_quals ( const RelAlgExecutionUnit  ra_exe_unit)
inline

Definition at line 2747 of file RelAlgExecutor.cpp.

References RelAlgExecutionUnit::join_quals, RelAlgExecutionUnit::quals, and RelAlgExecutionUnit::simple_quals.

Referenced by RelAlgExecutor::executeWorkUnit().

2747  {
2748  return !(ra_exe_unit.quals.empty() && ra_exe_unit.join_quals.empty() &&
2749  ra_exe_unit.simple_quals.empty());
2750 }
const JoinQualsPerNestingLevel join_quals
std::list< std::shared_ptr< Analyzer::Expr > > quals
std::list< std::shared_ptr< Analyzer::Expr > > simple_quals

+ Here is the caller graph for this function:

bool anonymous_namespace{RelAlgExecutor.cpp}::first_oe_is_desc ( const std::list< Analyzer::OrderEntry > &  order_entries)

Definition at line 2516 of file RelAlgExecutor.cpp.

Referenced by RelAlgExecutor::createSortInputWorkUnit(), and RelAlgExecutor::executeSort().

2516  {
2517  return !order_entries.empty() && order_entries.front().is_desc;
2518 }

+ Here is the caller graph for this function:

std::unique_ptr<const RexOperator> anonymous_namespace{RelAlgExecutor.cpp}::get_bitwise_equals ( const RexScalar *  scalar)

Definition at line 3360 of file RelAlgExecutor.cpp.

References CHECK_EQ, kAND, kBW_EQ, kEQ, kISNULL, kOR, and RexVisitorBase< T >::visit().

Referenced by get_bitwise_equals_conjunction().

3360  {
3361  const auto condition = dynamic_cast<const RexOperator*>(scalar);
3362  if (!condition || condition->getOperator() != kOR || condition->size() != 2) {
3363  return nullptr;
3364  }
3365  const auto equi_join_condition =
3366  dynamic_cast<const RexOperator*>(condition->getOperand(0));
3367  if (!equi_join_condition || equi_join_condition->getOperator() != kEQ) {
3368  return nullptr;
3369  }
3370  const auto both_are_null_condition =
3371  dynamic_cast<const RexOperator*>(condition->getOperand(1));
3372  if (!both_are_null_condition || both_are_null_condition->getOperator() != kAND ||
3373  both_are_null_condition->size() != 2) {
3374  return nullptr;
3375  }
3376  const auto lhs_is_null =
3377  dynamic_cast<const RexOperator*>(both_are_null_condition->getOperand(0));
3378  const auto rhs_is_null =
3379  dynamic_cast<const RexOperator*>(both_are_null_condition->getOperand(1));
3380  if (!lhs_is_null || !rhs_is_null || lhs_is_null->getOperator() != kISNULL ||
3381  rhs_is_null->getOperator() != kISNULL) {
3382  return nullptr;
3383  }
3384  CHECK_EQ(size_t(1), lhs_is_null->size());
3385  CHECK_EQ(size_t(1), rhs_is_null->size());
3386  CHECK_EQ(size_t(2), equi_join_condition->size());
3387  const auto eq_lhs = dynamic_cast<const RexInput*>(equi_join_condition->getOperand(0));
3388  const auto eq_rhs = dynamic_cast<const RexInput*>(equi_join_condition->getOperand(1));
3389  const auto is_null_lhs = dynamic_cast<const RexInput*>(lhs_is_null->getOperand(0));
3390  const auto is_null_rhs = dynamic_cast<const RexInput*>(rhs_is_null->getOperand(0));
3391  if (!eq_lhs || !eq_rhs || !is_null_lhs || !is_null_rhs) {
3392  return nullptr;
3393  }
3394  std::vector<std::unique_ptr<const RexScalar>> eq_operands;
3395  if (*eq_lhs == *is_null_lhs && *eq_rhs == *is_null_rhs) {
3396  RexDeepCopyVisitor deep_copy_visitor;
3397  auto lhs_op_copy = deep_copy_visitor.visit(equi_join_condition->getOperand(0));
3398  auto rhs_op_copy = deep_copy_visitor.visit(equi_join_condition->getOperand(1));
3399  eq_operands.emplace_back(lhs_op_copy.release());
3400  eq_operands.emplace_back(rhs_op_copy.release());
3401  return boost::make_unique<const RexOperator>(
3402  kBW_EQ, eq_operands, equi_join_condition->getType());
3403  }
3404  return nullptr;
3405 }
#define CHECK_EQ(x, y)
Definition: Logger.h:205
Definition: sqldefs.h:38
Definition: sqldefs.h:30
virtual T visit(const RexScalar *rex_scalar) const
Definition: RexVisitor.h:27
Definition: sqldefs.h:37
Definition: sqldefs.h:31

+ Here is the call graph for this function:

+ Here is the caller graph for this function:

std::unique_ptr<const RexOperator> anonymous_namespace{RelAlgExecutor.cpp}::get_bitwise_equals_conjunction ( const RexScalar *  scalar)

Definition at line 3407 of file RelAlgExecutor.cpp.

References CHECK_GE, get_bitwise_equals(), i, and kAND.

Referenced by RelAlgExecutor::makeJoinQuals().

3408  {
3409  const auto condition = dynamic_cast<const RexOperator*>(scalar);
3410  if (condition && condition->getOperator() == kAND) {
3411  CHECK_GE(condition->size(), size_t(2));
3412  auto acc = get_bitwise_equals(condition->getOperand(0));
3413  if (!acc) {
3414  return nullptr;
3415  }
3416  for (size_t i = 1; i < condition->size(); ++i) {
3417  std::vector<std::unique_ptr<const RexScalar>> and_operands;
3418  and_operands.emplace_back(std::move(acc));
3419  and_operands.emplace_back(get_bitwise_equals_conjunction(condition->getOperand(i)));
3420  acc =
3421  boost::make_unique<const RexOperator>(kAND, and_operands, condition->getType());
3422  }
3423  return acc;
3424  }
3425  return get_bitwise_equals(scalar);
3426 }
std::unique_ptr< const RexOperator > get_bitwise_equals_conjunction(const RexScalar *scalar)
#define CHECK_GE(x, y)
Definition: Logger.h:210
Definition: sqldefs.h:37
std::unique_ptr< const RexOperator > get_bitwise_equals(const RexScalar *scalar)

+ Here is the call graph for this function:

+ Here is the caller graph for this function:

const RelAlgNode* anonymous_namespace{RelAlgExecutor.cpp}::get_data_sink ( const RelAlgNode *  ra_node)

Definition at line 888 of file RelAlgExecutor.cpp.

References CHECK_EQ, RelAlgNode::getInput(), RelAlgNode::inputCount(), and join().

Referenced by get_input_desc_impl(), get_input_nest_levels(), get_inputs_meta(), get_join_source_used_inputs(), get_join_type(), and get_used_inputs().

888  {
889  if (auto table_func = dynamic_cast<const RelTableFunction*>(ra_node)) {
890  return table_func;
891  }
892  if (auto join = dynamic_cast<const RelJoin*>(ra_node)) {
893  CHECK_EQ(size_t(2), join->inputCount());
894  return join;
895  }
896  if (!dynamic_cast<const RelLogicalUnion*>(ra_node)) {
897  CHECK_EQ(size_t(1), ra_node->inputCount());
898  }
899  auto only_src = ra_node->getInput(0);
900  const bool is_join = dynamic_cast<const RelJoin*>(only_src) ||
901  dynamic_cast<const RelLeftDeepInnerJoin*>(only_src);
902  return is_join ? only_src : ra_node;
903 }
#define CHECK_EQ(x, y)
Definition: Logger.h:205
std::string join(T const &container, std::string const &delim)
const RelAlgNode * getInput(const size_t idx) const
const size_t inputCount() const

+ Here is the call graph for this function:

+ Here is the caller graph for this function:

template<class RA >
std::tuple<std::vector<InputDescriptor>, std::list<std::shared_ptr<const InputColDescriptor> >, std::vector<std::shared_ptr<RexInput> > > anonymous_namespace{RelAlgExecutor.cpp}::get_input_desc ( const RA *  ra_node,
const std::unordered_map< const RelAlgNode *, int > &  input_to_nest_level,
const std::vector< size_t > &  input_permutation,
const Catalog_Namespace::Catalog &  cat 
)

Definition at line 1179 of file RelAlgExecutor.cpp.

References get_input_desc_impl(), get_used_inputs(), and VLOG.

Referenced by RelAlgExecutor::createAggregateWorkUnit(), RelAlgExecutor::createCompoundWorkUnit(), RelAlgExecutor::createFilterWorkUnit(), RelAlgExecutor::createProjectWorkUnit(), RelAlgExecutor::createTableFunctionWorkUnit(), RelAlgExecutor::createUnionWorkUnit(), and do_table_reordering().

1182  {
1183  std::unordered_set<const RexInput*> used_inputs;
1184  std::vector<std::shared_ptr<RexInput>> used_inputs_owned;
1185  std::tie(used_inputs, used_inputs_owned) = get_used_inputs(ra_node, cat);
1186  VLOG(3) << "used_inputs.size() = " << used_inputs.size();
1187  auto input_desc_pair = get_input_desc_impl(
1188  ra_node, used_inputs, input_to_nest_level, input_permutation, cat);
1189  return std::make_tuple(
1190  input_desc_pair.first, input_desc_pair.second, used_inputs_owned);
1191 }
std::pair< std::vector< InputDescriptor >, std::list< std::shared_ptr< const InputColDescriptor > > > get_input_desc_impl(const RA *ra_node, const std::unordered_set< const RexInput * > &used_inputs, const std::unordered_map< const RelAlgNode *, int > &input_to_nest_level, const std::vector< size_t > &input_permutation, const Catalog_Namespace::Catalog &cat)
std::pair< std::unordered_set< const RexInput * >, std::vector< std::shared_ptr< RexInput > > > get_used_inputs(const RelCompound *compound, const Catalog_Namespace::Catalog &cat)
#define VLOG(n)
Definition: Logger.h:291

+ Here is the call graph for this function:

+ Here is the caller graph for this function:

template<class RA >
std::pair<std::vector<InputDescriptor>, std::list<std::shared_ptr<const InputColDescriptor> > > anonymous_namespace{RelAlgExecutor.cpp}::get_input_desc_impl ( const RA *  ra_node,
const std::unordered_set< const RexInput * > &  used_inputs,
const std::unordered_map< const RelAlgNode *, int > &  input_to_nest_level,
const std::vector< size_t > &  input_permutation,
const Catalog_Namespace::Catalog cat 
)

Definition at line 1120 of file RelAlgExecutor.cpp.

References collect_used_input_desc(), get_data_sink(), get_join_source_used_inputs(), InputDescriptor::getNestLevel(), gpu_enabled::sort(), and table_id_from_ra().

Referenced by get_input_desc().

1124  {
// Build one InputDescriptor per input of the data sink, honoring an optional
// permutation (e.g. from table reordering).
1125  std::vector<InputDescriptor> input_descs;
1126  const auto data_sink_node = get_data_sink(ra_node);
1127  for (size_t input_idx = 0; input_idx < data_sink_node->inputCount(); ++input_idx) {
1128  const auto input_node_idx =
1129  input_permutation.empty() ? input_idx : input_permutation[input_idx];
1130  auto input_ra = data_sink_node->getInput(input_node_idx);
1131  const int table_id = table_id_from_ra(input_ra);
1132  input_descs.emplace_back(table_id, input_idx);
1133  }
// Keep the table descriptors ordered by nest level.
1134  std::sort(input_descs.begin(),
1135  input_descs.end(),
1136  [](const InputDescriptor& lhs, const InputDescriptor& rhs) {
1137  return lhs.getNestLevel() < rhs.getNestLevel();
1138  });
// Deduplicate column descriptors: first from the node's own used inputs...
1139  std::unordered_set<std::shared_ptr<const InputColDescriptor>> input_col_descs_unique;
1140  collect_used_input_desc(input_descs,
1141  cat,
1142  input_col_descs_unique, // modified
1143  ra_node,
1144  used_inputs,
1145  input_to_nest_level);
// ...then from any inputs referenced by join conditions.
1146  std::unordered_set<const RexInput*> join_source_used_inputs;
1147  std::vector<std::shared_ptr<RexInput>> join_source_used_inputs_owned;
1148  std::tie(join_source_used_inputs, join_source_used_inputs_owned) =
1149  get_join_source_used_inputs(ra_node, cat);
1150  collect_used_input_desc(input_descs,
1151  cat,
1152  input_col_descs_unique, // modified
1153  ra_node,
1154  join_source_used_inputs,
1155  input_to_nest_level);
1156  std::vector<std::shared_ptr<const InputColDescriptor>> input_col_descs(
1157  input_col_descs_unique.begin(), input_col_descs_unique.end());
1158 
// Deterministic ordering: by nest level, then column id, then table id.
1159  std::sort(input_col_descs.begin(),
1160  input_col_descs.end(),
1161  [](std::shared_ptr<const InputColDescriptor> const& lhs,
1162  std::shared_ptr<const InputColDescriptor> const& rhs) {
1163  return std::make_tuple(lhs->getScanDesc().getNestLevel(),
1164  lhs->getColId(),
1165  lhs->getScanDesc().getTableId()) <
1166  std::make_tuple(rhs->getScanDesc().getNestLevel(),
1167  rhs->getColId(),
1168  rhs->getScanDesc().getTableId());
1169  });
1170  return {input_descs,
1171  std::list<std::shared_ptr<const InputColDescriptor>>(input_col_descs.begin(),
1172  input_col_descs.end())};
1173 }
void collect_used_input_desc(std::vector< InputDescriptor > &input_descs, const Catalog_Namespace::Catalog &cat, std::unordered_set< std::shared_ptr< const InputColDescriptor >> &input_col_descs_unique, const RelAlgNode *ra_node, const std::unordered_set< const RexInput * > &source_used_inputs, const std::unordered_map< const RelAlgNode *, int > &input_to_nest_level)
DEVICE void sort(ARGS &&...args)
Definition: gpu_enabled.h:105
std::pair< std::unordered_set< const RexInput * >, std::vector< std::shared_ptr< RexInput > > > get_join_source_used_inputs(const RelAlgNode *ra_node, const Catalog_Namespace::Catalog &cat)
int table_id_from_ra(const RelAlgNode *ra_node)
const RelAlgNode * get_data_sink(const RelAlgNode *ra_node)
int getNestLevel() const

+ Here is the call graph for this function:

+ Here is the caller graph for this function:

std::unordered_map<const RelAlgNode*, int> anonymous_namespace{RelAlgExecutor.cpp}::get_input_nest_levels ( const RelAlgNode ra_node,
const std::vector< size_t > &  input_permutation 
)

Definition at line 1025 of file RelAlgExecutor.cpp.

References CHECK, get_data_sink(), logger::INFO, and LOG_IF.

Referenced by RelAlgExecutor::createAggregateWorkUnit(), RelAlgExecutor::createCompoundWorkUnit(), RelAlgExecutor::createFilterWorkUnit(), RelAlgExecutor::createProjectWorkUnit(), RelAlgExecutor::createTableFunctionWorkUnit(), RelAlgExecutor::createUnionWorkUnit(), and do_table_reordering().

1027  {
1028  const auto data_sink_node = get_data_sink(ra_node);
1029  std::unordered_map<const RelAlgNode*, int> input_to_nest_level;
1030  for (size_t input_idx = 0; input_idx < data_sink_node->inputCount(); ++input_idx) {
1031  const auto input_node_idx =
1032  input_permutation.empty() ? input_idx : input_permutation[input_idx];
1033  const auto input_ra = data_sink_node->getInput(input_node_idx);
1034  // Having a non-zero mapped value (input_idx) results in the query being interpretted
1035  // as a JOIN within CodeGenerator::codegenColVar() due to rte_idx being set to the
1036  // mapped value (input_idx) which originates here. This would be incorrect for UNION.
1037  size_t const idx = dynamic_cast<const RelLogicalUnion*>(ra_node) ? 0 : input_idx;
1038  const auto it_ok = input_to_nest_level.emplace(input_ra, idx);
1039  CHECK(it_ok.second);
1040  LOG_IF(INFO, !input_permutation.empty())
1041  << "Assigned input " << input_ra->toString() << " to nest level " << input_idx;
1042  }
1043  return input_to_nest_level;
1044 }
#define LOG_IF(severity, condition)
Definition: Logger.h:287
#define CHECK(condition)
Definition: Logger.h:197
const RelAlgNode * get_data_sink(const RelAlgNode *ra_node)

+ Here is the call graph for this function:

+ Here is the caller graph for this function:

std::pair<std::vector<TargetMetaInfo>, std::vector<std::shared_ptr<Analyzer::Expr> > > anonymous_namespace{RelAlgExecutor.cpp}::get_inputs_meta ( const RelFilter filter,
const RelAlgTranslator translator,
const std::vector< std::shared_ptr< RexInput >> &  inputs_owned,
const std::unordered_map< const RelAlgNode *, int > &  input_to_nest_level 
)

Definition at line 4129 of file RelAlgExecutor.cpp.

References CHECK, get_data_sink(), get_exprs_not_owned(), get_targets_meta(), i, synthesize_inputs(), and RelAlgTranslator::translateScalarRex().

Referenced by RelAlgExecutor::createFilterWorkUnit().

4132  {
// Collect target metadata and owned expressions for each input of the filter's
// data sink. NOTE: input_it is a single cursor over inputs_owned that is
// advanced across loop iterations — assumes inputs_owned is laid out in
// source order, one entry per input column.
4133  std::vector<TargetMetaInfo> in_metainfo;
4134  std::vector<std::shared_ptr<Analyzer::Expr>> exprs_owned;
4135  const auto data_sink_node = get_data_sink(filter);
4136  auto input_it = inputs_owned.begin();
4137  for (size_t nest_level = 0; nest_level < data_sink_node->inputCount(); ++nest_level) {
4138  const auto source = data_sink_node->getInput(nest_level);
4139  const auto scan_source = dynamic_cast<const RelScan*>(source);
4140  if (scan_source) {
// Physical scan: no precomputed metadata; translate each input column.
4141  CHECK(source->getOutputMetainfo().empty());
4142  std::vector<std::shared_ptr<Analyzer::Expr>> scalar_sources_owned;
4143  for (size_t i = 0; i < scan_source->size(); ++i, ++input_it) {
4144  scalar_sources_owned.push_back(translator.translateScalarRex(input_it->get()));
4145  }
4146  const auto source_metadata =
4147  get_targets_meta(scan_source, get_exprs_not_owned(scalar_sources_owned));
4148  in_metainfo.insert(
4149  in_metainfo.end(), source_metadata.begin(), source_metadata.end());
4150  exprs_owned.insert(
4151  exprs_owned.end(), scalar_sources_owned.begin(), scalar_sources_owned.end());
4152  } else {
// Intermediate node: reuse its output metadata and synthesize matching
// input expressions; skip its share of the inputs_owned cursor.
4153  const auto& source_metadata = source->getOutputMetainfo();
4154  input_it += source_metadata.size();
4155  in_metainfo.insert(
4156  in_metainfo.end(), source_metadata.begin(), source_metadata.end());
4157  const auto scalar_sources_owned = synthesize_inputs(
4158  data_sink_node, nest_level, source_metadata, input_to_nest_level);
4159  exprs_owned.insert(
4160  exprs_owned.end(), scalar_sources_owned.begin(), scalar_sources_owned.end());
4161  }
4162  }
4163  return std::make_pair(in_metainfo, exprs_owned);
4164 }
std::vector< std::shared_ptr< Analyzer::Expr > > synthesize_inputs(const RelAlgNode *ra_node, const size_t nest_level, const std::vector< TargetMetaInfo > &in_metainfo, const std::unordered_map< const RelAlgNode *, int > &input_to_nest_level)
std::shared_ptr< Analyzer::Expr > translateScalarRex(const RexScalar *rex) const
std::vector< Analyzer::Expr * > get_exprs_not_owned(const std::vector< std::shared_ptr< Analyzer::Expr >> &exprs)
Definition: Execute.h:253
std::vector< TargetMetaInfo > get_targets_meta(const RA *ra_node, const std::vector< Analyzer::Expr * > &target_exprs)
#define CHECK(condition)
Definition: Logger.h:197
const RelAlgNode * get_data_sink(const RelAlgNode *ra_node)

+ Here is the call graph for this function:

+ Here is the caller graph for this function:

std::pair<std::unordered_set<const RexInput*>, std::vector<std::shared_ptr<RexInput> > > anonymous_namespace{RelAlgExecutor.cpp}::get_join_source_used_inputs ( const RelAlgNode ra_node,
const Catalog_Namespace::Catalog cat 
)

Definition at line 1047 of file RelAlgExecutor.cpp.

References CHECK_EQ, CHECK_GE, CHECK_GT, get_data_sink(), anonymous_namespace{RelAlgExecutor.cpp}::RexUsedInputsVisitor::get_inputs_owned(), RelAlgNode::inputCount(), join(), run_benchmark_import::result, RelAlgNode::toString(), and RexVisitorBase< T >::visit().

Referenced by get_input_desc_impl().

1048  {
// Collect the inputs referenced by join conditions (if the data sink is a
// join); for non-join sinks, return an empty set after sanity-checking the
// node's input count.
1049  const auto data_sink_node = get_data_sink(ra_node);
1050  if (auto join = dynamic_cast<const RelJoin*>(data_sink_node)) {
// Binary join: only its single condition contributes inputs.
1051  CHECK_EQ(join->inputCount(), 2u);
1052  const auto condition = join->getCondition();
1053  RexUsedInputsVisitor visitor(cat);
1054  auto condition_inputs = visitor.visit(condition);
1055  std::vector<std::shared_ptr<RexInput>> condition_inputs_owned(
1056  visitor.get_inputs_owned());
1057  return std::make_pair(condition_inputs, condition_inputs_owned);
1058  }
1059 
1060  if (auto left_deep_join = dynamic_cast<const RelLeftDeepInnerJoin*>(data_sink_node)) {
// Left-deep join: inner condition plus one optional outer condition per
// nesting level (levels start at 1).
1061  CHECK_GE(left_deep_join->inputCount(), 2u);
1062  const auto condition = left_deep_join->getInnerCondition();
1063  RexUsedInputsVisitor visitor(cat);
1064  auto result = visitor.visit(condition);
1065  for (size_t nesting_level = 1; nesting_level <= left_deep_join->inputCount() - 1;
1066  ++nesting_level) {
1067  const auto outer_condition = left_deep_join->getOuterCondition(nesting_level);
1068  if (outer_condition) {
1069  const auto outer_result = visitor.visit(outer_condition);
1070  result.insert(outer_result.begin(), outer_result.end());
1071  }
1072  }
// The visitor owns all RexInputs it synthesized across the visits above.
1073  std::vector<std::shared_ptr<RexInput>> used_inputs_owned(visitor.get_inputs_owned());
1074  return std::make_pair(result, used_inputs_owned);
1075  }
1076 
// Not a join sink: verify the expected input arity per node type.
1077  if (dynamic_cast<const RelLogicalUnion*>(ra_node)) {
1078  CHECK_GT(ra_node->inputCount(), 1u) << ra_node->toString();
1079  } else if (dynamic_cast<const RelTableFunction*>(ra_node)) {
1080  CHECK_GT(ra_node->inputCount(), 0u) << ra_node->toString();
1081  } else {
1082  CHECK_EQ(ra_node->inputCount(), 1u) << ra_node->toString();
1083  }
1084  return std::make_pair(std::unordered_set<const RexInput*>{},
1085  std::vector<std::shared_ptr<RexInput>>{});
1086 }
#define CHECK_EQ(x, y)
Definition: Logger.h:205
std::string join(T const &container, std::string const &delim)
#define CHECK_GE(x, y)
Definition: Logger.h:210
#define CHECK_GT(x, y)
Definition: Logger.h:209
virtual std::string toString() const =0
const size_t inputCount() const
const RelAlgNode * get_data_sink(const RelAlgNode *ra_node)

+ Here is the call graph for this function:

+ Here is the caller graph for this function:

JoinType anonymous_namespace{RelAlgExecutor.cpp}::get_join_type ( const RelAlgNode ra)

Definition at line 3348 of file RelAlgExecutor.cpp.

References get_data_sink(), INNER, INVALID, and join().

Referenced by RelAlgExecutor::createAggregateWorkUnit(), RelAlgExecutor::createCompoundWorkUnit(), RelAlgExecutor::createFilterWorkUnit(), and RelAlgExecutor::createProjectWorkUnit().

3348  {
3349  auto sink = get_data_sink(ra);
3350  if (auto join = dynamic_cast<const RelJoin*>(sink)) {
3351  return join->getJoinType();
3352  }
3353  if (dynamic_cast<const RelLeftDeepInnerJoin*>(sink)) {
3354  return JoinType::INNER;
3355  }
3356 
3357  return JoinType::INVALID;
3358 }
std::string join(T const &container, std::string const &delim)
const RelAlgNode * get_data_sink(const RelAlgNode *ra_node)

+ Here is the call graph for this function:

+ Here is the caller graph for this function:

std::vector<size_t> anonymous_namespace{RelAlgExecutor.cpp}::get_left_deep_join_input_sizes ( const RelLeftDeepInnerJoin left_deep_join)

Definition at line 3473 of file RelAlgExecutor.cpp.

References get_node_output(), RelAlgNode::getInput(), i, and RelAlgNode::inputCount().

Referenced by RelAlgExecutor::createCompoundWorkUnit(), and RelAlgExecutor::createProjectWorkUnit().

3474  {
3475  std::vector<size_t> input_sizes;
3476  for (size_t i = 0; i < left_deep_join->inputCount(); ++i) {
3477  const auto inputs = get_node_output(left_deep_join->getInput(i));
3478  input_sizes.push_back(inputs.size());
3479  }
3480  return input_sizes;
3481 }
const RelAlgNode * getInput(const size_t idx) const
RANodeOutput get_node_output(const RelAlgNode *ra_node)
const size_t inputCount() const

+ Here is the call graph for this function:

+ Here is the caller graph for this function:

SQLTypeInfo anonymous_namespace{RelAlgExecutor.cpp}::get_logical_type_for_expr ( const Analyzer::Expr expr)
inline

Definition at line 1434 of file RelAlgExecutor.cpp.

References get_logical_type_info(), get_nullable_logical_type_info(), Analyzer::Expr::get_type_info(), is_agg(), is_count_distinct(), and kBIGINT.

Referenced by get_targets_meta().

1434  {
1435  if (is_count_distinct(&expr)) {
1436  return SQLTypeInfo(kBIGINT, false);
1437  } else if (is_agg(&expr)) {
1439  }
1440  return get_logical_type_info(expr.get_type_info());
1441 }
bool is_agg(const Analyzer::Expr *expr)
SQLTypeInfo get_nullable_logical_type_info(const SQLTypeInfo &type_info)
Definition: sqltypes.h:931
SQLTypeInfo get_logical_type_info(const SQLTypeInfo &type_info)
Definition: sqltypes.h:910
bool is_count_distinct(const Analyzer::Expr *expr)
const SQLTypeInfo & get_type_info() const
Definition: Analyzer.h:78

+ Here is the call graph for this function:

+ Here is the caller graph for this function:

std::list<Analyzer::OrderEntry> anonymous_namespace{RelAlgExecutor.cpp}::get_order_entries ( const RelSort sort)

Definition at line 2496 of file RelAlgExecutor.cpp.

References RelSort::collationCount(), Descending, First, RelSort::getCollation(), i, and run_benchmark_import::result.

Referenced by RelAlgExecutor::createSortInputWorkUnit(), and RelAlgExecutor::executeSort().

2496  {
2497  std::list<Analyzer::OrderEntry> result;
2498  for (size_t i = 0; i < sort->collationCount(); ++i) {
2499  const auto sort_field = sort->getCollation(i);
2500  result.emplace_back(sort_field.getField() + 1,
2501  sort_field.getSortDir() == SortDirection::Descending,
2502  sort_field.getNullsPosition() == NullSortedPosition::First);
2503  }
2504  return result;
2505 }
SortField getCollation(const size_t i) const
size_t collationCount() const

+ Here is the call graph for this function:

+ Here is the caller graph for this function:

std::unordered_set<PhysicalInput> anonymous_namespace{RelAlgExecutor.cpp}::get_physical_inputs ( const Catalog_Namespace::Catalog cat,
const RelAlgNode ra 
)

Definition at line 64 of file RelAlgExecutor.cpp.

References get_physical_inputs(), and Catalog_Namespace::Catalog::getColumnIdBySpi().

66  {
67  auto phys_inputs = get_physical_inputs(ra);
68  std::unordered_set<PhysicalInput> phys_inputs2;
69  for (auto& phi : phys_inputs) {
70  phys_inputs2.insert(
71  PhysicalInput{cat.getColumnIdBySpi(phi.table_id, phi.col_id), phi.table_id});
72  }
73  return phys_inputs2;
74 }
std::unordered_set< PhysicalInput > get_physical_inputs(const RelAlgNode *ra)
const int getColumnIdBySpi(const int tableId, const size_t spi) const
Definition: Catalog.cpp:1543

+ Here is the call graph for this function:

size_t anonymous_namespace{RelAlgExecutor.cpp}::get_scalar_sources_size ( const RelCompound compound)

Definition at line 1193 of file RelAlgExecutor.cpp.

References RelCompound::getScalarSourcesSize().

Referenced by translate_scalar_sources(), and translate_scalar_sources_for_update().

1193  {
// Number of scalar source expressions attached to the compound node.
1194  return compound->getScalarSourcesSize();
1195 }
const size_t getScalarSourcesSize() const

+ Here is the call graph for this function:

+ Here is the caller graph for this function:

size_t anonymous_namespace{RelAlgExecutor.cpp}::get_scalar_sources_size ( const RelProject project)

Definition at line 1197 of file RelAlgExecutor.cpp.

References RelProject::size().

1197  {
// For a project node, every projected expression is a scalar source.
1198  return project->size();
1199 }
size_t size() const override

+ Here is the call graph for this function:

size_t anonymous_namespace{RelAlgExecutor.cpp}::get_scalar_sources_size ( const RelTableFunction table_func)

Definition at line 1201 of file RelAlgExecutor.cpp.

References RelTableFunction::getTableFuncInputsSize().

1201  {
// Number of input expressions passed to the table function.
1202  return table_func->getTableFuncInputsSize();
1203 }
size_t getTableFuncInputsSize() const

+ Here is the call graph for this function:

size_t anonymous_namespace{RelAlgExecutor.cpp}::get_scan_limit ( const RelAlgNode ra,
const size_t  limit 
)

Definition at line 2507 of file RelAlgExecutor.cpp.

Referenced by RelAlgExecutor::createSortInputWorkUnit().

2507  {
2508  const auto aggregate = dynamic_cast<const RelAggregate*>(ra);
2509  if (aggregate) {
2510  return 0;
2511  }
2512  const auto compound = dynamic_cast<const RelCompound*>(ra);
2513  return (compound && compound->isAggregate()) ? 0 : limit;
2514 }

+ Here is the caller graph for this function:

const TableDescriptor* anonymous_namespace{RelAlgExecutor.cpp}::get_shard_for_key ( const TableDescriptor td,
const Catalog_Namespace::Catalog cat,
const Fragmenter_Namespace::InsertData data 
)

Definition at line 2184 of file RelAlgExecutor.cpp.

References CHECK, Fragmenter_Namespace::InsertData::columnIds, Fragmenter_Namespace::InsertData::data, Catalog_Namespace::Catalog::getPhysicalTablesDescriptors(), Catalog_Namespace::Catalog::getShardColumnMetadataForTable(), i, int_value_from_numbers_ptr(), and SHARD_FOR_KEY.

Referenced by RelAlgExecutor::executeSimpleInsert().

2186  {
// Resolve which physical shard of a sharded table receives an insert, by
// hashing the value of the shard-key column found in the insert data.
2187  auto shard_column_md = cat.getShardColumnMetadataForTable(td);
2188  CHECK(shard_column_md);
2189  auto sharded_column_id = shard_column_md->columnId;
// Remains null if the insert data does not include the shard-key column.
2190  const TableDescriptor* shard{nullptr};
2191  for (size_t i = 0; i < data.columnIds.size(); ++i) {
2192  if (data.columnIds[i] == sharded_column_id) {
2193  const auto shard_tables = cat.getPhysicalTablesDescriptors(td);
2194  const auto shard_count = shard_tables.size();
2195  CHECK(data.data[i].numbersPtr);
// Decode the key value from the raw buffer, then map it to a shard index.
2196  auto value = int_value_from_numbers_ptr(shard_column_md->columnType,
2197  data.data[i].numbersPtr);
2198  const size_t shard_idx = SHARD_FOR_KEY(value, shard_count);
2199  shard = shard_tables[shard_idx];
2200  break;
2201  }
2202  }
2203  return shard;
2204 }
int64_t int_value_from_numbers_ptr(const SQLTypeInfo &type_info, const int8_t *data)
const ColumnDescriptor * getShardColumnMetadataForTable(const TableDescriptor *td) const
Definition: Catalog.cpp:3995
std::vector< const TableDescriptor * > getPhysicalTablesDescriptors(const TableDescriptor *logical_table_desc, bool populate_fragmenter=true) const
Definition: Catalog.cpp:4013
std::vector< DataBlockPtr > data
the number of rows being inserted
Definition: Fragmenter.h:64
#define CHECK(condition)
Definition: Logger.h:197
std::vector< int > columnIds
identifies the table into which the data is being inserted
Definition: Fragmenter.h:62
#define SHARD_FOR_KEY(key, num_shards)
Definition: shard_key.h:20

+ Here is the call graph for this function:

+ Here is the caller graph for this function:

template<class RA >
std::vector<TargetMetaInfo> anonymous_namespace{RelAlgExecutor.cpp}::get_targets_meta ( const RA *  ra_node,
const std::vector< Analyzer::Expr * > &  target_exprs 
)

Definition at line 1444 of file RelAlgExecutor.cpp.

References CHECK, CHECK_EQ, get_logical_type_for_expr(), and i.

Referenced by RelAlgExecutor::createAggregateWorkUnit(), RelAlgExecutor::createCompoundWorkUnit(), RelAlgExecutor::createProjectWorkUnit(), RelAlgExecutor::createTableFunctionWorkUnit(), RelAlgExecutor::createUnionWorkUnit(), get_inputs_meta(), and get_targets_meta().

1446  {
1447  std::vector<TargetMetaInfo> targets_meta;
1448  CHECK_EQ(ra_node->size(), target_exprs.size());
1449  for (size_t i = 0; i < ra_node->size(); ++i) {
1450  CHECK(target_exprs[i]);
1451  // TODO(alex): remove the count distinct type fixup.
1452  targets_meta.emplace_back(ra_node->getFieldName(i),
1453  get_logical_type_for_expr(*target_exprs[i]),
1454  target_exprs[i]->get_type_info());
1455  }
1456  return targets_meta;
1457 }
#define CHECK_EQ(x, y)
Definition: Logger.h:205
SQLTypeInfo get_logical_type_for_expr(const Analyzer::Expr &expr)
#define CHECK(condition)
Definition: Logger.h:197

+ Here is the call graph for this function:

+ Here is the caller graph for this function:

template<>
std::vector<TargetMetaInfo> anonymous_namespace{RelAlgExecutor.cpp}::get_targets_meta ( const RelFilter filter,
const std::vector< Analyzer::Expr * > &  target_exprs 
)

Definition at line 1460 of file RelAlgExecutor.cpp.

References get_targets_meta(), RelAlgNode::getInput(), RelAlgNode::toString(), and UNREACHABLE.

1462  {
1463  RelAlgNode const* input0 = filter->getInput(0);
1464  if (auto const* input = dynamic_cast<RelCompound const*>(input0)) {
1465  return get_targets_meta(input, target_exprs);
1466  } else if (auto const* input = dynamic_cast<RelProject const*>(input0)) {
1467  return get_targets_meta(input, target_exprs);
1468  } else if (auto const* input = dynamic_cast<RelLogicalUnion const*>(input0)) {
1469  return get_targets_meta(input, target_exprs);
1470  } else if (auto const* input = dynamic_cast<RelAggregate const*>(input0)) {
1471  return get_targets_meta(input, target_exprs);
1472  } else if (auto const* input = dynamic_cast<RelScan const*>(input0)) {
1473  return get_targets_meta(input, target_exprs);
1474  }
1475  UNREACHABLE() << "Unhandled node type: " << input0->toString();
1476  return {};
1477 }
#define UNREACHABLE()
Definition: Logger.h:241
const RelAlgNode * getInput(const size_t idx) const
std::vector< TargetMetaInfo > get_targets_meta(const RA *ra_node, const std::vector< Analyzer::Expr * > &target_exprs)
virtual std::string toString() const =0

+ Here is the call graph for this function:

std::pair<std::unordered_set<const RexInput*>, std::vector<std::shared_ptr<RexInput> > > anonymous_namespace{RelAlgExecutor.cpp}::get_used_inputs ( const RelCompound compound,
const Catalog_Namespace::Catalog cat 
)

Definition at line 906 of file RelAlgExecutor.cpp.

References anonymous_namespace{RelAlgExecutor.cpp}::RexUsedInputsVisitor::get_inputs_owned(), RelCompound::getFilterExpr(), RelCompound::getScalarSource(), RelCompound::getScalarSourcesSize(), i, and RexVisitorBase< T >::visit().

Referenced by get_input_desc().

906  {
907  RexUsedInputsVisitor visitor(cat);
908  const auto filter_expr = compound->getFilterExpr();
909  std::unordered_set<const RexInput*> used_inputs =
910  filter_expr ? visitor.visit(filter_expr) : std::unordered_set<const RexInput*>{};
911  const auto sources_size = compound->getScalarSourcesSize();
912  for (size_t i = 0; i < sources_size; ++i) {
913  const auto source_inputs = visitor.visit(compound->getScalarSource(i));
914  used_inputs.insert(source_inputs.begin(), source_inputs.end());
915  }
916  std::vector<std::shared_ptr<RexInput>> used_inputs_owned(visitor.get_inputs_owned());
917  return std::make_pair(used_inputs, used_inputs_owned);
918 }
const RexScalar * getFilterExpr() const
const size_t getScalarSourcesSize() const
const RexScalar * getScalarSource(const size_t i) const

+ Here is the call graph for this function:

+ Here is the caller graph for this function:

std::pair<std::unordered_set<const RexInput*>, std::vector<std::shared_ptr<RexInput> > > anonymous_namespace{RelAlgExecutor.cpp}::get_used_inputs ( const RelAggregate aggregate,
const Catalog_Namespace::Catalog cat 
)

Definition at line 921 of file RelAlgExecutor.cpp.

References CHECK_EQ, CHECK_GE, RelAggregate::getAggExprs(), RelAggregate::getGroupByCount(), RelAlgNode::getInput(), RelAlgNode::getOutputMetainfo(), i, and RelAlgNode::inputCount().

921  {
// An aggregate consumes its group-by columns plus every operand of each
// aggregate expression. RexInputs are synthesized here; ownership is handed
// to the caller through used_inputs_owned.
922  CHECK_EQ(size_t(1), aggregate->inputCount());
923  std::unordered_set<const RexInput*> used_inputs;
924  std::vector<std::shared_ptr<RexInput>> used_inputs_owned;
925  const auto source = aggregate->getInput(0);
926  const auto& in_metainfo = source->getOutputMetainfo();
927  const auto group_count = aggregate->getGroupByCount();
928  CHECK_GE(in_metainfo.size(), group_count);
// Group-by columns occupy the first group_count positions of the source.
929  for (size_t i = 0; i < group_count; ++i) {
930  auto synthesized_used_input = new RexInput(source, i);
931  used_inputs_owned.emplace_back(synthesized_used_input);
932  used_inputs.insert(synthesized_used_input);
933  }
// Each aggregate expression may reference several source columns (operands).
934  for (const auto& agg_expr : aggregate->getAggExprs()) {
935  for (size_t i = 0; i < agg_expr->size(); ++i) {
936  const auto operand_idx = agg_expr->getOperand(i);
937  CHECK_GE(in_metainfo.size(), static_cast<size_t>(operand_idx));
938  auto synthesized_used_input = new RexInput(source, operand_idx);
939  used_inputs_owned.emplace_back(synthesized_used_input);
940  used_inputs.insert(synthesized_used_input);
941  }
942  }
943  return std::make_pair(used_inputs, used_inputs_owned);
944 }
const size_t getGroupByCount() const
#define CHECK_EQ(x, y)
Definition: Logger.h:205
#define CHECK_GE(x, y)
Definition: Logger.h:210
const RelAlgNode * getInput(const size_t idx) const
const std::vector< std::unique_ptr< const RexAgg > > & getAggExprs() const
const size_t inputCount() const
const std::vector< TargetMetaInfo > & getOutputMetainfo() const

+ Here is the call graph for this function:

std::pair<std::unordered_set<const RexInput*>, std::vector<std::shared_ptr<RexInput> > > anonymous_namespace{RelAlgExecutor.cpp}::get_used_inputs ( const RelProject project,
const Catalog_Namespace::Catalog cat 
)

Definition at line 947 of file RelAlgExecutor.cpp.

References anonymous_namespace{RelAlgExecutor.cpp}::RexUsedInputsVisitor::get_inputs_owned(), RelProject::getProjectAt(), i, RelProject::size(), and RexVisitorBase< T >::visit().

947  {
948  RexUsedInputsVisitor visitor(cat);
949  std::unordered_set<const RexInput*> used_inputs;
950  for (size_t i = 0; i < project->size(); ++i) {
951  const auto proj_inputs = visitor.visit(project->getProjectAt(i));
952  used_inputs.insert(proj_inputs.begin(), proj_inputs.end());
953  }
954  std::vector<std::shared_ptr<RexInput>> used_inputs_owned(visitor.get_inputs_owned());
955  return std::make_pair(used_inputs, used_inputs_owned);
956 }
size_t size() const override
const RexScalar * getProjectAt(const size_t idx) const

+ Here is the call graph for this function:

std::pair<std::unordered_set<const RexInput*>, std::vector<std::shared_ptr<RexInput> > > anonymous_namespace{RelAlgExecutor.cpp}::get_used_inputs ( const RelTableFunction table_func,
const Catalog_Namespace::Catalog cat 
)

Definition at line 959 of file RelAlgExecutor.cpp.

References anonymous_namespace{RelAlgExecutor.cpp}::RexUsedInputsVisitor::get_inputs_owned(), RelTableFunction::getTableFuncInputAt(), RelTableFunction::getTableFuncInputsSize(), i, and RexVisitorBase< T >::visit().

960  {
961  RexUsedInputsVisitor visitor(cat);
962  std::unordered_set<const RexInput*> used_inputs;
963  for (size_t i = 0; i < table_func->getTableFuncInputsSize(); ++i) {
964  const auto table_func_inputs = visitor.visit(table_func->getTableFuncInputAt(i));
965  used_inputs.insert(table_func_inputs.begin(), table_func_inputs.end());
966  }
967  std::vector<std::shared_ptr<RexInput>> used_inputs_owned(visitor.get_inputs_owned());
968  return std::make_pair(used_inputs, used_inputs_owned);
969 }
size_t getTableFuncInputsSize() const
const RexScalar * getTableFuncInputAt(const size_t idx) const

+ Here is the call graph for this function:

std::pair<std::unordered_set<const RexInput*>, std::vector<std::shared_ptr<RexInput> > > anonymous_namespace{RelAlgExecutor.cpp}::get_used_inputs ( const RelFilter filter,
const Catalog_Namespace::Catalog cat 
)

Definition at line 972 of file RelAlgExecutor.cpp.

References CHECK, get_data_sink(), and i.

972  {
// A filter uses every column of every input of its data sink. RexInputs are
// synthesized here and handed to the caller via used_inputs_owned.
973  std::unordered_set<const RexInput*> used_inputs;
974  std::vector<std::shared_ptr<RexInput>> used_inputs_owned;
975  const auto data_sink_node = get_data_sink(filter);
976  for (size_t nest_level = 0; nest_level < data_sink_node->inputCount(); ++nest_level) {
977  const auto source = data_sink_node->getInput(nest_level);
978  const auto scan_source = dynamic_cast<const RelScan*>(source);
979  if (scan_source) {
// Physical scan: no output metadata yet; enumerate its physical columns.
980  CHECK(source->getOutputMetainfo().empty());
981  for (size_t i = 0; i < scan_source->size(); ++i) {
982  auto synthesized_used_input = new RexInput(scan_source, i);
983  used_inputs_owned.emplace_back(synthesized_used_input);
984  used_inputs.insert(synthesized_used_input);
985  }
986  } else {
// Intermediate node: enumerate the columns of its output metadata.
987  const auto& partial_in_metadata = source->getOutputMetainfo();
988  for (size_t i = 0; i < partial_in_metadata.size(); ++i) {
989  auto synthesized_used_input = new RexInput(source, i);
990  used_inputs_owned.emplace_back(synthesized_used_input);
991  used_inputs.insert(synthesized_used_input);
992  }
993  }
994  }
995  return std::make_pair(used_inputs, used_inputs_owned);
996 }
#define CHECK(condition)
Definition: Logger.h:197
const RelAlgNode * get_data_sink(const RelAlgNode *ra_node)

+ Here is the call graph for this function:

std::pair<std::unordered_set<const RexInput*>, std::vector<std::shared_ptr<RexInput> > > anonymous_namespace{RelAlgExecutor.cpp}::get_used_inputs ( const RelLogicalUnion logical_union,
const Catalog_Namespace::Catalog  
)

Definition at line 999 of file RelAlgExecutor.cpp.

References RelAlgNode::getInput(), i, RelAlgNode::inputCount(), and VLOG.

999  {
1000  std::unordered_set<const RexInput*> used_inputs(logical_union->inputCount());
1001  std::vector<std::shared_ptr<RexInput>> used_inputs_owned;
1002  used_inputs_owned.reserve(logical_union->inputCount());
1003  VLOG(3) << "logical_union->inputCount()=" << logical_union->inputCount();
1004  auto const n_inputs = logical_union->inputCount();
1005  for (size_t nest_level = 0; nest_level < n_inputs; ++nest_level) {
1006  auto input = logical_union->getInput(nest_level);
1007  for (size_t i = 0; i < input->size(); ++i) {
1008  used_inputs_owned.emplace_back(std::make_shared<RexInput>(input, i));
1009  used_inputs.insert(used_inputs_owned.back().get());
1010  }
1011  }
1012  return std::make_pair(std::move(used_inputs), std::move(used_inputs_owned));
1013 }
const RelAlgNode * getInput(const size_t idx) const
const size_t inputCount() const
#define VLOG(n)
Definition: Logger.h:291

+ Here is the call graph for this function:

ErrorInfo anonymous_namespace{RelAlgExecutor.cpp}::getErrorDescription ( const int32_t  error_code)

Definition at line 3251 of file RelAlgExecutor.cpp.

References Executor::ERR_COLUMNAR_CONVERSION_NOT_SUPPORTED, Executor::ERR_DIV_BY_ZERO, Executor::ERR_GEOS, Executor::ERR_INTERRUPTED, Executor::ERR_OUT_OF_CPU_MEM, Executor::ERR_OUT_OF_GPU_MEM, Executor::ERR_OUT_OF_RENDER_MEM, Executor::ERR_OUT_OF_TIME, Executor::ERR_OVERFLOW_OR_UNDERFLOW, Executor::ERR_SINGLE_VALUE_FOUND_MULTIPLE_VALUES, Executor::ERR_STREAMING_TOP_N_NOT_SUPPORTED_IN_RENDER_QUERY, Executor::ERR_STRING_CONST_IN_RESULTSET, Executor::ERR_TOO_MANY_LITERALS, and Executor::ERR_UNSUPPORTED_SELF_JOIN.

Referenced by RelAlgExecutor::getErrorMessageFromCode().

3251  {
3252  switch (error_code) {
3254  return {.code = "ERR_DIV_BY_ZERO", .description = "Division by zero"};
3256  return {.code = "ERR_OUT_OF_GPU_MEM",
3257  .description =
3258  "Query couldn't keep the entire working set of columns in GPU memory"};
3260  return {.code = "ERR_UNSUPPORTED_SELF_JOIN",
3261  .description = "Self joins not supported yet"};
3263  return {.code = "ERR_OUT_OF_CPU_MEM",
3264  .description = "Not enough host memory to execute the query"};
3266  return {.code = "ERR_OVERFLOW_OR_UNDERFLOW",
3267  .description = "Overflow or underflow"};
3269  return {.code = "ERR_OUT_OF_TIME",
3270  .description = "Query execution has exceeded the time limit"};
3272  return {.code = "ERR_INTERRUPTED",
3273  .description = "Query execution has been interrupted"};
3275  return {
3276  .code = "ERR_COLUMNAR_CONVERSION_NOT_SUPPORTED",
3277  .description = "Columnar conversion not supported for variable length types"};
3279  return {.code = "ERR_TOO_MANY_LITERALS",
3280  .description = "Too many literals in the query"};
3282  return {.code = "ERR_STRING_CONST_IN_RESULTSET",
3283  .description =
3284  "NONE ENCODED String types are not supported as input result set."};
3286  return {.code = "ERR_OUT_OF_RENDER_MEM",
3287  .description = "Not enough OpenGL memory to render the query results"};
3289  return {.code = "ERR_STREAMING_TOP_N_NOT_SUPPORTED_IN_RENDER_QUERY",
3290  .description = "Streaming-Top-N not supported in Render Query"};
3292  return {.code = "ERR_SINGLE_VALUE_FOUND_MULTIPLE_VALUES",
3293  .description = "Multiple distinct values encountered"};
3294  case Executor::ERR_GEOS:
3295  return {.code = "ERR_GEOS", .description = "ERR_GEOS"};
3296  default:
3297  return {.code = nullptr, .description = nullptr};
3298  }
3299 }
static const int32_t ERR_INTERRUPTED
Definition: Execute.h:1117
static const int32_t ERR_GEOS
Definition: Execute.h:1123
static const int32_t ERR_TOO_MANY_LITERALS
Definition: Execute.h:1119
static const int32_t ERR_STRING_CONST_IN_RESULTSET
Definition: Execute.h:1120
static const int32_t ERR_STREAMING_TOP_N_NOT_SUPPORTED_IN_RENDER_QUERY
Definition: Execute.h:1121
static const int32_t ERR_COLUMNAR_CONVERSION_NOT_SUPPORTED
Definition: Execute.h:1118
static const int32_t ERR_DIV_BY_ZERO
Definition: Execute.h:1109
static const int32_t ERR_OUT_OF_RENDER_MEM
Definition: Execute.h:1113
static const int32_t ERR_OVERFLOW_OR_UNDERFLOW
Definition: Execute.h:1115
static const int32_t ERR_OUT_OF_TIME
Definition: Execute.h:1116
static const int32_t ERR_UNSUPPORTED_SELF_JOIN
Definition: Execute.h:1112
static const int32_t ERR_SINGLE_VALUE_FOUND_MULTIPLE_VALUES
Definition: Execute.h:1122
static const int32_t ERR_OUT_OF_GPU_MEM
Definition: Execute.h:1110
static const int32_t ERR_OUT_OF_CPU_MEM
Definition: Execute.h:1114

+ Here is the caller graph for this function:

size_t anonymous_namespace{RelAlgExecutor.cpp}::groups_approx_upper_bound ( const std::vector< InputTableInfo > &  table_infos)

Upper bound estimation for the number of groups. Not strictly correct and not tight, but if the tables involved are really small we shouldn't waste time doing the NDV estimation. We don't account for cross-joins and / or group by unnested array, which is the reason this estimation isn't entirely reliable.

Definition at line 2716 of file RelAlgExecutor.cpp.

References CHECK.

Referenced by RelAlgExecutor::executeWorkUnit().

2716  {
2717  CHECK(!table_infos.empty());
2718  const auto& first_table = table_infos.front();
2719  size_t max_num_groups = first_table.info.getNumTuplesUpperBound();
2720  for (const auto& table_info : table_infos) {
2721  if (table_info.info.getNumTuplesUpperBound() > max_num_groups) {
2722  max_num_groups = table_info.info.getNumTuplesUpperBound();
2723  }
2724  }
2725  return std::max(max_num_groups, size_t(1));
2726 }
#define CHECK(condition)
Definition: Logger.h:197

+ Here is the caller graph for this function:

template<class T >
int64_t anonymous_namespace{RelAlgExecutor.cpp}::insert_one_dict_str ( T *  col_data,
const std::string &  columnName,
const SQLTypeInfo columnType,
const Analyzer::Constant col_cv,
const Catalog_Namespace::Catalog catalog 
)

Definition at line 2082 of file RelAlgExecutor.cpp.

References CHECK, logger::ERROR, SQLTypeInfo::get_comp_param(), Analyzer::Constant::get_constval(), Analyzer::Constant::get_is_null(), Catalog_Namespace::Catalog::getMetadataForDict(), inline_fixed_encoding_null_val(), LOG, Datum::stringval, and omnisci.dtypes::T.

Referenced by RelAlgExecutor::executeSimpleInsert(), and insert_one_dict_str().

2086  {
2087  if (col_cv->get_is_null()) {
2088  *col_data = inline_fixed_encoding_null_val(columnType);
2089  } else {
2090  const int dict_id = columnType.get_comp_param();
2091  const auto col_datum = col_cv->get_constval();
2092  const auto& str = *col_datum.stringval;
2093  const auto dd = catalog.getMetadataForDict(dict_id);
2094  CHECK(dd && dd->stringDict);
2095  int32_t str_id = dd->stringDict->getOrAdd(str);
2096  if (!dd->dictIsTemp) {
2097  const auto checkpoint_ok = dd->stringDict->checkpoint();
2098  if (!checkpoint_ok) {
2099  throw std::runtime_error("Failed to checkpoint dictionary for column " +
2100  columnName);
2101  }
2102  }
2103  const bool invalid = str_id > max_valid_int_value<T>();
2104  if (invalid || str_id == inline_int_null_value<int32_t>()) {
2105  if (invalid) {
2106  LOG(ERROR) << "Could not encode string: " << str
2107  << ", the encoded value doesn't fit in " << sizeof(T) * 8
2108  << " bits. Will store NULL instead.";
2109  }
2110  str_id = inline_fixed_encoding_null_val(columnType);
2111  }
2112  *col_data = str_id;
2113  }
2114  return *col_data;
2115 }
#define LOG(tag)
Definition: Logger.h:188
bool get_is_null() const
Definition: Analyzer.h:334
const DictDescriptor * getMetadataForDict(int dict_ref, bool loadDict=true) const
Definition: Catalog.cpp:1444
std::string * stringval
Definition: sqltypes.h:214
Datum get_constval() const
Definition: Analyzer.h:335
HOST DEVICE int get_comp_param() const
Definition: sqltypes.h:323
#define CHECK(condition)
Definition: Logger.h:197
int64_t inline_fixed_encoding_null_val(const SQL_TYPE_INFO &ti)

+ Here is the call graph for this function:

+ Here is the caller graph for this function:

template<class T >
int64_t anonymous_namespace{RelAlgExecutor.cpp}::insert_one_dict_str ( T *  col_data,
const ColumnDescriptor cd,
const Analyzer::Constant col_cv,
const Catalog_Namespace::Catalog catalog 
)

Definition at line 2118 of file RelAlgExecutor.cpp.

References ColumnDescriptor::columnName, ColumnDescriptor::columnType, and insert_one_dict_str().

2121  {
2122  return insert_one_dict_str(col_data, cd->columnName, cd->columnType, col_cv, catalog);
2123 }
int64_t insert_one_dict_str(T *col_data, const std::string &columnName, const SQLTypeInfo &columnType, const Analyzer::Constant *col_cv, const Catalog_Namespace::Catalog &catalog)
SQLTypeInfo columnType
std::string columnName

+ Here is the call graph for this function:

int64_t anonymous_namespace{RelAlgExecutor.cpp}::int_value_from_numbers_ptr ( const SQLTypeInfo type_info,
const int8_t *  data 
)

Definition at line 2147 of file RelAlgExecutor.cpp.

References CHECK, SQLTypeInfo::get_compression(), SQLTypeInfo::get_logical_size(), SQLTypeInfo::get_size(), SQLTypeInfo::get_type(), kBIGINT, kCHAR, kDATE, kENCODING_DICT, kINT, kSMALLINT, kTEXT, kTIME, kTIMESTAMP, kTINYINT, and kVARCHAR.

Referenced by get_shard_for_key().

2147  {
2148  size_t sz = 0;
2149  switch (type_info.get_type()) {
2150  case kTINYINT:
2151  case kSMALLINT:
2152  case kINT:
2153  case kBIGINT:
2154  case kTIMESTAMP:
2155  case kTIME:
2156  case kDATE:
2157  sz = type_info.get_logical_size();
2158  break;
2159  case kTEXT:
2160  case kVARCHAR:
2161  case kCHAR:
2162  CHECK(type_info.get_compression() == kENCODING_DICT);
2163  sz = type_info.get_size();
2164  break;
2165  default:
2166  CHECK(false) << "Unexpected sharding key datatype";
2167  }
2168 
2169  switch (sz) {
2170  case 1:
2171  return *(reinterpret_cast<const int8_t*>(data));
2172  case 2:
2173  return *(reinterpret_cast<const int16_t*>(data));
2174  case 4:
2175  return *(reinterpret_cast<const int32_t*>(data));
2176  case 8:
2177  return *(reinterpret_cast<const int64_t*>(data));
2178  default:
2179  CHECK(false);
2180  return 0;
2181  }
2182 }
HOST DEVICE int get_size() const
Definition: sqltypes.h:324
Definition: sqltypes.h:48
HOST DEVICE SQLTypes get_type() const
Definition: sqltypes.h:314
int get_logical_size() const
Definition: sqltypes.h:325
Definition: sqltypes.h:51
Definition: sqltypes.h:52
HOST DEVICE EncodingType get_compression() const
Definition: sqltypes.h:322
Definition: sqltypes.h:40
#define CHECK(condition)
Definition: Logger.h:197
Definition: sqltypes.h:44

+ Here is the call graph for this function:

+ Here is the caller graph for this function:

bool anonymous_namespace{RelAlgExecutor.cpp}::is_agg ( const Analyzer::Expr expr)

Definition at line 1422 of file RelAlgExecutor.cpp.

References Analyzer::AggExpr::get_aggtype(), kAVG, kMAX, kMIN, and kSUM.

Referenced by anonymous_namespace{RelAlgDagBuilder.cpp}::create_compound(), RelAlgExecutor::executeWorkUnit(), get_logical_type_for_expr(), and ResultSet::getSingleSlotTargetBitmap().

1422  {
1423  const auto agg_expr = dynamic_cast<const Analyzer::AggExpr*>(expr);
1424  if (agg_expr && agg_expr->get_contains_agg()) {
1425  auto agg_type = agg_expr->get_aggtype();
1426  if (agg_type == SQLAgg::kMIN || agg_type == SQLAgg::kMAX ||
1427  agg_type == SQLAgg::kSUM || agg_type == SQLAgg::kAVG) {
1428  return true;
1429  }
1430  }
1431  return false;
1432 }
Definition: sqldefs.h:73
Definition: sqldefs.h:75
SQLAgg get_aggtype() const
Definition: Analyzer.h:1095
Definition: sqldefs.h:74
Definition: sqldefs.h:72

+ Here is the call graph for this function:

+ Here is the caller graph for this function:

bool anonymous_namespace{RelAlgExecutor.cpp}::is_count_distinct ( const Analyzer::Expr expr)

Definition at line 1417 of file RelAlgExecutor.cpp.

References Analyzer::AggExpr::get_is_distinct().

Referenced by get_logical_type_for_expr().

1417  {
1418  const auto agg_expr = dynamic_cast<const Analyzer::AggExpr*>(expr);
1419  return agg_expr && agg_expr->get_is_distinct();
1420 }
bool get_is_distinct() const
Definition: Analyzer.h:1098

+ Here is the call graph for this function:

+ Here is the caller graph for this function:

bool anonymous_namespace{RelAlgExecutor.cpp}::is_window_execution_unit ( const RelAlgExecutionUnit ra_exe_unit)

Definition at line 1739 of file RelAlgExecutor.cpp.

References RelAlgExecutionUnit::target_exprs.

Referenced by RelAlgExecutor::executeWorkUnit().

1739  {
1740  return std::any_of(ra_exe_unit.target_exprs.begin(),
1741  ra_exe_unit.target_exprs.end(),
1742  [](const Analyzer::Expr* expr) {
1743  return dynamic_cast<const Analyzer::WindowFunction*>(expr);
1744  });
1745 }
std::vector< Analyzer::Expr * > target_exprs

+ Here is the caller graph for this function:

std::vector<JoinType> anonymous_namespace{RelAlgExecutor.cpp}::left_deep_join_types ( const RelLeftDeepInnerJoin left_deep_join)

Definition at line 3428 of file RelAlgExecutor.cpp.

References CHECK_GE, RelLeftDeepInnerJoin::getOuterCondition(), INNER, RelAlgNode::inputCount(), and LEFT.

Referenced by RelAlgExecutor::createCompoundWorkUnit(), RelAlgExecutor::createProjectWorkUnit(), and RelAlgExecutor::translateLeftDeepJoinFilter().

3428  {
3429  CHECK_GE(left_deep_join->inputCount(), size_t(2));
3430  std::vector<JoinType> join_types(left_deep_join->inputCount() - 1, JoinType::INNER);
3431  for (size_t nesting_level = 1; nesting_level <= left_deep_join->inputCount() - 1;
3432  ++nesting_level) {
3433  if (left_deep_join->getOuterCondition(nesting_level)) {
3434  join_types[nesting_level - 1] = JoinType::LEFT;
3435  }
3436  }
3437  return join_types;
3438 }
const RexScalar * getOuterCondition(const size_t nesting_level) const
#define CHECK_GE(x, y)
Definition: Logger.h:210
const size_t inputCount() const

+ Here is the call graph for this function:

+ Here is the caller graph for this function:

template<class QualsList >
bool anonymous_namespace{RelAlgExecutor.cpp}::list_contains_expression ( const QualsList &  haystack,
const std::shared_ptr< Analyzer::Expr > &  needle 
)

Definition at line 3609 of file RelAlgExecutor.cpp.

Referenced by reverse_logical_distribution().

3610  {
3611  for (const auto& qual : haystack) {
3612  if (*qual == *needle) {
3613  return true;
3614  }
3615  }
3616  return false;
3617 }

+ Here is the caller graph for this function:

bool anonymous_namespace{RelAlgExecutor.cpp}::node_is_aggregate ( const RelAlgNode ra)

Definition at line 58 of file RelAlgExecutor.cpp.

Referenced by RelAlgExecutor::executeRelAlgQuerySingleStep(), and RelAlgExecutor::executeSort().

58  {
59  const auto compound = dynamic_cast<const RelCompound*>(ra);
60  const auto aggregate = dynamic_cast<const RelAggregate*>(ra);
61  return ((compound && compound->isAggregate()) || aggregate);
62 }

+ Here is the caller graph for this function:

void anonymous_namespace{RelAlgExecutor.cpp}::prepare_foreign_table_for_execution ( const RelAlgNode ra_node,
const Catalog_Namespace::Catalog catalog 
)

Definition at line 145 of file RelAlgExecutor.cpp.

References prepare_string_dictionaries(), and set_parallelism_hints().

Referenced by RelAlgExecutor::executeRelAlgQueryNoRetry(), and RelAlgExecutor::executeRelAlgStep().

// Prepares any foreign tables referenced by `ra_node` for execution by first
// registering parallelism hints and then materializing string-dictionary
// metadata. Order matters: hints are collected before chunks are pulled into
// CPU memory by prepare_string_dictionaries.
void prepare_foreign_table_for_execution(const RelAlgNode& ra_node,
                                         const Catalog_Namespace::Catalog& catalog) {
  // Iterate through ra_node inputs for types that need to be loaded pre-execution
  // If they do not have valid metadata, load them into CPU memory to generate
  // the metadata and leave them ready to be used by the query
  set_parallelism_hints(ra_node, catalog);
  prepare_string_dictionaries(ra_node, catalog);
}
void prepare_string_dictionaries(const RelAlgNode &ra_node, const Catalog_Namespace::Catalog &catalog)
void set_parallelism_hints(const RelAlgNode &ra_node, const Catalog_Namespace::Catalog &catalog)

+ Here is the call graph for this function:

+ Here is the caller graph for this function:

void anonymous_namespace{RelAlgExecutor.cpp}::prepare_string_dictionaries ( const RelAlgNode ra_node,
const Catalog_Namespace::Catalog catalog 
)

Definition at line 110 of file RelAlgExecutor.cpp.

References CHECK, Data_Namespace::CPU_LEVEL, StorageType::FOREIGN_TABLE, get_physical_inputs(), Chunk_NS::Chunk::getChunk(), Catalog_Namespace::Catalog::getColumnIdBySpi(), Catalog_Namespace::Catalog::getDatabaseId(), Catalog_Namespace::Catalog::getDataMgr(), Catalog_Namespace::Catalog::getForeignTable(), Catalog_Namespace::Catalog::getMetadataForColumn(), Catalog_Namespace::Catalog::getMetadataForTable(), foreign_storage::is_metadata_placeholder(), and test_readcsv::table.

Referenced by prepare_foreign_table_for_execution().

111  {
112  for (const auto& physical_input : get_physical_inputs(&ra_node)) {
113  int table_id = physical_input.table_id;
114  auto table = catalog.getMetadataForTable(table_id, false);
115  if (table && table->storageType == StorageType::FOREIGN_TABLE) {
116  int col_id = catalog.getColumnIdBySpi(table_id, physical_input.col_id);
117  const auto col_desc = catalog.getMetadataForColumn(table_id, col_id);
118  auto foreign_table = catalog.getForeignTable(table_id);
119  if (col_desc->columnType.is_dict_encoded_type()) {
120  CHECK(foreign_table->fragmenter != nullptr);
121  for (const auto& fragment :
122  foreign_table->fragmenter->getFragmentsForQuery().fragments) {
123  ChunkKey chunk_key = {
124  catalog.getDatabaseId(), table_id, col_id, fragment.fragmentId};
125  const ChunkMetadataMap& metadata_map = fragment.getChunkMetadataMap();
126  CHECK(metadata_map.find(col_id) != metadata_map.end());
127  if (foreign_storage::is_metadata_placeholder(*(metadata_map.at(col_id)))) {
128  // When this goes out of scope it will stay in CPU cache but become
129  // evictable
130  std::shared_ptr<Chunk_NS::Chunk> chunk =
131  Chunk_NS::Chunk::getChunk(col_desc,
132  &(catalog.getDataMgr()),
133  chunk_key,
135  0,
136  0,
137  0);
138  }
139  }
140  }
141  }
142  }
143 }
const foreign_storage::ForeignTable * getForeignTable(const std::string &tableName) const
Definition: Catalog.cpp:1398
std::vector< int > ChunkKey
Definition: types.h:37
Data_Namespace::DataMgr & getDataMgr() const
Definition: Catalog.h:222
std::map< int, std::shared_ptr< ChunkMetadata >> ChunkMetadataMap
const ColumnDescriptor * getMetadataForColumn(int tableId, const std::string &colName) const
int getDatabaseId() const
Definition: Catalog.h:276
bool is_metadata_placeholder(const ChunkMetadata &metadata)
static std::shared_ptr< Chunk > getChunk(const ColumnDescriptor *cd, DataMgr *data_mgr, const ChunkKey &key, const MemoryLevel mem_level, const int deviceId, const size_t num_bytes, const size_t num_elems)
Definition: Chunk.cpp:28
#define CHECK(condition)
Definition: Logger.h:197
const TableDescriptor * getMetadataForTable(const std::string &tableName, const bool populateFragmenter=true) const
Returns a pointer to a const TableDescriptor struct matching the provided tableName.
std::unordered_set< PhysicalInput > get_physical_inputs(const RelAlgNode *ra)
static constexpr char const * FOREIGN_TABLE
const int getColumnIdBySpi(const int tableId, const size_t spi) const
Definition: Catalog.cpp:1543

+ Here is the call graph for this function:

+ Here is the caller graph for this function:

std::shared_ptr<Analyzer::Expr> anonymous_namespace{RelAlgExecutor.cpp}::reverse_logical_distribution ( const std::shared_ptr< Analyzer::Expr > &  expr)

Definition at line 3622 of file RelAlgExecutor.cpp.

References build_logical_expression(), CHECK_GE, i, kAND, kONE, kOR, list_contains_expression(), Parser::OperExpr::normalize(), qual_to_conjunctive_form(), and qual_to_disjunctive_form().

Referenced by RelAlgExecutor::makeJoinQuals().

// Applies the reverse distributive law to a qualifier: factors conjuncts
// common to every disjunct out of an OR, i.e. rewrites
// (A AND x) OR (A AND y) into A AND (x OR y). Returns the original
// expression unchanged when no common factor exists.
std::shared_ptr<Analyzer::Expr> reverse_logical_distribution(
    const std::shared_ptr<Analyzer::Expr>& expr) {
  const auto expr_terms = qual_to_disjunctive_form(expr);
  CHECK_GE(expr_terms.size(), size_t(1));
  const auto& first_term = expr_terms.front();
  const auto first_term_factors = qual_to_conjunctive_form(first_term);
  std::vector<std::shared_ptr<Analyzer::Expr>> common_factors;
  // First, collect the conjunctive components common to all the disjunctive components.
  // Don't do it for simple qualifiers, we only care about expensive or join qualifiers.
  for (const auto& first_term_factor : first_term_factors.quals) {
    bool is_common =
        expr_terms.size() > 1;  // Only report common factors for disjunction.
    for (size_t i = 1; i < expr_terms.size(); ++i) {
      const auto crt_term_factors = qual_to_conjunctive_form(expr_terms[i]);
      // A factor is common only if it appears in every disjunct's CNF quals.
      if (!list_contains_expression(crt_term_factors.quals, first_term_factor)) {
        is_common = false;
        break;
      }
    }
    if (is_common) {
      common_factors.push_back(first_term_factor);
    }
  }
  if (common_factors.empty()) {
    // Nothing to factor out; hand back the input untouched.
    return expr;
  }
  // Now that the common expressions are known, collect the remaining expressions.
  std::vector<std::shared_ptr<Analyzer::Expr>> remaining_terms;
  for (const auto& term : expr_terms) {
    const auto term_cf = qual_to_conjunctive_form(term);
    // Simple quals are always kept; they were excluded from factoring above.
    std::vector<std::shared_ptr<Analyzer::Expr>> remaining_quals(
        term_cf.simple_quals.begin(), term_cf.simple_quals.end());
    for (const auto& qual : term_cf.quals) {
      if (!list_contains_expression(common_factors, qual)) {
        remaining_quals.push_back(qual);
      }
    }
    if (!remaining_quals.empty()) {
      remaining_terms.push_back(build_logical_expression(remaining_quals, kAND));
    }
  }
  // Reconstruct the expression with the transformation applied.
  const auto common_expr = build_logical_expression(common_factors, kAND);
  if (remaining_terms.empty()) {
    return common_expr;
  }
  const auto remaining_expr = build_logical_expression(remaining_terms, kOR);
  return Parser::OperExpr::normalize(kAND, kONE, common_expr, remaining_expr);
}
Definition: sqldefs.h:38
#define CHECK_GE(x, y)
Definition: Logger.h:210
QualsConjunctiveForm qual_to_conjunctive_form(const std::shared_ptr< Analyzer::Expr > qual_expr)
bool list_contains_expression(const QualsList &haystack, const std::shared_ptr< Analyzer::Expr > &needle)
Definition: sqldefs.h:37
std::shared_ptr< Analyzer::Expr > build_logical_expression(const std::vector< std::shared_ptr< Analyzer::Expr >> &factors, const SQLOps sql_op)
static std::shared_ptr< Analyzer::Expr > normalize(const SQLOps optype, const SQLQualifier qual, std::shared_ptr< Analyzer::Expr > left_expr, std::shared_ptr< Analyzer::Expr > right_expr)
Definition: ParserNode.cpp:283
Definition: sqldefs.h:69
std::vector< std::shared_ptr< Analyzer::Expr > > qual_to_disjunctive_form(const std::shared_ptr< Analyzer::Expr > &qual_expr)

+ Here is the call graph for this function:

+ Here is the caller graph for this function:

std::list<std::shared_ptr<Analyzer::Expr> > anonymous_namespace{RelAlgExecutor.cpp}::rewrite_quals ( const std::list< std::shared_ptr< Analyzer::Expr >> &  quals)

Definition at line 3483 of file RelAlgExecutor.cpp.

References rewrite_expr().

Referenced by RelAlgExecutor::createCompoundWorkUnit().

3484  {
3485  std::list<std::shared_ptr<Analyzer::Expr>> rewritten_quals;
3486  for (const auto& qual : quals) {
3487  const auto rewritten_qual = rewrite_expr(qual.get());
3488  rewritten_quals.push_back(rewritten_qual ? rewritten_qual : qual);
3489  }
3490  return rewritten_quals;
3491 }
Analyzer::ExpressionPtr rewrite_expr(const Analyzer::Expr *expr)

+ Here is the call graph for this function:

+ Here is the caller graph for this function:

std::vector<const RexScalar*> anonymous_namespace{RelAlgExecutor.cpp}::rex_to_conjunctive_form ( const RexScalar qual_expr)

Definition at line 3582 of file RelAlgExecutor.cpp.

References CHECK, CHECK_GE, i, and kAND.

Referenced by RelAlgExecutor::makeJoinQuals().

3582  {
3583  CHECK(qual_expr);
3584  const auto bin_oper = dynamic_cast<const RexOperator*>(qual_expr);
3585  if (!bin_oper || bin_oper->getOperator() != kAND) {
3586  return {qual_expr};
3587  }
3588  CHECK_GE(bin_oper->size(), size_t(2));
3589  auto lhs_cf = rex_to_conjunctive_form(bin_oper->getOperand(0));
3590  for (size_t i = 1; i < bin_oper->size(); ++i) {
3591  const auto rhs_cf = rex_to_conjunctive_form(bin_oper->getOperand(i));
3592  lhs_cf.insert(lhs_cf.end(), rhs_cf.begin(), rhs_cf.end());
3593  }
3594  return lhs_cf;
3595 }
#define CHECK_GE(x, y)
Definition: Logger.h:210
std::vector< const RexScalar * > rex_to_conjunctive_form(const RexScalar *qual_expr)
Definition: sqldefs.h:37
#define CHECK(condition)
Definition: Logger.h:197

+ Here is the caller graph for this function:

const RexScalar* anonymous_namespace{RelAlgExecutor.cpp}::scalar_at ( const size_t  i,
const RelCompound compound 
)

Definition at line 1205 of file RelAlgExecutor.cpp.

References RelCompound::getScalarSource().

Referenced by translate_scalar_sources(), and translate_scalar_sources_for_update().

// scalar_at overload for RelCompound: forwards to its scalar source list.
const RexScalar* scalar_at(const size_t i, const RelCompound* compound) {
  return compound->getScalarSource(i);
}
const RexScalar * getScalarSource(const size_t i) const

+ Here is the call graph for this function:

+ Here is the caller graph for this function:

const RexScalar* anonymous_namespace{RelAlgExecutor.cpp}::scalar_at ( const size_t  i,
const RelProject project 
)

Definition at line 1209 of file RelAlgExecutor.cpp.

References RelProject::getProjectAt().

// scalar_at overload for RelProject: forwards to its projected expressions.
const RexScalar* scalar_at(const size_t i, const RelProject* project) {
  return project->getProjectAt(i);
}
const RexScalar * getProjectAt(const size_t idx) const

+ Here is the call graph for this function:

const RexScalar* anonymous_namespace{RelAlgExecutor.cpp}::scalar_at ( const size_t  i,
const RelTableFunction table_func 
)

Definition at line 1213 of file RelAlgExecutor.cpp.

References RelTableFunction::getTableFuncInputAt().

// scalar_at overload for RelTableFunction: forwards to its input expressions.
const RexScalar* scalar_at(const size_t i, const RelTableFunction* table_func) {
  return table_func->getTableFuncInputAt(i);
}
const RexScalar * getTableFuncInputAt(const size_t idx) const

+ Here is the call graph for this function:

void anonymous_namespace{RelAlgExecutor.cpp}::set_parallelism_hints ( const RelAlgNode ra_node,
const Catalog_Namespace::Catalog catalog 
)

Definition at line 76 of file RelAlgExecutor.cpp.

References CHECK, Data_Namespace::CPU_LEVEL, StorageType::FOREIGN_TABLE, get_physical_inputs(), Catalog_Namespace::Catalog::getColumnIdBySpi(), Catalog_Namespace::Catalog::getDatabaseId(), Catalog_Namespace::Catalog::getDataMgr(), PersistentStorageMgr::getForeignStorageMgr(), Catalog_Namespace::Catalog::getForeignTable(), Catalog_Namespace::Catalog::getMetadataForColumn(), Catalog_Namespace::Catalog::getMetadataForTable(), Data_Namespace::DataMgr::getPersistentStorageMgr(), and test_readcsv::table.

Referenced by prepare_foreign_table_for_execution().

77  {
78  std::map<ChunkKey, std::set<foreign_storage::ForeignStorageMgr::ParallelismHint>>
79  parallelism_hints_per_table;
80  for (const auto& physical_input : get_physical_inputs(&ra_node)) {
81  int table_id = physical_input.table_id;
82  auto table = catalog.getMetadataForTable(table_id, false);
83  if (table && table->storageType == StorageType::FOREIGN_TABLE) {
84  int col_id = catalog.getColumnIdBySpi(table_id, physical_input.col_id);
85  const auto col_desc = catalog.getMetadataForColumn(table_id, col_id);
86  auto foreign_table = catalog.getForeignTable(table_id);
87  for (const auto& fragment :
88  foreign_table->fragmenter->getFragmentsForQuery().fragments) {
89  Chunk_NS::Chunk chunk{col_desc};
90  ChunkKey chunk_key = {
91  catalog.getDatabaseId(), table_id, col_id, fragment.fragmentId};
92  // do not include chunk hints that are in CPU memory
93  if (!chunk.isChunkOnDevice(
94  &catalog.getDataMgr(), chunk_key, Data_Namespace::CPU_LEVEL, 0)) {
95  parallelism_hints_per_table[{catalog.getDatabaseId(), table_id}].insert(
97  fragment.fragmentId});
98  }
99  }
100  }
101  }
102  if (!parallelism_hints_per_table.empty()) {
103  auto foreign_storage_mgr =
105  CHECK(foreign_storage_mgr);
106  foreign_storage_mgr->setParallelismHints(parallelism_hints_per_table);
107  }
108 }
const foreign_storage::ForeignTable * getForeignTable(const std::string &tableName) const
Definition: Catalog.cpp:1398
std::vector< int > ChunkKey
Definition: types.h:37
Data_Namespace::DataMgr & getDataMgr() const
Definition: Catalog.h:222
std::pair< int, int > ParallelismHint
PersistentStorageMgr * getPersistentStorageMgr() const
Definition: DataMgr.cpp:571
foreign_storage::ForeignStorageMgr * getForeignStorageMgr() const
const ColumnDescriptor * getMetadataForColumn(int tableId, const std::string &colName) const
int getDatabaseId() const
Definition: Catalog.h:276
#define CHECK(condition)
Definition: Logger.h:197
const TableDescriptor * getMetadataForTable(const std::string &tableName, const bool populateFragmenter=true) const
Returns a pointer to a const TableDescriptor struct matching the provided tableName.
std::unordered_set< PhysicalInput > get_physical_inputs(const RelAlgNode *ra)
static constexpr char const * FOREIGN_TABLE
const int getColumnIdBySpi(const int tableId, const size_t spi) const
Definition: Catalog.cpp:1543

+ Here is the call graph for this function:

+ Here is the caller graph for this function:

std::shared_ptr<Analyzer::Expr> anonymous_namespace{RelAlgExecutor.cpp}::set_transient_dict ( const std::shared_ptr< Analyzer::Expr expr)

Definition at line 1217 of file RelAlgExecutor.cpp.

References kENCODING_DICT, kENCODING_NONE, and TRANSIENT_DICT_ID.

Referenced by set_transient_dict_maybe(), translate_groupby_exprs(), and translate_targets().

1218  {
1219  const auto& ti = expr->get_type_info();
1220  if (!ti.is_string() || ti.get_compression() != kENCODING_NONE) {
1221  return expr;
1222  }
1223  auto transient_dict_ti = ti;
1224  transient_dict_ti.set_compression(kENCODING_DICT);
1225  transient_dict_ti.set_comp_param(TRANSIENT_DICT_ID);
1226  transient_dict_ti.set_fixed_size();
1227  return expr->add_cast(transient_dict_ti);
1228 }
#define TRANSIENT_DICT_ID
Definition: sqltypes.h:253

+ Here is the caller graph for this function:

void anonymous_namespace{RelAlgExecutor.cpp}::set_transient_dict_maybe ( std::vector< std::shared_ptr< Analyzer::Expr >> &  scalar_sources,
const std::shared_ptr< Analyzer::Expr > &  expr 
)

Definition at line 1230 of file RelAlgExecutor.cpp.

References fold_expr(), and set_transient_dict().

Referenced by translate_scalar_sources(), and translate_scalar_sources_for_update().

1232  {
1233  try {
1234  scalar_sources.push_back(set_transient_dict(fold_expr(expr.get())));
1235  } catch (...) {
1236  scalar_sources.push_back(fold_expr(expr.get()));
1237  }
1238 }
std::shared_ptr< Analyzer::Expr > set_transient_dict(const std::shared_ptr< Analyzer::Expr > expr)
std::shared_ptr< Analyzer::Expr > fold_expr(const Analyzer::Expr *expr)

+ Here is the call graph for this function:

+ Here is the caller graph for this function:

std::vector<std::shared_ptr<Analyzer::Expr> > anonymous_namespace{RelAlgExecutor.cpp}::synthesize_inputs ( const RelAlgNode ra_node,
const size_t  nest_level,
const std::vector< TargetMetaInfo > &  in_metainfo,
const std::unordered_map< const RelAlgNode *, int > &  input_to_nest_level 
)

Definition at line 3743 of file RelAlgExecutor.cpp.

References CHECK, CHECK_GE, CHECK_LE, RelAlgNode::getInput(), RelAlgNode::inputCount(), and table_id_from_ra().

Referenced by RelAlgExecutor::createAggregateWorkUnit(), and get_inputs_meta().

3747  {
3748  CHECK_LE(size_t(1), ra_node->inputCount());
3749  CHECK_GE(size_t(2), ra_node->inputCount());
3750  const auto input = ra_node->getInput(nest_level);
3751  const auto it_rte_idx = input_to_nest_level.find(input);
3752  CHECK(it_rte_idx != input_to_nest_level.end());
3753  const int rte_idx = it_rte_idx->second;
3754  const int table_id = table_id_from_ra(input);
3755  std::vector<std::shared_ptr<Analyzer::Expr>> inputs;
3756  const auto scan_ra = dynamic_cast<const RelScan*>(input);
3757  int input_idx = 0;
3758  for (const auto& input_meta : in_metainfo) {
3759  inputs.push_back(
3760  std::make_shared<Analyzer::ColumnVar>(input_meta.get_type_info(),
3761  table_id,
3762  scan_ra ? input_idx + 1 : input_idx,
3763  rte_idx));
3764  ++input_idx;
3765  }
3766  return inputs;
3767 }
#define CHECK_GE(x, y)
Definition: Logger.h:210
const RelAlgNode * getInput(const size_t idx) const
#define CHECK_LE(x, y)
Definition: Logger.h:208
int table_id_from_ra(const RelAlgNode *ra_node)
#define CHECK(condition)
Definition: Logger.h:197
const size_t inputCount() const

+ Here is the call graph for this function:

+ Here is the caller graph for this function:

int anonymous_namespace{RelAlgExecutor.cpp}::table_id_from_ra ( const RelAlgNode ra_node)

Definition at line 1015 of file RelAlgExecutor.cpp.

References CHECK, RelAlgNode::getId(), and RelScan::getTableDescriptor().

Referenced by collect_used_input_desc(), get_input_desc_impl(), and synthesize_inputs().

1015  {
1016  const auto scan_ra = dynamic_cast<const RelScan*>(ra_node);
1017  if (scan_ra) {
1018  const auto td = scan_ra->getTableDescriptor();
1019  CHECK(td);
1020  return td->tableId;
1021  }
1022  return -ra_node->getId();
1023 }
unsigned getId() const
#define CHECK(condition)
Definition: Logger.h:197
const TableDescriptor * getTableDescriptor() const

+ Here is the call graph for this function:

+ Here is the caller graph for this function:

std::vector<std::shared_ptr<Analyzer::Expr> > anonymous_namespace{RelAlgExecutor.cpp}::target_exprs_for_union ( RelAlgNode const *  input_node)

Definition at line 3901 of file RelAlgExecutor.cpp.

References RelAlgNode::getId(), RelAlgNode::getOutputMetainfo(), i, shared::printContainer(), and VLOG.

Referenced by RelAlgExecutor::createUnionWorkUnit().

3902  {
3903  std::vector<TargetMetaInfo> const& tmis = input_node->getOutputMetainfo();
3904  VLOG(3) << "input_node->getOutputMetainfo()=" << shared::printContainer(tmis);
3905  const int negative_node_id = -input_node->getId();
3906  std::vector<std::shared_ptr<Analyzer::Expr>> target_exprs;
3907  target_exprs.reserve(tmis.size());
3908  for (size_t i = 0; i < tmis.size(); ++i) {
3909  target_exprs.push_back(std::make_shared<Analyzer::ColumnVar>(
3910  tmis[i].get_type_info(), negative_node_id, i, 0));
3911  }
3912  return target_exprs;
3913 }
PrintContainer< CONTAINER > printContainer(CONTAINER &container)
Definition: misc.h:64
#define VLOG(n)
Definition: Logger.h:291

+ Here is the call graph for this function:

+ Here is the caller graph for this function:

std::shared_ptr<Analyzer::Expr> anonymous_namespace{RelAlgExecutor.cpp}::transform_to_inner ( const Analyzer::Expr *  expr)

Definition at line 1834 of file RelAlgExecutor.cpp.

Referenced by RelAlgExecutor::computeWindow().

1834  {
1835  const auto tuple = dynamic_cast<const Analyzer::ExpressionTuple*>(expr);
1836  if (tuple) {
1837  std::vector<std::shared_ptr<Analyzer::Expr>> transformed_tuple;
1838  for (const auto& element : tuple->getTuple()) {
1839  transformed_tuple.push_back(transform_to_inner(element.get()));
1840  }
1841  return makeExpr<Analyzer::ExpressionTuple>(transformed_tuple);
1842  }
1843  const auto col = dynamic_cast<const Analyzer::ColumnVar*>(expr);
1844  if (!col) {
1845  throw std::runtime_error("Only columns supported in the window partition for now");
1846  }
1847  return makeExpr<Analyzer::ColumnVar>(
1848  col->get_type_info(), col->get_table_id(), col->get_column_id(), 1);
1849 }
std::shared_ptr< Analyzer::Expr > transform_to_inner(const Analyzer::Expr *expr)

+ Here is the caller graph for this function:

std::list<std::shared_ptr<Analyzer::Expr> > anonymous_namespace{RelAlgExecutor.cpp}::translate_groupby_exprs ( const RelCompound *  compound,
const std::vector< std::shared_ptr< Analyzer::Expr >> &  scalar_sources 
)

Definition at line 1313 of file RelAlgExecutor.cpp.

References RelCompound::getGroupByCount(), RelCompound::isAggregate(), and set_transient_dict().

Referenced by RelAlgExecutor::createAggregateWorkUnit(), and RelAlgExecutor::createCompoundWorkUnit().

1315  {
1316  if (!compound->isAggregate()) {
1317  return {nullptr};
1318  }
1319  std::list<std::shared_ptr<Analyzer::Expr>> groupby_exprs;
1320  for (size_t group_idx = 0; group_idx < compound->getGroupByCount(); ++group_idx) {
1321  groupby_exprs.push_back(set_transient_dict(scalar_sources[group_idx]));
1322  }
1323  return groupby_exprs;
1324 }
std::shared_ptr< Analyzer::Expr > set_transient_dict(const std::shared_ptr< Analyzer::Expr > expr)
const size_t getGroupByCount() const
bool isAggregate() const

+ Here is the call graph for this function:

+ Here is the caller graph for this function:

std::list<std::shared_ptr<Analyzer::Expr> > anonymous_namespace{RelAlgExecutor.cpp}::translate_groupby_exprs ( const RelAggregate *  aggregate,
const std::vector< std::shared_ptr< Analyzer::Expr >> &  scalar_sources 
)

Definition at line 1326 of file RelAlgExecutor.cpp.

References RelAggregate::getGroupByCount(), and set_transient_dict().

1328  {
1329  std::list<std::shared_ptr<Analyzer::Expr>> groupby_exprs;
1330  for (size_t group_idx = 0; group_idx < aggregate->getGroupByCount(); ++group_idx) {
1331  groupby_exprs.push_back(set_transient_dict(scalar_sources[group_idx]));
1332  }
1333  return groupby_exprs;
1334 }
const size_t getGroupByCount() const
std::shared_ptr< Analyzer::Expr > set_transient_dict(const std::shared_ptr< Analyzer::Expr > expr)

+ Here is the call graph for this function:

QualsConjunctiveForm anonymous_namespace{RelAlgExecutor.cpp}::translate_quals ( const RelCompound *  compound,
const RelAlgTranslator &  translator 
)

Definition at line 1336 of file RelAlgExecutor.cpp.

References fold_expr(), RelCompound::getFilterExpr(), qual_to_conjunctive_form(), and RelAlgTranslator::translateScalarRex().

Referenced by RelAlgExecutor::createCompoundWorkUnit().

1337  {
1338  const auto filter_rex = compound->getFilterExpr();
1339  const auto filter_expr =
1340  filter_rex ? translator.translateScalarRex(filter_rex) : nullptr;
1341  return filter_expr ? qual_to_conjunctive_form(fold_expr(filter_expr.get()))
1342  : QualsConjunctiveForm{};
1343 }
const RexScalar * getFilterExpr() const
std::shared_ptr< Analyzer::Expr > translateScalarRex(const RexScalar *rex) const
QualsConjunctiveForm qual_to_conjunctive_form(const std::shared_ptr< Analyzer::Expr > qual_expr)
std::shared_ptr< Analyzer::Expr > fold_expr(const Analyzer::Expr *expr)

+ Here is the call graph for this function:

+ Here is the caller graph for this function:

template<class RA >
std::vector<std::shared_ptr<Analyzer::Expr> > anonymous_namespace{RelAlgExecutor.cpp}::translate_scalar_sources ( const RA *  ra_node,
const RelAlgTranslator &  translator,
const ::ExecutorType  executor_type 
)

Definition at line 1250 of file RelAlgExecutor.cpp.

References cast_dict_to_none(), fold_expr(), get_scalar_sources_size(), i, Native, rewrite_array_elements(), rewrite_expr(), scalar_at(), set_transient_dict_maybe(), RelAlgTranslator::translateScalarRex(), and VLOG.

Referenced by RelAlgExecutor::createCompoundWorkUnit(), RelAlgExecutor::createProjectWorkUnit(), and RelAlgExecutor::createTableFunctionWorkUnit().

1253  {
1254  std::vector<std::shared_ptr<Analyzer::Expr>> scalar_sources;
1255  const size_t scalar_sources_size = get_scalar_sources_size(ra_node);
1256  VLOG(3) << "get_scalar_sources_size(" << ra_node->toString()
1257  << ") = " << scalar_sources_size;
1258  for (size_t i = 0; i < scalar_sources_size; ++i) {
1259  const auto scalar_rex = scalar_at(i, ra_node);
1260  if (dynamic_cast<const RexRef*>(scalar_rex)) {
1261  // RexRef are synthetic scalars we append at the end of the real ones
1262  // for the sake of taking memory ownership, no real work needed here.
1263  continue;
1264  }
1265 
1266  const auto scalar_expr =
1267  rewrite_array_elements(translator.translateScalarRex(scalar_rex).get());
1268  const auto rewritten_expr = rewrite_expr(scalar_expr.get());
1269  if (executor_type == ExecutorType::Native) {
1270  set_transient_dict_maybe(scalar_sources, rewritten_expr);
1271  } else {
1272  scalar_sources.push_back(cast_dict_to_none(fold_expr(rewritten_expr.get())));
1273  }
1274  }
1275 
1276  return scalar_sources;
1277 }
Analyzer::ExpressionPtr rewrite_array_elements(Analyzer::Expr const *expr)
std::shared_ptr< Analyzer::Expr > translateScalarRex(const RexScalar *rex) const
size_t get_scalar_sources_size(const RelCompound *compound)
Analyzer::ExpressionPtr rewrite_expr(const Analyzer::Expr *expr)
std::shared_ptr< Analyzer::Expr > cast_dict_to_none(const std::shared_ptr< Analyzer::Expr > &input)
const RexScalar * scalar_at(const size_t i, const RelCompound *compound)
#define VLOG(n)
Definition: Logger.h:291
std::shared_ptr< Analyzer::Expr > fold_expr(const Analyzer::Expr *expr)
void set_transient_dict_maybe(std::vector< std::shared_ptr< Analyzer::Expr >> &scalar_sources, const std::shared_ptr< Analyzer::Expr > &expr)

+ Here is the call graph for this function:

+ Here is the caller graph for this function:

template<class RA >
std::vector<std::shared_ptr<Analyzer::Expr> > anonymous_namespace{RelAlgExecutor.cpp}::translate_scalar_sources_for_update ( const RA *  ra_node,
const RelAlgTranslator &  translator,
int32_t  tableId,
const Catalog_Namespace::Catalog &  cat,
const ColumnNameList &  colNames,
size_t  starting_projection_column_idx 
)

Definition at line 1280 of file RelAlgExecutor.cpp.

References cat(), get_scalar_sources_size(), i, rewrite_array_elements(), rewrite_expr(), scalar_at(), set_transient_dict_maybe(), and RelAlgTranslator::translateScalarRex().

1286  {
1287  std::vector<std::shared_ptr<Analyzer::Expr>> scalar_sources;
1288  for (size_t i = 0; i < get_scalar_sources_size(ra_node); ++i) {
1289  const auto scalar_rex = scalar_at(i, ra_node);
1290  if (dynamic_cast<const RexRef*>(scalar_rex)) {
1291  // RexRef are synthetic scalars we append at the end of the real ones
1292  // for the sake of taking memory ownership, no real work needed here.
1293  continue;
1294  }
1295 
1296  std::shared_ptr<Analyzer::Expr> translated_expr;
1297  if (i >= starting_projection_column_idx && i < get_scalar_sources_size(ra_node) - 1) {
1298  translated_expr = cast_to_column_type(translator.translateScalarRex(scalar_rex),
1299  tableId,
1300  cat,
1301  colNames[i - starting_projection_column_idx]);
1302  } else {
1303  translated_expr = translator.translateScalarRex(scalar_rex);
1304  }
1305  const auto scalar_expr = rewrite_array_elements(translated_expr.get());
1306  const auto rewritten_expr = rewrite_expr(scalar_expr.get());
1307  set_transient_dict_maybe(scalar_sources, rewritten_expr);
1308  }
1309 
1310  return scalar_sources;
1311 }
Analyzer::ExpressionPtr rewrite_array_elements(Analyzer::Expr const *expr)
std::string cat(Ts &&...args)
std::shared_ptr< Analyzer::Expr > translateScalarRex(const RexScalar *rex) const
size_t get_scalar_sources_size(const RelCompound *compound)
Analyzer::ExpressionPtr rewrite_expr(const Analyzer::Expr *expr)
const RexScalar * scalar_at(const size_t i, const RelCompound *compound)
void set_transient_dict_maybe(std::vector< std::shared_ptr< Analyzer::Expr >> &scalar_sources, const std::shared_ptr< Analyzer::Expr > &expr)

+ Here is the call graph for this function:

std::vector<Analyzer::Expr*> anonymous_namespace{RelAlgExecutor.cpp}::translate_targets ( std::vector< std::shared_ptr< Analyzer::Expr >> &  target_exprs_owned,
const std::vector< std::shared_ptr< Analyzer::Expr >> &  scalar_sources,
const std::list< std::shared_ptr< Analyzer::Expr >> &  groupby_exprs,
const RelCompound *  compound,
const RelAlgTranslator &  translator,
const ExecutorType  executor_type 
)

Definition at line 1345 of file RelAlgExecutor.cpp.

References cast_dict_to_none(), CHECK, CHECK_GE, CHECK_LE, fold_expr(), RexRef::getIndex(), RelCompound::getTargetExpr(), i, Analyzer::Var::kGROUPBY, Native, rewrite_expr(), set_transient_dict(), RelCompound::size(), RelAlgTranslator::translateAggregateRex(), RelAlgTranslator::translateScalarRex(), and var_ref().

Referenced by RelAlgExecutor::createAggregateWorkUnit(), and RelAlgExecutor::createCompoundWorkUnit().

1351  {
1352  std::vector<Analyzer::Expr*> target_exprs;
1353  for (size_t i = 0; i < compound->size(); ++i) {
1354  const auto target_rex = compound->getTargetExpr(i);
1355  const auto target_rex_agg = dynamic_cast<const RexAgg*>(target_rex);
1356  std::shared_ptr<Analyzer::Expr> target_expr;
1357  if (target_rex_agg) {
1358  target_expr =
1359  RelAlgTranslator::translateAggregateRex(target_rex_agg, scalar_sources);
1360  } else {
1361  const auto target_rex_scalar = dynamic_cast<const RexScalar*>(target_rex);
1362  const auto target_rex_ref = dynamic_cast<const RexRef*>(target_rex_scalar);
1363  if (target_rex_ref) {
1364  const auto ref_idx = target_rex_ref->getIndex();
1365  CHECK_GE(ref_idx, size_t(1));
1366  CHECK_LE(ref_idx, groupby_exprs.size());
1367  const auto groupby_expr = *std::next(groupby_exprs.begin(), ref_idx - 1);
1368  target_expr = var_ref(groupby_expr.get(), Analyzer::Var::kGROUPBY, ref_idx);
1369  } else {
1370  target_expr = translator.translateScalarRex(target_rex_scalar);
1371  auto rewritten_expr = rewrite_expr(target_expr.get());
1372  target_expr = fold_expr(rewritten_expr.get());
1373  if (executor_type == ExecutorType::Native) {
1374  try {
1375  target_expr = set_transient_dict(target_expr);
1376  } catch (...) {
1377  // noop
1378  }
1379  } else {
1380  target_expr = cast_dict_to_none(target_expr);
1381  }
1382  }
1383  }
1384  CHECK(target_expr);
1385  target_exprs_owned.push_back(target_expr);
1386  target_exprs.push_back(target_expr.get());
1387  }
1388  return target_exprs;
1389 }
const Rex * getTargetExpr(const size_t i) const
size_t getIndex() const
std::shared_ptr< Analyzer::Expr > translateScalarRex(const RexScalar *rex) const
size_t size() const override
std::shared_ptr< Analyzer::Var > var_ref(const Analyzer::Expr *expr, const Analyzer::Var::WhichRow which_row, const int varno)
Definition: Analyzer.h:1675
#define CHECK_GE(x, y)
Definition: Logger.h:210
std::shared_ptr< Analyzer::Expr > set_transient_dict(const std::shared_ptr< Analyzer::Expr > expr)
Analyzer::ExpressionPtr rewrite_expr(const Analyzer::Expr *expr)
std::shared_ptr< Analyzer::Expr > cast_dict_to_none(const std::shared_ptr< Analyzer::Expr > &input)
#define CHECK_LE(x, y)
Definition: Logger.h:208
#define CHECK(condition)
Definition: Logger.h:197
std::shared_ptr< Analyzer::Expr > fold_expr(const Analyzer::Expr *expr)
static std::shared_ptr< Analyzer::Expr > translateAggregateRex(const RexAgg *rex, const std::vector< std::shared_ptr< Analyzer::Expr >> &scalar_sources)

+ Here is the call graph for this function:

+ Here is the caller graph for this function:

std::vector<Analyzer::Expr*> anonymous_namespace{RelAlgExecutor.cpp}::translate_targets ( std::vector< std::shared_ptr< Analyzer::Expr >> &  target_exprs_owned,
const std::vector< std::shared_ptr< Analyzer::Expr >> &  scalar_sources,
const std::list< std::shared_ptr< Analyzer::Expr >> &  groupby_exprs,
const RelAggregate *  aggregate,
const RelAlgTranslator &  translator 
)

Definition at line 1391 of file RelAlgExecutor.cpp.

References CHECK, fold_expr(), RelAggregate::getAggExprs(), Analyzer::Var::kGROUPBY, RelAlgTranslator::translateAggregateRex(), and var_ref().

1396  {
1397  std::vector<Analyzer::Expr*> target_exprs;
1398  size_t group_key_idx = 1;
1399  for (const auto& groupby_expr : groupby_exprs) {
1400  auto target_expr =
1401  var_ref(groupby_expr.get(), Analyzer::Var::kGROUPBY, group_key_idx++);
1402  target_exprs_owned.push_back(target_expr);
1403  target_exprs.push_back(target_expr.get());
1404  }
1405 
1406  for (const auto& target_rex_agg : aggregate->getAggExprs()) {
1407  auto target_expr =
1408  RelAlgTranslator::translateAggregateRex(target_rex_agg.get(), scalar_sources);
1409  CHECK(target_expr);
1410  target_expr = fold_expr(target_expr.get());
1411  target_exprs_owned.push_back(target_expr);
1412  target_exprs.push_back(target_expr.get());
1413  }
1414  return target_exprs;
1415 }
std::shared_ptr< Analyzer::Var > var_ref(const Analyzer::Expr *expr, const Analyzer::Var::WhichRow which_row, const int varno)
Definition: Analyzer.h:1675
const std::vector< std::unique_ptr< const RexAgg > > & getAggExprs() const
#define CHECK(condition)
Definition: Logger.h:197
std::shared_ptr< Analyzer::Expr > fold_expr(const Analyzer::Expr *expr)
static std::shared_ptr< Analyzer::Expr > translateAggregateRex(const RexAgg *rex, const std::vector< std::shared_ptr< Analyzer::Expr >> &scalar_sources)

+ Here is the call graph for this function: