OmniSciDB  a667adc9c8
TargetExprCodegen Struct Reference

#include <TargetExprBuilder.h>

Collaboration diagram for TargetExprCodegen:

Public Member Functions

 TargetExprCodegen (const Analyzer::Expr *target_expr, TargetInfo &target_info, const int32_t base_slot_index, const size_t target_idx, const bool is_group_by)
 
void codegen (GroupByAndAggregate *group_by_and_agg, Executor *executor, const QueryMemoryDescriptor &query_mem_desc, const CompilationOptions &co, const GpuSharedMemoryContext &gpu_smem_context, const std::tuple< llvm::Value *, llvm::Value * > &agg_out_ptr_w_idx, const std::vector< llvm::Value * > &agg_out_vec, llvm::Value *output_buffer_byte_stream, llvm::Value *out_row_idx, GroupByAndAggregate::DiamondCodegen &diamond_codegen, GroupByAndAggregate::DiamondCodegen *sample_cfg=nullptr) const
 
void codegenAggregate (GroupByAndAggregate *group_by_and_agg, Executor *executor, const QueryMemoryDescriptor &query_mem_desc, const CompilationOptions &co, const std::vector< llvm::Value * > &target_lvs, const std::tuple< llvm::Value *, llvm::Value * > &agg_out_ptr_w_idx, const std::vector< llvm::Value * > &agg_out_vec, llvm::Value *output_buffer_byte_stream, llvm::Value *out_row_idx, int32_t slot_index) const
 

Public Attributes

const Analyzer::Expr * target_expr
 
TargetInfo target_info
 
int32_t base_slot_index
 
size_t target_idx
 
bool is_group_by
 

Detailed Description

Definition at line 33 of file TargetExprBuilder.h.
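
Each TargetExprCodegen bundles what is needed to emit code for a single target (output) expression: the expression itself, its TargetInfo, the first output slot it writes (base_slot_index), its position in the target list (target_idx), and whether the query is grouped (is_group_by). The following is a minimal, self-contained sketch of that per-target bookkeeping pattern; Expr, TargetInfoLite, TargetDescriptor, slots_for_target() and build_descriptors() are hypothetical stand-ins for illustration, not OmniSciDB types.

#include <cstddef>
#include <cstdint>
#include <iostream>
#include <string>
#include <vector>

// Hypothetical stand-ins for Analyzer::Expr / TargetInfo, for illustration only.
struct Expr {
  std::string name;
};
struct TargetInfoLite {
  bool is_agg;
  bool is_avg;  // AVG needs two output slots: sum and count
};

struct TargetDescriptor {
  const Expr* target_expr;
  TargetInfoLite target_info;
  int32_t base_slot_index;  // first output slot written by this target
  size_t target_idx;        // position in the SELECT target list
  bool is_group_by;
};

// Hypothetical helper: how many output slots a target occupies.
size_t slots_for_target(const TargetInfoLite& ti) {
  return ti.is_avg ? 2 : 1;
}

std::vector<TargetDescriptor> build_descriptors(const std::vector<Expr>& targets,
                                                const std::vector<TargetInfoLite>& infos,
                                                bool is_group_by) {
  std::vector<TargetDescriptor> result;
  int32_t slot = 0;
  for (size_t i = 0; i < targets.size(); ++i) {
    result.push_back({&targets[i], infos[i], slot, i, is_group_by});
    slot += static_cast<int32_t>(slots_for_target(infos[i]));
  }
  return result;
}

int main() {
  std::vector<Expr> targets{{"x"}, {"AVG(y)"}};
  std::vector<TargetInfoLite> infos{{false, false}, {true, true}};
  for (const auto& d : build_descriptors(targets, infos, /*is_group_by=*/true)) {
    std::cout << d.target_expr->name << ": base_slot_index=" << d.base_slot_index
              << " target_idx=" << d.target_idx << "\n";
  }
}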

Constructor & Destructor Documentation

TargetExprCodegen::TargetExprCodegen ( const Analyzer::Expr *  target_expr,
TargetInfo &  target_info,
const int32_t  base_slot_index,
const size_t  target_idx,
const bool  is_group_by 
)
inline

Definition at line 34 of file TargetExprBuilder.h.

      : target_expr(target_expr)
      , target_info(target_info)
      , base_slot_index(base_slot_index)
      , target_idx(target_idx)
      , is_group_by(is_group_by) {}

Member Function Documentation

void TargetExprCodegen::codegen ( GroupByAndAggregate *  group_by_and_agg,
Executor *  executor,
const QueryMemoryDescriptor &  query_mem_desc,
const CompilationOptions &  co,
const GpuSharedMemoryContext &  gpu_smem_context,
const std::tuple< llvm::Value *, llvm::Value * > &  agg_out_ptr_w_idx,
const std::vector< llvm::Value * > &  agg_out_vec,
llvm::Value *  output_buffer_byte_stream,
llvm::Value *  out_row_idx,
GroupByAndAggregate::DiamondCodegen &  diamond_codegen,
GroupByAndAggregate::DiamondCodegen *  sample_cfg = nullptr 
) const

Definition at line 94 of file TargetExprBuilder.cpp.

References agg_arg(), anonymous_namespace{TargetExprBuilder.cpp}::agg_fn_base_names(), AUTOMATIC_IR_METADATA, base_slot_index, CHECK, CHECK_EQ, CHECK_GE, GroupByAndAggregate::codegenAggArg(), codegenAggregate(), GroupByAndAggregate::codegenWindowRowPointer(), CompilationOptions::device_type, QueryMemoryDescriptor::didOutputColumnar(), GroupByAndAggregate::emitCall(), g_bigint_count, get_int_type(), SQLTypeInfo::get_physical_coord_cols(), QueryMemoryDescriptor::getColOffInBytes(), QueryMemoryDescriptor::getColOnlyOffInBytes(), QueryMemoryDescriptor::getPaddedSlotWidthBytes(), GPU, i, TargetInfo::is_agg, SQLTypeInfo::is_geometry(), is_group_by, anonymous_namespace{TargetExprBuilder.cpp}::is_simple_count(), GpuSharedMemoryContext::isSharedMemoryUsed(), LL_BUILDER, LL_CONTEXT, LL_INT, WindowProjectNodeContext::resetWindowFunctionContext(), TargetInfo::sql_type, target_expr, anonymous_namespace{TargetExprBuilder.cpp}::target_has_geo(), target_idx, target_info, QueryMemoryDescriptor::threadsShareMemory(), and window_function_is_aggregate().

{
  CHECK(group_by_and_agg);
  CHECK(executor);
  AUTOMATIC_IR_METADATA(executor->cgen_state_.get());

  auto agg_out_ptr_w_idx = agg_out_ptr_w_idx_in;
  const auto arg_expr = agg_arg(target_expr);

  const auto agg_fn_names = agg_fn_base_names(target_info);
  const auto window_func = dynamic_cast<const Analyzer::WindowFunction*>(target_expr);
  WindowProjectNodeContext::resetWindowFunctionContext(executor);
  auto target_lvs =
      window_func
          ? std::vector<llvm::Value*>{executor->codegenWindowFunction(target_idx, co)}
          : group_by_and_agg->codegenAggArg(target_expr, co);
  const auto window_row_ptr = window_func
                                  ? group_by_and_agg->codegenWindowRowPointer(
                                        window_func, query_mem_desc, co, diamond_codegen)
                                  : nullptr;
  if (window_row_ptr) {
    agg_out_ptr_w_idx =
        std::make_tuple(window_row_ptr, std::get<1>(agg_out_ptr_w_idx_in));
    if (window_function_is_aggregate(window_func->getKind())) {
      out_row_idx = window_row_ptr;
    }
  }

  llvm::Value* str_target_lv{nullptr};
  if (target_lvs.size() == 3 && !target_has_geo(target_info)) {
    // none encoding string, pop the packed pointer + length since
    // it's only useful for IS NULL checks and assumed to be only
    // two components (pointer and length) for the purpose of projection
    str_target_lv = target_lvs.front();
    target_lvs.erase(target_lvs.begin());
  }
  if (target_info.sql_type.is_geometry()) {
    // Geo cols are expanded to the physical coord cols. Each physical coord col is an
    // array. Ensure that the target values generated match the number of agg
    // functions before continuing
    if (target_lvs.size() < agg_fn_names.size()) {
      CHECK_EQ(target_lvs.size(), agg_fn_names.size() / 2);
      std::vector<llvm::Value*> new_target_lvs;
      new_target_lvs.reserve(agg_fn_names.size());
      for (const auto& target_lv : target_lvs) {
        new_target_lvs.push_back(target_lv);
        new_target_lvs.push_back(target_lv);
      }
      target_lvs = new_target_lvs;
    }
  }
  if (target_lvs.size() < agg_fn_names.size()) {
    CHECK_EQ(size_t(1), target_lvs.size());
    CHECK_EQ(size_t(2), agg_fn_names.size());
    for (size_t i = 1; i < agg_fn_names.size(); ++i) {
      target_lvs.push_back(target_lvs.front());
    }
  } else {
    if (target_info.sql_type.is_geometry()) {
      if (!target_info.is_agg) {
        CHECK_EQ(static_cast<size_t>(2 * target_info.sql_type.get_physical_coord_cols()),
                 target_lvs.size());
        CHECK_EQ(agg_fn_names.size(), target_lvs.size());
      }
    } else {
      CHECK(str_target_lv || (agg_fn_names.size() == target_lvs.size()));
      CHECK(target_lvs.size() == 1 || target_lvs.size() == 2);
    }
  }

  int32_t slot_index = base_slot_index;
  CHECK_GE(slot_index, 0);
  CHECK(is_group_by || static_cast<size_t>(slot_index) < agg_out_vec.size());

  uint32_t col_off{0};
  if (co.device_type == ExecutorDeviceType::GPU && query_mem_desc.threadsShareMemory() &&
      is_simple_count(target_info) &&
      (!arg_expr || arg_expr->get_type_info().get_notnull())) {
    CHECK_EQ(size_t(1), agg_fn_names.size());
    const auto chosen_bytes = query_mem_desc.getPaddedSlotWidthBytes(slot_index);
    llvm::Value* agg_col_ptr{nullptr};
    if (is_group_by) {
      if (query_mem_desc.didOutputColumnar()) {
        col_off = query_mem_desc.getColOffInBytes(slot_index);
        CHECK_EQ(size_t(0), col_off % chosen_bytes);
        col_off /= chosen_bytes;
        CHECK(std::get<1>(agg_out_ptr_w_idx));
        auto offset =
            LL_BUILDER.CreateAdd(std::get<1>(agg_out_ptr_w_idx), LL_INT(col_off));
        agg_col_ptr = LL_BUILDER.CreateGEP(
            LL_BUILDER.CreateBitCast(
                std::get<0>(agg_out_ptr_w_idx),
                llvm::PointerType::get(get_int_type((chosen_bytes << 3), LL_CONTEXT), 0)),
            offset);
      } else {
        col_off = query_mem_desc.getColOnlyOffInBytes(slot_index);
        CHECK_EQ(size_t(0), col_off % chosen_bytes);
        col_off /= chosen_bytes;
        agg_col_ptr = LL_BUILDER.CreateGEP(
            LL_BUILDER.CreateBitCast(
                std::get<0>(agg_out_ptr_w_idx),
                llvm::PointerType::get(get_int_type((chosen_bytes << 3), LL_CONTEXT), 0)),
            LL_INT(col_off));
      }
    }

    if (chosen_bytes != sizeof(int32_t)) {
      CHECK_EQ(8, chosen_bytes);
      if (g_bigint_count) {
        const auto acc_i64 = LL_BUILDER.CreateBitCast(
            is_group_by ? agg_col_ptr : agg_out_vec[slot_index],
            llvm::PointerType::get(get_int_type(64, LL_CONTEXT), 0));
        if (gpu_smem_context.isSharedMemoryUsed()) {
          group_by_and_agg->emitCall(
              "agg_count_shared", std::vector<llvm::Value*>{acc_i64, LL_INT(int64_t(1))});
        } else {
          LL_BUILDER.CreateAtomicRMW(llvm::AtomicRMWInst::Add,
                                     acc_i64,
                                     LL_INT(int64_t(1)),
                                     llvm::AtomicOrdering::Monotonic);
        }
      } else {
        auto acc_i32 = LL_BUILDER.CreateBitCast(
            is_group_by ? agg_col_ptr : agg_out_vec[slot_index],
            llvm::PointerType::get(get_int_type(32, LL_CONTEXT), 0));
        if (gpu_smem_context.isSharedMemoryUsed()) {
          acc_i32 = LL_BUILDER.CreatePointerCast(
              acc_i32, llvm::Type::getInt32PtrTy(LL_CONTEXT, 3));
        }
        LL_BUILDER.CreateAtomicRMW(llvm::AtomicRMWInst::Add,
                                   acc_i32,
                                   LL_INT(1),
                                   llvm::AtomicOrdering::Monotonic);
      }
    } else {
      const auto acc_i32 = (is_group_by ? agg_col_ptr : agg_out_vec[slot_index]);
      if (gpu_smem_context.isSharedMemoryUsed()) {
        // Atomic operation on address space level 3 (Shared):
        const auto shared_acc_i32 = LL_BUILDER.CreatePointerCast(
            acc_i32, llvm::Type::getInt32PtrTy(LL_CONTEXT, 3));
        LL_BUILDER.CreateAtomicRMW(llvm::AtomicRMWInst::Add,
                                   shared_acc_i32,
                                   LL_INT(1),
                                   llvm::AtomicOrdering::Monotonic);
      } else {
        LL_BUILDER.CreateAtomicRMW(llvm::AtomicRMWInst::Add,
                                   acc_i32,
                                   LL_INT(1),
                                   llvm::AtomicOrdering::Monotonic);
      }
    }
    return;
  }

  codegenAggregate(group_by_and_agg,
                   executor,
                   query_mem_desc,
                   co,
                   target_lvs,
                   agg_out_ptr_w_idx,
                   agg_out_vec,
                   output_buffer_byte_stream,
                   out_row_idx,
                   slot_index);
}

Here is the call graph for this function:
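
On the GPU, for a simple COUNT over a non-nullable argument with thread-shared output buffers, codegen() skips codegenAggregate() entirely and emits an atomic add of 1 directly on the count slot (either a call to agg_count_shared or an AtomicRMW Add with monotonic ordering), with the accumulator width selected by g_bigint_count. The sketch below is a rough host-side analogue of the semantics of that emitted code using std::atomic; the thread loop and variable names are illustrative assumptions, not OmniSciDB code.

#include <atomic>
#include <cstdint>
#include <iostream>
#include <thread>
#include <vector>

int main() {
  // Stand-in for the per-slot count accumulator the generated IR updates.
  // g_bigint_count selects a 64-bit counter; otherwise a 32-bit one is used.
  constexpr bool g_bigint_count = true;
  std::atomic<int64_t> count64{0};
  std::atomic<int32_t> count32{0};

  auto worker = [&](int rows) {
    for (int i = 0; i < rows; ++i) {
      // Equivalent of the atomic add-of-1 (relaxed/monotonic ordering) per matching row.
      if (g_bigint_count) {
        count64.fetch_add(1, std::memory_order_relaxed);
      } else {
        count32.fetch_add(1, std::memory_order_relaxed);
      }
    }
  };

  std::vector<std::thread> threads;
  for (int t = 0; t < 4; ++t) {
    threads.emplace_back(worker, 1000);
  }
  for (auto& th : threads) {
    th.join();
  }
  std::cout << "count = "
            << (g_bigint_count ? count64.load() : int64_t(count32.load())) << "\n";
}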

void TargetExprCodegen::codegenAggregate ( GroupByAndAggregate *  group_by_and_agg,
Executor *  executor,
const QueryMemoryDescriptor &  query_mem_desc,
const CompilationOptions &  co,
const std::vector< llvm::Value * > &  target_lvs,
const std::tuple< llvm::Value *, llvm::Value * > &  agg_out_ptr_w_idx,
const std::vector< llvm::Value * > &  agg_out_vec,
llvm::Value *  output_buffer_byte_stream,
llvm::Value *  out_row_idx,
int32_t  slot_index 
) const

Definition at line 270 of file TargetExprBuilder.cpp.

References agg_arg(), TargetInfo::agg_arg_type, anonymous_namespace{TargetExprBuilder.cpp}::agg_fn_base_names(), TargetInfo::agg_kind, AUTOMATIC_IR_METADATA, CHECK, CHECK_EQ, CHECK_GE, GroupByAndAggregate::checkErrorCode(), GroupByAndAggregate::codegenAggColumnPtr(), GroupByAndAggregate::codegenApproxMedian(), GroupByAndAggregate::codegenCountDistinct(), GroupByAndAggregate::convertNullIfAny(), CompilationOptions::device_type, QueryMemoryDescriptor::didOutputColumnar(), GroupByAndAggregate::emitCall(), get_compact_type(), SQLTypeInfo::get_type(), WindowProjectNodeContext::getActiveWindowFunctionContext(), QueryMemoryDescriptor::getColOnlyOffInBytes(), QueryMemoryDescriptor::getLogicalSlotWidthBytes(), QueryMemoryDescriptor::getPaddedSlotWidthBytes(), GPU, is_agg_domain_range_equivalent(), TargetInfo::is_distinct, is_distinct_target(), is_group_by, anonymous_namespace{TargetExprBuilder.cpp}::is_simple_count(), QueryMemoryDescriptor::isLogicalSizedColumnsAllowed(), kAPPROX_MEDIAN, kAVG, kDOUBLE, kFLOAT, kNULLT, kSINGLE_VALUE, LL_BUILDER, LL_CONTEXT, LL_INT, GroupByAndAggregate::needsUnnestDoublePatch(), numeric_type_name(), patch_agg_fname(), CodeGenerator::posArg(), TargetInfo::skip_null_val, TargetInfo::sql_type, takes_float_argument(), target_expr, anonymous_namespace{TargetExprBuilder.cpp}::target_has_geo(), target_idx, target_info, QueryMemoryDescriptor::threadsShareMemory(), to_string(), and window_function_requires_peer_handling().

Referenced by codegen().

{
  AUTOMATIC_IR_METADATA(executor->cgen_state_.get());
  size_t target_lv_idx = 0;
  const bool lazy_fetched{executor->plan_state_->isLazyFetchColumn(target_expr)};

  CodeGenerator code_generator(executor);

  const auto agg_fn_names = agg_fn_base_names(target_info);
  auto arg_expr = agg_arg(target_expr);

  for (const auto& agg_base_name : agg_fn_names) {
    if (target_info.is_distinct && arg_expr->get_type_info().is_array()) {
      CHECK_EQ(static_cast<size_t>(query_mem_desc.getLogicalSlotWidthBytes(slot_index)),
               sizeof(int64_t));
      // TODO(miyu): check if buffer may be columnar here
      CHECK(!query_mem_desc.didOutputColumnar());
      const auto& elem_ti = arg_expr->get_type_info().get_elem_type();
      uint32_t col_off{0};
      if (is_group_by) {
        const auto col_off_in_bytes = query_mem_desc.getColOnlyOffInBytes(slot_index);
        CHECK_EQ(size_t(0), col_off_in_bytes % sizeof(int64_t));
        col_off /= sizeof(int64_t);
      }
      executor->cgen_state_->emitExternalCall(
          "agg_count_distinct_array_" + numeric_type_name(elem_ti),
          llvm::Type::getVoidTy(LL_CONTEXT),
          {is_group_by
               ? LL_BUILDER.CreateGEP(std::get<0>(agg_out_ptr_w_idx), LL_INT(col_off))
               : agg_out_vec[slot_index],
           target_lvs[target_lv_idx],
           code_generator.posArg(arg_expr),
           elem_ti.is_fp()
               ? static_cast<llvm::Value*>(executor->cgen_state_->inlineFpNull(elem_ti))
               : static_cast<llvm::Value*>(
                     executor->cgen_state_->inlineIntNull(elem_ti))});
      ++slot_index;
      ++target_lv_idx;
      continue;
    }

    llvm::Value* agg_col_ptr{nullptr};
    const auto chosen_bytes =
        static_cast<size_t>(query_mem_desc.getPaddedSlotWidthBytes(slot_index));
    const auto& chosen_type = get_compact_type(target_info);
    const auto& arg_type =
        ((arg_expr && arg_expr->get_type_info().get_type() != kNULLT) &&
         !target_info.is_distinct)
            ? target_info.agg_arg_type
            : target_info.sql_type;
    const bool is_fp_arg =
        !lazy_fetched && arg_type.get_type() != kNULLT && arg_type.is_fp();
    if (is_group_by) {
      agg_col_ptr = group_by_and_agg->codegenAggColumnPtr(output_buffer_byte_stream,
                                                          out_row_idx,
                                                          agg_out_ptr_w_idx,
                                                          query_mem_desc,
                                                          chosen_bytes,
                                                          slot_index,
                                                          target_idx);
      CHECK(agg_col_ptr);
      agg_col_ptr->setName("agg_col_ptr");
    }

    const bool float_argument_input = takes_float_argument(target_info);
    const bool is_count_in_avg = target_info.agg_kind == kAVG && target_lv_idx == 1;
    // The count component of an average should never be compacted.
    const auto agg_chosen_bytes =
        float_argument_input && !is_count_in_avg ? sizeof(float) : chosen_bytes;
    if (float_argument_input) {
      CHECK_GE(chosen_bytes, sizeof(float));
    }

    auto target_lv = target_lvs[target_lv_idx];
    const auto needs_unnest_double_patch = group_by_and_agg->needsUnnestDoublePatch(
        target_lv, agg_base_name, query_mem_desc.threadsShareMemory(), co);
    const auto need_skip_null = !needs_unnest_double_patch && target_info.skip_null_val;
    if (!needs_unnest_double_patch) {
      if (need_skip_null && !is_agg_domain_range_equivalent(target_info.agg_kind)) {
        target_lv = group_by_and_agg->convertNullIfAny(arg_type, target_info, target_lv);
      } else if (is_fp_arg) {
        target_lv = executor->castToFP(target_lv, arg_type, target_info.sql_type);
      }
      if (!dynamic_cast<const Analyzer::AggExpr*>(target_expr) || arg_expr) {
        target_lv =
            executor->cgen_state_->castToTypeIn(target_lv, (agg_chosen_bytes << 3));
      }
    }

    const bool is_simple_count_target = is_simple_count(target_info);
    llvm::Value* str_target_lv{nullptr};
    if (target_lvs.size() == 3 && !target_has_geo(target_info)) {
      // none encoding string
      str_target_lv = target_lvs.front();
    }
    std::vector<llvm::Value*> agg_args{
        executor->castToIntPtrTyIn((is_group_by ? agg_col_ptr : agg_out_vec[slot_index]),
                                   (agg_chosen_bytes << 3)),
        (is_simple_count_target && !arg_expr)
            ? (agg_chosen_bytes == sizeof(int32_t) ? LL_INT(int32_t(0))
                                                   : LL_INT(int64_t(0)))
            : (is_simple_count_target && arg_expr && str_target_lv ? str_target_lv
                                                                   : target_lv)};
    if (query_mem_desc.isLogicalSizedColumnsAllowed()) {
      if (is_simple_count_target && arg_expr && str_target_lv) {
        agg_args[1] =
            agg_chosen_bytes == sizeof(int32_t) ? LL_INT(int32_t(0)) : LL_INT(int64_t(0));
      }
    }
    std::string agg_fname{agg_base_name};
    if (is_fp_arg) {
      if (!lazy_fetched) {
        if (agg_chosen_bytes == sizeof(float)) {
          CHECK_EQ(arg_type.get_type(), kFLOAT);
          agg_fname += "_float";
        } else {
          CHECK_EQ(agg_chosen_bytes, sizeof(double));
          agg_fname += "_double";
        }
      }
    } else if (agg_chosen_bytes == sizeof(int32_t)) {
      agg_fname += "_int32";
    } else if (agg_chosen_bytes == sizeof(int16_t) &&
               query_mem_desc.didOutputColumnar()) {
      agg_fname += "_int16";
    } else if (agg_chosen_bytes == sizeof(int8_t) && query_mem_desc.didOutputColumnar()) {
      agg_fname += "_int8";
    }

    if (is_distinct_target(target_info)) {
      CHECK_EQ(agg_chosen_bytes, sizeof(int64_t));
      CHECK(!chosen_type.is_fp());
      group_by_and_agg->codegenCountDistinct(
          target_idx, target_expr, agg_args, query_mem_desc, co.device_type);
    } else if (target_info.agg_kind == kAPPROX_MEDIAN) {
      CHECK_EQ(agg_chosen_bytes, sizeof(int64_t));
      group_by_and_agg->codegenApproxMedian(
          target_idx, target_expr, agg_args, query_mem_desc, co.device_type);
    } else {
      const auto& arg_ti = target_info.agg_arg_type;
      if (need_skip_null && !arg_ti.is_geometry()) {
        agg_fname += "_skip_val";
      }

      if (target_info.agg_kind == kSINGLE_VALUE ||
          (need_skip_null && !arg_ti.is_geometry())) {
        llvm::Value* null_in_lv{nullptr};
        if (arg_ti.is_fp()) {
          null_in_lv =
              static_cast<llvm::Value*>(executor->cgen_state_->inlineFpNull(arg_ti));
        } else {
          null_in_lv = static_cast<llvm::Value*>(executor->cgen_state_->inlineIntNull(
              is_agg_domain_range_equivalent(target_info.agg_kind)
                  ? arg_ti
                  : target_info.sql_type));
        }
        CHECK(null_in_lv);
        auto null_lv =
            executor->cgen_state_->castToTypeIn(null_in_lv, (agg_chosen_bytes << 3));
        agg_args.push_back(null_lv);
      }
      if (!target_info.is_distinct) {
        if (co.device_type == ExecutorDeviceType::GPU &&
            query_mem_desc.threadsShareMemory()) {
          agg_fname += "_shared";
          if (needs_unnest_double_patch) {
            agg_fname = patch_agg_fname(agg_fname);
          }
        }
        auto agg_fname_call_ret_lv = group_by_and_agg->emitCall(agg_fname, agg_args);

        if (agg_fname.find("checked") != std::string::npos) {
          group_by_and_agg->checkErrorCode(agg_fname_call_ret_lv);
        }
      }
    }
    const auto window_func = dynamic_cast<const Analyzer::WindowFunction*>(target_expr);
    if (window_func && window_function_requires_peer_handling(window_func)) {
      const auto window_func_context =
          WindowProjectNodeContext::getActiveWindowFunctionContext(executor);
      const auto pending_outputs =
          LL_INT(window_func_context->aggregateStatePendingOutputs());
      executor->cgen_state_->emitExternalCall("add_window_pending_output",
                                              llvm::Type::getVoidTy(LL_CONTEXT),
                                              {agg_args.front(), pending_outputs});
      const auto& window_func_ti = window_func->get_type_info();
      std::string apply_window_pending_outputs_name = "apply_window_pending_outputs";
      switch (window_func_ti.get_type()) {
        case kFLOAT: {
          apply_window_pending_outputs_name += "_float";
          if (query_mem_desc.didOutputColumnar()) {
            apply_window_pending_outputs_name += "_columnar";
          }
          break;
        }
        case kDOUBLE: {
          apply_window_pending_outputs_name += "_double";
          break;
        }
        default: {
          apply_window_pending_outputs_name += "_int";
          if (query_mem_desc.didOutputColumnar()) {
            apply_window_pending_outputs_name +=
                std::to_string(window_func_ti.get_size() * 8);
          } else {
            apply_window_pending_outputs_name += "64";
          }
          break;
        }
      }
      const auto partition_end =
          LL_INT(reinterpret_cast<int64_t>(window_func_context->partitionEnd()));
      executor->cgen_state_->emitExternalCall(apply_window_pending_outputs_name,
                                              llvm::Type::getVoidTy(LL_CONTEXT),
                                              {pending_outputs,
                                               target_lvs.front(),
                                               partition_end,
                                               code_generator.posArg(nullptr)});
    }

    ++slot_index;
    ++target_lv_idx;
  }
}

Here is the call graph for this function:

Here is the caller graph for this function:
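
As the listing above shows, codegenAggregate() assembles the name of the runtime aggregate function from the base name plus suffixes: a width/type suffix (_float, _double, _int32, with _int16/_int8 variants only for columnar outputs), _skip_val when NULL inputs must be skipped, and _shared for GPU execution with thread-shared output buffers. A simplified, self-contained sketch of that naming scheme follows; make_agg_fname() is an illustrative helper, not the actual OmniSciDB code.

#include <cstddef>
#include <cstdint>
#include <iostream>
#include <string>

// Illustrative only: mirrors the suffixing pattern seen in codegenAggregate().
std::string make_agg_fname(std::string base,          // e.g. "agg_sum"
                           bool is_fp_arg,
                           size_t agg_chosen_bytes,    // compacted slot width
                           bool skip_null,
                           bool gpu_shared_memory) {
  if (is_fp_arg) {
    base += (agg_chosen_bytes == sizeof(float)) ? "_float" : "_double";
  } else if (agg_chosen_bytes == sizeof(int32_t)) {
    base += "_int32";
  }  // 16-bit and 8-bit variants apply only to columnar output buffers
  if (skip_null) {
    base += "_skip_val";
  }
  if (gpu_shared_memory) {
    base += "_shared";
  }
  return base;
}

int main() {
  // e.g. SUM(double_col) on GPU with a nullable input column:
  std::cout << make_agg_fname("agg_sum", true, 8, true, true) << "\n";
  // prints: agg_sum_double_skip_val_shared
}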

Member Data Documentation

int32_t TargetExprCodegen::base_slot_index

Definition at line 71 of file TargetExprBuilder.h.

Referenced by codegen().

const Analyzer::Expr* TargetExprCodegen::target_expr

Definition at line 68 of file TargetExprBuilder.h.

Referenced by codegen(), and codegenAggregate().

size_t TargetExprCodegen::target_idx

Definition at line 72 of file TargetExprBuilder.h.

Referenced by codegen(), and codegenAggregate().

TargetInfo TargetExprCodegen::target_info

Referenced by codegen(), and codegenAggregate().

The documentation for this struct was generated from the following files:

TargetExprBuilder.h
TargetExprBuilder.cpp