OmniSciDB  c1a53651b2
 All Classes Namespaces Files Functions Variables Typedefs Enumerations Enumerator Friends Macros Groups Pages
TargetExprCodegen Struct Reference

#include <TargetExprBuilder.h>

+ Collaboration diagram for TargetExprCodegen:

Public Member Functions

 TargetExprCodegen (const Analyzer::Expr *target_expr, TargetInfo &target_info, const int32_t base_slot_index, const size_t target_idx, const bool is_group_by)
 
void codegen (GroupByAndAggregate *group_by_and_agg, Executor *executor, const QueryMemoryDescriptor &query_mem_desc, const CompilationOptions &co, const GpuSharedMemoryContext &gpu_smem_context, const std::tuple< llvm::Value *, llvm::Value * > &agg_out_ptr_w_idx, const std::vector< llvm::Value * > &agg_out_vec, llvm::Value *output_buffer_byte_stream, llvm::Value *out_row_idx, llvm::Value *varlen_output_buffer, DiamondCodegen &diamond_codegen, DiamondCodegen *sample_cfg=nullptr) const
 
void codegenAggregate (GroupByAndAggregate *group_by_and_agg, Executor *executor, const QueryMemoryDescriptor &query_mem_desc, const CompilationOptions &co, const std::vector< llvm::Value * > &target_lvs, const std::tuple< llvm::Value *, llvm::Value * > &agg_out_ptr_w_idx, const std::vector< llvm::Value * > &agg_out_vec, llvm::Value *output_buffer_byte_stream, llvm::Value *out_row_idx, llvm::Value *varlen_output_buffer, int32_t slot_index) const
 

Public Attributes

const Analyzer::Expr * target_expr
 
TargetInfo target_info
 
int32_t base_slot_index
 
size_t target_idx
 
bool is_group_by
 

Detailed Description

Definition at line 33 of file TargetExprBuilder.h.

Constructor & Destructor Documentation

TargetExprCodegen::TargetExprCodegen ( const Analyzer::Expr *  target_expr,
TargetInfo &  target_info,
const int32_t  base_slot_index,
const size_t  target_idx,
const bool  is_group_by 
)
inline

Definition at line 34 of file TargetExprBuilder.h.

39  : target_expr(target_expr)
40  , target_info(target_info)
const Analyzer::Expr * target_expr

Member Function Documentation

void TargetExprCodegen::codegen ( GroupByAndAggregate *  group_by_and_agg,
Executor *  executor,
const QueryMemoryDescriptor &  query_mem_desc,
const CompilationOptions &  co,
const GpuSharedMemoryContext &  gpu_smem_context,
const std::tuple< llvm::Value *, llvm::Value * > &  agg_out_ptr_w_idx,
const std::vector< llvm::Value * > &  agg_out_vec,
llvm::Value *  output_buffer_byte_stream,
llvm::Value *  out_row_idx,
llvm::Value *  varlen_output_buffer,
DiamondCodegen &  diamond_codegen,
DiamondCodegen *  sample_cfg = nullptr 
) const

Definition at line 116 of file TargetExprBuilder.cpp.

References agg_arg(), anonymous_namespace{TargetExprBuilder.cpp}::agg_fn_base_names(), AUTOMATIC_IR_METADATA, base_slot_index, shared::bit_cast(), CHECK, CHECK_EQ, CHECK_GE, GroupByAndAggregate::codegenAggArg(), codegenAggregate(), GroupByAndAggregate::codegenWindowRowPointer(), CompilationOptions::device_type, QueryMemoryDescriptor::didOutputColumnar(), GroupByAndAggregate::emitCall(), g_bigint_count, get_int_type(), SQLTypeInfo::get_physical_coord_cols(), QueryMemoryDescriptor::getColOffInBytes(), QueryMemoryDescriptor::getColOnlyOffInBytes(), QueryMemoryDescriptor::getPaddedSlotWidthBytes(), GPU, TargetInfo::is_agg, SQLTypeInfo::is_geometry(), is_group_by, anonymous_namespace{TargetExprBuilder.cpp}::is_simple_count(), anonymous_namespace{TargetExprBuilder.cpp}::is_varlen_projection(), GpuSharedMemoryContext::isSharedMemoryUsed(), LL_BUILDER, LL_CONTEXT, LL_INT, LLVM_ALIGN, WindowProjectNodeContext::resetWindowFunctionContext(), TargetInfo::sql_type, target_expr, anonymous_namespace{TargetExprBuilder.cpp}::target_has_geo(), target_idx, target_info, QueryMemoryDescriptor::threadsShareMemory(), and window_function_is_aggregate().

128  {
129  CHECK(group_by_and_agg);
130  CHECK(executor);
131  AUTOMATIC_IR_METADATA(executor->cgen_state_.get());
132 
133  auto agg_out_ptr_w_idx = agg_out_ptr_w_idx_in;
134  const auto arg_expr = agg_arg(target_expr);
135 
136  const bool varlen_projection = is_varlen_projection(target_expr, target_info.sql_type);
137  const auto agg_fn_names = agg_fn_base_names(target_info, varlen_projection);
138  const auto window_func = dynamic_cast<const Analyzer::WindowFunction*>(target_expr);
140  auto target_lvs =
141  window_func
142  ? std::vector<llvm::Value*>{executor->codegenWindowFunction(target_idx, co)}
143  : group_by_and_agg->codegenAggArg(target_expr, co);
144  const auto window_row_ptr = window_func
145  ? group_by_and_agg->codegenWindowRowPointer(
146  window_func, query_mem_desc, co, diamond_codegen)
147  : nullptr;
148  if (window_row_ptr) {
149  agg_out_ptr_w_idx =
150  std::make_tuple(window_row_ptr, std::get<1>(agg_out_ptr_w_idx_in));
151  if (window_function_is_aggregate(window_func->getKind())) {
152  out_row_idx = window_row_ptr;
153  }
154  }
155 
156  llvm::Value* str_target_lv{nullptr};
157  if (target_lvs.size() == 3 && !target_has_geo(target_info)) {
158  // none encoding string, pop the packed pointer + length since
159  // it's only useful for IS NULL checks and assumed to be only
160  // two components (pointer and length) for the purpose of projection
161  str_target_lv = target_lvs.front();
162  target_lvs.erase(target_lvs.begin());
163  }
164  if (target_info.sql_type.is_geometry() && !varlen_projection) {
165  // Geo cols are expanded to the physical coord cols. Each physical coord col is an
166  // array. Ensure that the target values generated match the number of agg
167  // functions before continuing
168  if (target_lvs.size() < agg_fn_names.size()) {
169  CHECK_EQ(target_lvs.size(), agg_fn_names.size() / 2);
170  std::vector<llvm::Value*> new_target_lvs;
171  new_target_lvs.reserve(agg_fn_names.size());
172  for (const auto& target_lv : target_lvs) {
173  new_target_lvs.push_back(target_lv);
174  new_target_lvs.push_back(target_lv);
175  }
176  target_lvs = new_target_lvs;
177  }
178  }
179  if (target_lvs.size() < agg_fn_names.size()) {
180  CHECK_EQ(size_t(1), target_lvs.size());
181  CHECK_EQ(size_t(2), agg_fn_names.size());
182  for (size_t i = 1; i < agg_fn_names.size(); ++i) {
183  target_lvs.push_back(target_lvs.front());
184  }
185  } else {
187  if (!target_info.is_agg && !varlen_projection) {
188  CHECK_EQ(static_cast<size_t>(2 * target_info.sql_type.get_physical_coord_cols()),
189  target_lvs.size());
190  CHECK_EQ(agg_fn_names.size(), target_lvs.size());
191  }
192  } else {
193  CHECK(str_target_lv || (agg_fn_names.size() == target_lvs.size()));
194  CHECK(target_lvs.size() == 1 || target_lvs.size() == 2);
195  }
196  }
197 
198  int32_t slot_index = base_slot_index;
199  CHECK_GE(slot_index, 0);
200  CHECK(is_group_by || static_cast<size_t>(slot_index) < agg_out_vec.size());
201 
202  uint32_t col_off{0};
203  if (co.device_type == ExecutorDeviceType::GPU && query_mem_desc.threadsShareMemory() &&
205  (!arg_expr || arg_expr->get_type_info().get_notnull())) {
206  CHECK_EQ(size_t(1), agg_fn_names.size());
207  const auto chosen_bytes = query_mem_desc.getPaddedSlotWidthBytes(slot_index);
208  llvm::Value* agg_col_ptr{nullptr};
209  if (is_group_by) {
210  if (query_mem_desc.didOutputColumnar()) {
211  col_off = query_mem_desc.getColOffInBytes(slot_index);
212  CHECK_EQ(size_t(0), col_off % chosen_bytes);
213  col_off /= chosen_bytes;
214  CHECK(std::get<1>(agg_out_ptr_w_idx));
215  auto offset =
216  LL_BUILDER.CreateAdd(std::get<1>(agg_out_ptr_w_idx), LL_INT(col_off));
217  auto* bit_cast = LL_BUILDER.CreateBitCast(
218  std::get<0>(agg_out_ptr_w_idx),
219  llvm::PointerType::get(get_int_type((chosen_bytes << 3), LL_CONTEXT), 0));
220  agg_col_ptr = LL_BUILDER.CreateGEP(
221  bit_cast->getType()->getScalarType()->getPointerElementType(),
222  bit_cast,
223  offset);
224  } else {
225  col_off = query_mem_desc.getColOnlyOffInBytes(slot_index);
226  CHECK_EQ(size_t(0), col_off % chosen_bytes);
227  col_off /= chosen_bytes;
228  auto* bit_cast = LL_BUILDER.CreateBitCast(
229  std::get<0>(agg_out_ptr_w_idx),
230  llvm::PointerType::get(get_int_type((chosen_bytes << 3), LL_CONTEXT), 0));
231  agg_col_ptr = LL_BUILDER.CreateGEP(
232  bit_cast->getType()->getScalarType()->getPointerElementType(),
233  bit_cast,
234  LL_INT(col_off));
235  }
236  }
237 
238  if (chosen_bytes != sizeof(int32_t)) {
239  CHECK_EQ(8, chosen_bytes);
240  if (g_bigint_count) {
241  const auto acc_i64 = LL_BUILDER.CreateBitCast(
242  is_group_by ? agg_col_ptr : agg_out_vec[slot_index],
243  llvm::PointerType::get(get_int_type(64, LL_CONTEXT), 0));
244  if (gpu_smem_context.isSharedMemoryUsed()) {
245  group_by_and_agg->emitCall(
246  "agg_count_shared", std::vector<llvm::Value*>{acc_i64, LL_INT(int64_t(1))});
247  } else {
248  LL_BUILDER.CreateAtomicRMW(llvm::AtomicRMWInst::Add,
249  acc_i64,
250  LL_INT(int64_t(1)),
251 #if LLVM_VERSION_MAJOR > 12
252  LLVM_ALIGN(8),
253 #endif
254  llvm::AtomicOrdering::Monotonic);
255  }
256  } else {
257  auto acc_i32 = LL_BUILDER.CreateBitCast(
258  is_group_by ? agg_col_ptr : agg_out_vec[slot_index],
259  llvm::PointerType::get(get_int_type(32, LL_CONTEXT), 0));
260  if (gpu_smem_context.isSharedMemoryUsed()) {
261  acc_i32 = LL_BUILDER.CreatePointerCast(
262  acc_i32, llvm::Type::getInt32PtrTy(LL_CONTEXT, 3));
263  }
264  LL_BUILDER.CreateAtomicRMW(llvm::AtomicRMWInst::Add,
265  acc_i32,
266  LL_INT(1),
267 #if LLVM_VERSION_MAJOR > 12
268  LLVM_ALIGN(4),
269 #endif
270  llvm::AtomicOrdering::Monotonic);
271  }
272  } else {
273  const auto acc_i32 = (is_group_by ? agg_col_ptr : agg_out_vec[slot_index]);
274  if (gpu_smem_context.isSharedMemoryUsed()) {
275  // Atomic operation on address space level 3 (Shared):
276  const auto shared_acc_i32 = LL_BUILDER.CreatePointerCast(
277  acc_i32, llvm::Type::getInt32PtrTy(LL_CONTEXT, 3));
278  LL_BUILDER.CreateAtomicRMW(llvm::AtomicRMWInst::Add,
279  shared_acc_i32,
280  LL_INT(1),
281 #if LLVM_VERSION_MAJOR > 12
282  LLVM_ALIGN(4),
283 #endif
284  llvm::AtomicOrdering::Monotonic);
285  } else {
286  LL_BUILDER.CreateAtomicRMW(llvm::AtomicRMWInst::Add,
287  acc_i32,
288  LL_INT(1),
289 #if LLVM_VERSION_MAJOR > 12
290  LLVM_ALIGN(4),
291 #endif
292  llvm::AtomicOrdering::Monotonic);
293  }
294  }
295  return;
296  }
297 
298  codegenAggregate(group_by_and_agg,
299  executor,
300  query_mem_desc,
301  co,
302  target_lvs,
303  agg_out_ptr_w_idx,
304  agg_out_vec,
305  output_buffer_byte_stream,
306  out_row_idx,
307  varlen_output_buffer,
308  slot_index);
309 }
#define LL_BUILDER
const Analyzer::Expr * agg_arg(const Analyzer::Expr *expr)
#define CHECK_EQ(x, y)
Definition: Logger.h:301
bool target_has_geo(const TargetInfo &target_info)
std::vector< std::string > agg_fn_base_names(const TargetInfo &target_info, const bool is_varlen_projection)
SQLTypeInfo sql_type
Definition: TargetInfo.h:52
#define CHECK_GE(x, y)
Definition: Logger.h:306
llvm::Value * emitCall(const std::string &fname, const std::vector< llvm::Value * > &args)
#define LLVM_ALIGN(alignment)
llvm::Type * get_int_type(const int width, llvm::LLVMContext &context)
size_t getColOnlyOffInBytes(const size_t col_idx) const
bool is_varlen_projection(const Analyzer::Expr *target_expr, const SQLTypeInfo &ti)
bool is_agg
Definition: TargetInfo.h:50
#define LL_INT(v)
bool g_bigint_count
#define LL_CONTEXT
const int8_t getPaddedSlotWidthBytes(const size_t slot_idx) const
#define AUTOMATIC_IR_METADATA(CGENSTATE)
ExecutorDeviceType device_type
bool window_function_is_aggregate(const SqlWindowFunctionKind kind)
Definition: WindowContext.h:43
const Analyzer::Expr * target_expr
std::vector< llvm::Value * > codegenAggArg(const Analyzer::Expr *target_expr, const CompilationOptions &co)
llvm::Value * codegenWindowRowPointer(const Analyzer::WindowFunction *window_func, const QueryMemoryDescriptor &query_mem_desc, const CompilationOptions &co, DiamondCodegen &diamond_codegen)
TO bit_cast(FROM &&from)
Definition: misc.h:298
bool is_simple_count(const TargetInfo &target_info)
#define CHECK(condition)
Definition: Logger.h:291
bool is_geometry() const
Definition: sqltypes.h:592
static void resetWindowFunctionContext(Executor *executor)
int get_physical_coord_cols() const
Definition: sqltypes.h:433
size_t getColOffInBytes(const size_t col_idx) const
void codegenAggregate(GroupByAndAggregate *group_by_and_agg, Executor *executor, const QueryMemoryDescriptor &query_mem_desc, const CompilationOptions &co, const std::vector< llvm::Value * > &target_lvs, const std::tuple< llvm::Value *, llvm::Value * > &agg_out_ptr_w_idx, const std::vector< llvm::Value * > &agg_out_vec, llvm::Value *output_buffer_byte_stream, llvm::Value *out_row_idx, llvm::Value *varlen_output_buffer, int32_t slot_index) const

+ Here is the call graph for this function:

void TargetExprCodegen::codegenAggregate ( GroupByAndAggregate *  group_by_and_agg,
Executor *  executor,
const QueryMemoryDescriptor &  query_mem_desc,
const CompilationOptions &  co,
const std::vector< llvm::Value * > &  target_lvs,
const std::tuple< llvm::Value *, llvm::Value * > &  agg_out_ptr_w_idx,
const std::vector< llvm::Value * > &  agg_out_vec,
llvm::Value *  output_buffer_byte_stream,
llvm::Value *  out_row_idx,
llvm::Value *  varlen_output_buffer,
int32_t  slot_index 
) const

Definition at line 311 of file TargetExprBuilder.cpp.

References agg_arg(), TargetInfo::agg_arg_type, anonymous_namespace{TargetExprBuilder.cpp}::agg_fn_base_names(), TargetInfo::agg_kind, AUTOMATIC_IR_METADATA, CHECK, CHECK_EQ, CHECK_GE, CHECK_LT, GroupByAndAggregate::checkErrorCode(), CodeGenerator::codegen(), GroupByAndAggregate::codegenAggColumnPtr(), GroupByAndAggregate::codegenApproxQuantile(), GroupByAndAggregate::codegenCountDistinct(), GroupByAndAggregate::codegenMode(), DiamondCodegen::cond_false_, DiamondCodegen::cond_true_, GroupByAndAggregate::convertNullIfAny(), CompilationOptions::device_type, QueryMemoryDescriptor::didOutputColumnar(), GroupByAndAggregate::emitCall(), get_arg_by_name(), get_compact_type(), SQLTypeInfo::get_compression(), get_int_type(), SQLTypeInfo::get_type(), WindowProjectNodeContext::getActiveWindowFunctionContext(), QueryMemoryDescriptor::getColOnlyOffInBytes(), QueryMemoryDescriptor::getLogicalSlotWidthBytes(), QueryMemoryDescriptor::getPaddedSlotWidthBytes(), GPU, is_agg_domain_range_equivalent(), TargetInfo::is_distinct, is_distinct_target(), is_group_by, anonymous_namespace{TargetExprBuilder.cpp}::is_simple_count(), anonymous_namespace{TargetExprBuilder.cpp}::is_varlen_projection(), QueryMemoryDescriptor::isLogicalSizedColumnsAllowed(), kAPPROX_QUANTILE, kAVG, kDOUBLE, kENCODING_GEOINT, kFLOAT, kMODE, kNULLT, kPOINT, kSINGLE_VALUE, kSUM_IF, LL_BUILDER, LL_CONTEXT, LL_INT, GroupByAndAggregate::needsUnnestDoublePatch(), numeric_type_name(), patch_agg_fname(), CodeGenerator::posArg(), ROW_FUNC, TargetInfo::skip_null_val, TargetInfo::sql_type, takes_float_argument(), target_expr, anonymous_namespace{TargetExprBuilder.cpp}::target_has_geo(), target_idx, target_info, QueryMemoryDescriptor::threadsShareMemory(), to_string(), QueryMemoryDescriptor::varlenOutputBufferElemSize(), QueryMemoryDescriptor::varlenOutputRowSizeToSlot(), and window_function_requires_peer_handling().

Referenced by codegen().

322  {
323  AUTOMATIC_IR_METADATA(executor->cgen_state_.get());
324  size_t target_lv_idx = 0;
325  const bool lazy_fetched{executor->plan_state_->isLazyFetchColumn(target_expr)};
326 
327  CodeGenerator code_generator(executor);
328 
329  const auto agg_fn_names = agg_fn_base_names(
331  auto arg_expr = agg_arg(target_expr);
332 
333  for (const auto& agg_base_name : agg_fn_names) {
334  if (target_info.is_distinct && arg_expr->get_type_info().is_array()) {
335  CHECK_EQ(static_cast<size_t>(query_mem_desc.getLogicalSlotWidthBytes(slot_index)),
336  sizeof(int64_t));
337  // TODO(miyu): check if buffer may be columnar here
338  CHECK(!query_mem_desc.didOutputColumnar());
339  const auto& elem_ti = arg_expr->get_type_info().get_elem_type();
340  uint32_t col_off{0};
341  if (is_group_by) {
342  const auto col_off_in_bytes = query_mem_desc.getColOnlyOffInBytes(slot_index);
343  CHECK_EQ(size_t(0), col_off_in_bytes % sizeof(int64_t));
344  col_off /= sizeof(int64_t);
345  }
346  executor->cgen_state_->emitExternalCall(
347  "agg_count_distinct_array_" + numeric_type_name(elem_ti),
348  llvm::Type::getVoidTy(LL_CONTEXT),
349  {is_group_by ? LL_BUILDER.CreateGEP(std::get<0>(agg_out_ptr_w_idx)
350  ->getType()
351  ->getScalarType()
352  ->getPointerElementType(),
353  std::get<0>(agg_out_ptr_w_idx),
354  LL_INT(col_off))
355  : agg_out_vec[slot_index],
356  target_lvs[target_lv_idx],
357  code_generator.posArg(arg_expr),
358  elem_ti.is_fp()
359  ? static_cast<llvm::Value*>(executor->cgen_state_->inlineFpNull(elem_ti))
360  : static_cast<llvm::Value*>(
361  executor->cgen_state_->inlineIntNull(elem_ti))});
362  ++slot_index;
363  ++target_lv_idx;
364  continue;
365  }
366 
367  llvm::Value* agg_col_ptr{nullptr};
368  const auto chosen_bytes =
369  static_cast<size_t>(query_mem_desc.getPaddedSlotWidthBytes(slot_index));
370  const auto& chosen_type = get_compact_type(target_info);
371  const auto& arg_type =
372  ((arg_expr && arg_expr->get_type_info().get_type() != kNULLT) &&
376  const bool is_fp_arg =
377  !lazy_fetched && arg_type.get_type() != kNULLT && arg_type.is_fp();
378  if (is_group_by) {
379  agg_col_ptr = group_by_and_agg->codegenAggColumnPtr(output_buffer_byte_stream,
380  out_row_idx,
381  agg_out_ptr_w_idx,
382  query_mem_desc,
383  chosen_bytes,
384  slot_index,
385  target_idx);
386  CHECK(agg_col_ptr);
387  agg_col_ptr->setName("agg_col_ptr");
388  }
389 
391  CHECK(!query_mem_desc.didOutputColumnar());
392 
394  CHECK_LT(target_lv_idx, target_lvs.size());
395  CHECK(varlen_output_buffer);
396  auto target_lv = target_lvs[target_lv_idx];
397 
398  std::string agg_fname_suffix = "";
400  query_mem_desc.threadsShareMemory()) {
401  agg_fname_suffix += "_shared";
402  }
403 
404  // first write the varlen data into the varlen buffer and get the pointer location
405  // into the varlen buffer
406  auto& builder = executor->cgen_state_->ir_builder_;
407  auto orig_bb = builder.GetInsertBlock();
408  auto target_ptr_type = llvm::dyn_cast<llvm::PointerType>(target_lv->getType());
409  CHECK(target_ptr_type) << "Varlen projections expect a pointer input.";
410  auto is_nullptr =
411  builder.CreateICmp(llvm::CmpInst::ICMP_EQ,
412  target_lv,
413  llvm::ConstantPointerNull::get(llvm::PointerType::get(
414  target_ptr_type->getPointerElementType(), 0)));
415  llvm::BasicBlock* true_bb{nullptr};
416  {
417  DiamondCodegen nullcheck_diamond(
418  is_nullptr, executor, false, "varlen_null_check", nullptr, false);
419  // maintain a reference to the true bb, overriding the diamond codegen destructor
420  true_bb = nullcheck_diamond.cond_true_;
421  // if not null, process the pointer and insert it into the varlen buffer
422  builder.SetInsertPoint(nullcheck_diamond.cond_false_);
423  auto arr_ptr_lv = executor->cgen_state_->ir_builder_.CreateBitCast(
424  target_lv,
425  llvm::PointerType::get(get_int_type(8, executor->cgen_state_->context_), 0));
426  const int64_t chosen_bytes =
428  auto* arg = get_arg_by_name(ROW_FUNC, "old_total_matched");
429  const auto output_buffer_slot = LL_BUILDER.CreateZExt(
430  LL_BUILDER.CreateLoad(arg->getType()->getPointerElementType(), arg),
431  llvm::Type::getInt64Ty(LL_CONTEXT));
432  const auto varlen_buffer_row_sz = query_mem_desc.varlenOutputBufferElemSize();
433  CHECK(varlen_buffer_row_sz);
434  const auto output_buffer_slot_bytes = LL_BUILDER.CreateAdd(
435  LL_BUILDER.CreateMul(output_buffer_slot,
436  executor->cgen_state_->llInt(
437  static_cast<int64_t>(*varlen_buffer_row_sz))),
438  executor->cgen_state_->llInt(static_cast<int64_t>(
439  query_mem_desc.varlenOutputRowSizeToSlot(slot_index))));
440 
441  std::vector<llvm::Value*> varlen_agg_args{
442  executor->castToIntPtrTyIn(varlen_output_buffer, 8),
443  output_buffer_slot_bytes,
444  arr_ptr_lv,
445  executor->cgen_state_->llInt(chosen_bytes)};
446  auto varlen_offset_ptr =
447  group_by_and_agg->emitCall(agg_base_name + agg_fname_suffix, varlen_agg_args);
448 
449  // then write that pointer location into the 64 bit slot in the output buffer
450  auto varlen_offset_int = LL_BUILDER.CreatePtrToInt(
451  varlen_offset_ptr, llvm::Type::getInt64Ty(LL_CONTEXT));
452  builder.CreateBr(nullcheck_diamond.cond_true_);
453 
454  // use the true block to do the output buffer insertion regardless of nullness
455  builder.SetInsertPoint(nullcheck_diamond.cond_true_);
456  auto output_phi =
457  builder.CreatePHI(llvm::Type::getInt64Ty(executor->cgen_state_->context_), 2);
458  output_phi->addIncoming(varlen_offset_int, nullcheck_diamond.cond_false_);
459  output_phi->addIncoming(executor->cgen_state_->llInt(static_cast<int64_t>(0)),
460  orig_bb);
461 
462  std::vector<llvm::Value*> agg_args{agg_col_ptr, output_phi};
463  group_by_and_agg->emitCall("agg_id" + agg_fname_suffix, agg_args);
464  }
465  CHECK(true_bb);
466  builder.SetInsertPoint(true_bb);
467 
468  ++slot_index;
469  ++target_lv_idx;
470  continue;
471  }
472 
473  const bool float_argument_input = takes_float_argument(target_info);
474  const bool is_count_in_avg = target_info.agg_kind == kAVG && target_lv_idx == 1;
475  // The count component of an average should never be compacted.
476  const auto agg_chosen_bytes =
477  float_argument_input && !is_count_in_avg ? sizeof(float) : chosen_bytes;
478  if (float_argument_input) {
479  CHECK_GE(chosen_bytes, sizeof(float));
480  }
481 
482  auto target_lv = target_lvs[target_lv_idx];
483  const auto needs_unnest_double_patch = group_by_and_agg->needsUnnestDoublePatch(
484  target_lv, agg_base_name, query_mem_desc.threadsShareMemory(), co);
485  const auto need_skip_null = !needs_unnest_double_patch && target_info.skip_null_val;
486  if (!needs_unnest_double_patch) {
487  if (need_skip_null && !is_agg_domain_range_equivalent(target_info.agg_kind)) {
488  target_lv = group_by_and_agg->convertNullIfAny(arg_type, target_info, target_lv);
489  } else if (is_fp_arg) {
490  target_lv = executor->castToFP(target_lv, arg_type, target_info.sql_type);
491  }
492  if (!dynamic_cast<const Analyzer::AggExpr*>(target_expr) || arg_expr) {
493  target_lv =
494  executor->cgen_state_->castToTypeIn(target_lv, (agg_chosen_bytes << 3));
495  }
496  }
497 
498  const bool is_simple_count_target = is_simple_count(target_info);
499  llvm::Value* str_target_lv{nullptr};
500  if (target_lvs.size() == 3 && !target_has_geo(target_info)) {
501  // none encoding string
502  str_target_lv = target_lvs.front();
503  }
504  std::vector<llvm::Value*> agg_args{
505  executor->castToIntPtrTyIn((is_group_by ? agg_col_ptr : agg_out_vec[slot_index]),
506  (agg_chosen_bytes << 3)),
507  (is_simple_count_target && !arg_expr)
508  ? (agg_chosen_bytes == sizeof(int32_t) ? LL_INT(int32_t(0))
509  : LL_INT(int64_t(0)))
510  : (is_simple_count_target && arg_expr && str_target_lv ? str_target_lv
511  : target_lv)};
512  if (query_mem_desc.isLogicalSizedColumnsAllowed()) {
513  if (is_simple_count_target && arg_expr && str_target_lv) {
514  agg_args[1] =
515  agg_chosen_bytes == sizeof(int32_t) ? LL_INT(int32_t(0)) : LL_INT(int64_t(0));
516  }
517  }
518  std::string agg_fname{agg_base_name};
519  if (is_fp_arg) {
520  if (!lazy_fetched) {
521  if (agg_chosen_bytes == sizeof(float)) {
522  CHECK_EQ(arg_type.get_type(), kFLOAT);
523  agg_fname += "_float";
524  } else {
525  CHECK_EQ(agg_chosen_bytes, sizeof(double));
526  agg_fname += "_double";
527  }
528  }
529  } else if (agg_chosen_bytes == sizeof(int32_t)) {
530  agg_fname += "_int32";
531  } else if (agg_chosen_bytes == sizeof(int16_t) &&
532  query_mem_desc.didOutputColumnar()) {
533  agg_fname += "_int16";
534  } else if (agg_chosen_bytes == sizeof(int8_t) && query_mem_desc.didOutputColumnar()) {
535  agg_fname += "_int8";
536  }
537 
539  CHECK_EQ(agg_chosen_bytes, sizeof(int64_t));
540  CHECK(!chosen_type.is_fp());
541  group_by_and_agg->codegenCountDistinct(
542  target_idx, target_expr, agg_args, query_mem_desc, co.device_type);
543  } else if (target_info.agg_kind == kAPPROX_QUANTILE) {
544  CHECK_EQ(agg_chosen_bytes, sizeof(int64_t));
545  group_by_and_agg->codegenApproxQuantile(
546  target_idx, target_expr, agg_args, query_mem_desc, co.device_type);
547  } else if (target_info.agg_kind == kMODE) {
548  group_by_and_agg->codegenMode(
549  target_idx, target_expr, agg_args, query_mem_desc, co.device_type);
550  } else {
551  const auto& arg_ti = target_info.agg_arg_type;
552  if (need_skip_null && !arg_ti.is_geometry()) {
553  agg_fname += "_skip_val";
554  }
555 
557  (need_skip_null && !arg_ti.is_geometry())) {
558  llvm::Value* null_in_lv{nullptr};
559  if (arg_ti.is_fp()) {
560  null_in_lv = executor->cgen_state_->inlineFpNull(arg_ti);
561  } else {
562  null_in_lv = executor->cgen_state_->inlineIntNull(
564  ? arg_ti
566  }
567  CHECK(null_in_lv);
568  auto null_lv =
569  executor->cgen_state_->castToTypeIn(null_in_lv, (agg_chosen_bytes << 3));
570  agg_args.push_back(null_lv);
571  }
572  if (target_info.agg_kind == kSUM_IF) {
573  const auto agg_expr = dynamic_cast<const Analyzer::AggExpr*>(target_expr);
574  auto cond_expr_lv =
575  code_generator.codegen(agg_expr->get_arg1().get(), true, co).front();
576  auto cond_lv = executor->codegenConditionalAggregateCondValSelector(
577  cond_expr_lv, kSUM_IF, co);
578  agg_args.push_back(cond_lv);
579  }
580  if (!target_info.is_distinct) {
582  query_mem_desc.threadsShareMemory()) {
583  agg_fname += "_shared";
584  if (needs_unnest_double_patch) {
585  agg_fname = patch_agg_fname(agg_fname);
586  }
587  }
588  auto agg_fname_call_ret_lv = group_by_and_agg->emitCall(agg_fname, agg_args);
589 
590  if (agg_fname.find("checked") != std::string::npos) {
591  group_by_and_agg->checkErrorCode(agg_fname_call_ret_lv);
592  }
593  }
594  }
595  const auto window_func = dynamic_cast<const Analyzer::WindowFunction*>(target_expr);
596  // window function with framing has a different code path and codegen logic
597  if (window_func && !window_func->hasFraming() &&
599  const auto window_func_context =
601  const auto pending_outputs =
602  LL_INT(window_func_context->aggregateStatePendingOutputs());
603  executor->cgen_state_->emitExternalCall("add_window_pending_output",
604  llvm::Type::getVoidTy(LL_CONTEXT),
605  {agg_args.front(), pending_outputs});
606  const auto& window_func_ti = window_func->get_type_info();
607  std::string apply_window_pending_outputs_name = "apply_window_pending_outputs";
608  switch (window_func_ti.get_type()) {
609  case kFLOAT: {
610  apply_window_pending_outputs_name += "_float";
611  if (query_mem_desc.didOutputColumnar()) {
612  apply_window_pending_outputs_name += "_columnar";
613  }
614  break;
615  }
616  case kDOUBLE: {
617  apply_window_pending_outputs_name += "_double";
618  break;
619  }
620  default: {
621  apply_window_pending_outputs_name += "_int";
622  if (query_mem_desc.didOutputColumnar()) {
623  apply_window_pending_outputs_name +=
624  std::to_string(window_func_ti.get_size() * 8);
625  } else {
626  apply_window_pending_outputs_name += "64";
627  }
628  break;
629  }
630  }
631  const auto partition_end =
632  LL_INT(reinterpret_cast<int64_t>(window_func_context->partitionEnd()));
633  executor->cgen_state_->emitExternalCall(apply_window_pending_outputs_name,
634  llvm::Type::getVoidTy(LL_CONTEXT),
635  {pending_outputs,
636  target_lvs.front(),
637  partition_end,
638  code_generator.posArg(nullptr)});
639  }
640 
641  ++slot_index;
642  ++target_lv_idx;
643  }
644 }
size_t varlenOutputRowSizeToSlot(const size_t slot_idx) const
#define LL_BUILDER
const Analyzer::Expr * agg_arg(const Analyzer::Expr *expr)
#define CHECK_EQ(x, y)
Definition: Logger.h:301
bool target_has_geo(const TargetInfo &target_info)
llvm::Value * codegenAggColumnPtr(llvm::Value *output_buffer_byte_stream, llvm::Value *out_row_idx, const std::tuple< llvm::Value *, llvm::Value * > &agg_out_ptr_w_idx, const QueryMemoryDescriptor &query_mem_desc, const size_t chosen_bytes, const size_t agg_out_off, const size_t target_idx)
: returns the pointer to where the aggregation should be stored.
std::vector< std::string > agg_fn_base_names(const TargetInfo &target_info, const bool is_varlen_projection)
bool isLogicalSizedColumnsAllowed() const
void codegenMode(const size_t target_idx, const Analyzer::Expr *target_expr, std::vector< llvm::Value * > &agg_args, const QueryMemoryDescriptor &query_mem_desc, const ExecutorDeviceType device_type)
SQLTypeInfo sql_type
Definition: TargetInfo.h:52
bool is_agg_domain_range_equivalent(const SQLAgg agg_kind)
Definition: TargetInfo.h:79
#define CHECK_GE(x, y)
Definition: Logger.h:306
llvm::Value * emitCall(const std::string &fname, const std::vector< llvm::Value * > &args)
void codegenApproxQuantile(const size_t target_idx, const Analyzer::Expr *target_expr, std::vector< llvm::Value * > &agg_args, const QueryMemoryDescriptor &query_mem_desc, const ExecutorDeviceType device_type)
void checkErrorCode(llvm::Value *retCode)
bool takes_float_argument(const TargetInfo &target_info)
Definition: TargetInfo.h:102
HOST DEVICE SQLTypes get_type() const
Definition: sqltypes.h:381
bool needsUnnestDoublePatch(llvm::Value const *val_ptr, const std::string &agg_base_name, const bool threads_share_memory, const CompilationOptions &co) const
bool skip_null_val
Definition: TargetInfo.h:54
llvm::Type * get_int_type(const int width, llvm::LLVMContext &context)
static WindowFunctionContext * getActiveWindowFunctionContext(Executor *executor)
std::string to_string(char const *&&v)
SQLTypeInfo agg_arg_type
Definition: TargetInfo.h:53
std::string patch_agg_fname(const std::string &agg_name)
size_t getColOnlyOffInBytes(const size_t col_idx) const
const SQLTypeInfo get_compact_type(const TargetInfo &target)
bool is_varlen_projection(const Analyzer::Expr *target_expr, const SQLTypeInfo &ti)
llvm::Value * get_arg_by_name(llvm::Function *func, const std::string &name)
Definition: Execute.h:167
#define LL_INT(v)
llvm::Value * convertNullIfAny(const SQLTypeInfo &arg_type, const TargetInfo &agg_info, llvm::Value *target)
#define LL_CONTEXT
bool is_distinct_target(const TargetInfo &target_info)
Definition: TargetInfo.h:98
void codegenCountDistinct(const size_t target_idx, const Analyzer::Expr *target_expr, std::vector< llvm::Value * > &agg_args, const QueryMemoryDescriptor &, const ExecutorDeviceType)
const int8_t getPaddedSlotWidthBytes(const size_t slot_idx) const
#define AUTOMATIC_IR_METADATA(CGENSTATE)
SQLAgg agg_kind
Definition: TargetInfo.h:51
ExecutorDeviceType device_type
std::optional< size_t > varlenOutputBufferElemSize() const
#define CHECK_LT(x, y)
Definition: Logger.h:303
HOST DEVICE EncodingType get_compression() const
Definition: sqltypes.h:389
const Analyzer::Expr * target_expr
bool window_function_requires_peer_handling(const Analyzer::WindowFunction *window_func)
bool is_simple_count(const TargetInfo &target_info)
#define CHECK(condition)
Definition: Logger.h:291
std::string numeric_type_name(const SQLTypeInfo &ti)
Definition: Execute.h:209
bool is_distinct
Definition: TargetInfo.h:55
const int8_t getLogicalSlotWidthBytes(const size_t slot_idx) const
Definition: sqldefs.h:74
Definition: sqldefs.h:83
#define ROW_FUNC

+ Here is the call graph for this function:

+ Here is the caller graph for this function:

Member Data Documentation

int32_t TargetExprCodegen::base_slot_index

Definition at line 73 of file TargetExprBuilder.h.

Referenced by codegen().

const Analyzer::Expr* TargetExprCodegen::target_expr

Definition at line 70 of file TargetExprBuilder.h.

Referenced by codegen(), and codegenAggregate().

size_t TargetExprCodegen::target_idx

Definition at line 74 of file TargetExprBuilder.h.

Referenced by codegen(), and codegenAggregate().

TargetInfo TargetExprCodegen::target_info

The documentation for this struct was generated from the following files: