OmniSciDB  a5dc49c757
TargetExprCodegen Struct Reference

#include <TargetExprBuilder.h>


Public Member Functions

 TargetExprCodegen (const Analyzer::Expr *target_expr, TargetInfo &target_info, const int32_t base_slot_index, const size_t target_idx, const bool is_group_by)
 
void codegen (GroupByAndAggregate *group_by_and_agg, Executor *executor, const QueryMemoryDescriptor &query_mem_desc, const CompilationOptions &co, const GpuSharedMemoryContext &gpu_smem_context, const std::tuple< llvm::Value *, llvm::Value * > &agg_out_ptr_w_idx, const std::vector< llvm::Value * > &agg_out_vec, llvm::Value *output_buffer_byte_stream, llvm::Value *out_row_idx, llvm::Value *varlen_output_buffer, DiamondCodegen &diamond_codegen, DiamondCodegen *sample_cfg=nullptr) const
 
void codegenAggregate (GroupByAndAggregate *group_by_and_agg, Executor *executor, const QueryMemoryDescriptor &query_mem_desc, const CompilationOptions &co, const std::vector< llvm::Value * > &target_lvs, const std::tuple< llvm::Value *, llvm::Value * > &agg_out_ptr_w_idx, const std::vector< llvm::Value * > &agg_out_vec, llvm::Value *output_buffer_byte_stream, llvm::Value *out_row_idx, llvm::Value *varlen_output_buffer, int32_t slot_index) const
 

Public Attributes

const Analyzer::Expr * target_expr
 
TargetInfo target_info
 
int32_t base_slot_index
 
size_t target_idx
 
bool is_group_by
 

Friends

std::ostream & operator<< (std::ostream &os, const TargetExprCodegen &target_expr_codegen)
 

Detailed Description

Definition at line 33 of file TargetExprBuilder.h.
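TargetExprCodegen bundles everything needed to emit LLVM IR for one output target of a query: the target expression, its TargetInfo (aggregate kind, SQL type, null handling), the base output slot it writes to, its position in the target list, and whether the query is a group-by. The sketch below is a hypothetical caller, not code from the repository; every variable other than the constructor arguments (executor, group_by_and_agg, query_mem_desc, co, gpu_smem_context, agg_out_ptr_w_idx, agg_out_vec, output_buffer_byte_stream, out_row_idx, varlen_output_buffer, diamond_codegen) is assumed to have been set up earlier in the query compilation pipeline.

  // Hypothetical driver code; the surrounding state is an assumption about the caller,
  // not something defined in this file.
  TargetExprCodegen target_codegen(target_expr,   // Analyzer::Expr* for one output column
                                   target_info,   // aggregate kind, SQL type, null handling
                                   /*base_slot_index=*/0,
                                   /*target_idx=*/0,
                                   /*is_group_by=*/true);
  // Emits the IR that evaluates the target and writes or aggregates it into the
  // output buffer described by query_mem_desc.
  target_codegen.codegen(group_by_and_agg,
                         executor,
                         query_mem_desc,
                         co,
                         gpu_smem_context,
                         agg_out_ptr_w_idx,
                         agg_out_vec,
                         output_buffer_byte_stream,
                         out_row_idx,
                         varlen_output_buffer,
                         diamond_codegen);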

Constructor & Destructor Documentation

TargetExprCodegen::TargetExprCodegen ( const Analyzer::Expr *  target_expr,
TargetInfo &  target_info,
const int32_t  base_slot_index,
const size_t  target_idx,
const bool  is_group_by 
)
inline

Definition at line 34 of file TargetExprBuilder.h.

39  : target_expr(target_expr)
40  , target_info(target_info)
41  , base_slot_index(base_slot_index)
42  , target_idx(target_idx)
43  , is_group_by(is_group_by) {}

Member Function Documentation

void TargetExprCodegen::codegen ( GroupByAndAggregate *  group_by_and_agg,
Executor *  executor,
const QueryMemoryDescriptor &  query_mem_desc,
const CompilationOptions &  co,
const GpuSharedMemoryContext &  gpu_smem_context,
const std::tuple< llvm::Value *, llvm::Value * > &  agg_out_ptr_w_idx,
const std::vector< llvm::Value * > &  agg_out_vec,
llvm::Value *  output_buffer_byte_stream,
llvm::Value *  out_row_idx,
llvm::Value *  varlen_output_buffer,
DiamondCodegen &  diamond_codegen,
DiamondCodegen *  sample_cfg = nullptr 
) const

Definition at line 116 of file TargetExprBuilder.cpp.

References agg_arg(), anonymous_namespace{TargetExprBuilder.cpp}::agg_fn_base_names(), AUTOMATIC_IR_METADATA, base_slot_index, shared::bit_cast(), CHECK, CHECK_EQ, CHECK_GE, GroupByAndAggregate::codegenAggArg(), codegenAggregate(), GroupByAndAggregate::codegenWindowRowPointer(), CompilationOptions::device_type, QueryMemoryDescriptor::didOutputColumnar(), GroupByAndAggregate::emitCall(), g_bigint_count, get_int_type(), SQLTypeInfo::get_physical_coord_cols(), QueryMemoryDescriptor::getColOffInBytes(), QueryMemoryDescriptor::getColOnlyOffInBytes(), QueryMemoryDescriptor::getPaddedSlotWidthBytes(), GPU, TargetInfo::is_agg, SQLTypeInfo::is_geometry(), is_group_by, anonymous_namespace{TargetExprBuilder.cpp}::is_simple_count(), anonymous_namespace{TargetExprBuilder.cpp}::is_varlen_projection(), GpuSharedMemoryContext::isSharedMemoryUsed(), LL_BUILDER, LL_CONTEXT, LL_INT, LLVM_ALIGN, WindowProjectNodeContext::resetWindowFunctionContext(), TargetInfo::sql_type, target_expr, anonymous_namespace{TargetExprBuilder.cpp}::target_has_geo(), target_idx, target_info, QueryMemoryDescriptor::threadsShareMemory(), SQLTypeInfo::usesFlatBuffer(), and window_function_is_aggregate().

128  {
129  CHECK(group_by_and_agg);
130  CHECK(executor);
131  AUTOMATIC_IR_METADATA(executor->cgen_state_.get());
132  auto agg_out_ptr_w_idx = agg_out_ptr_w_idx_in;
133  const auto arg_expr = agg_arg(target_expr);
134  const bool varlen_projection = is_varlen_projection(target_expr, target_info.sql_type);
135  const bool uses_flatbuffer = target_info.sql_type.usesFlatBuffer();
136  const auto agg_fn_names = agg_fn_base_names(target_info, varlen_projection);
137  const auto window_func = dynamic_cast<const Analyzer::WindowFunction*>(target_expr);
138  WindowProjectNodeContext::resetWindowFunctionContext(executor);
139  auto target_lvs =
140  window_func
141  ? std::vector<llvm::Value*>{executor->codegenWindowFunction(target_idx, co)}
142  : group_by_and_agg->codegenAggArg(target_expr, co);
143  const auto window_row_ptr = window_func
144  ? group_by_and_agg->codegenWindowRowPointer(
145  window_func, query_mem_desc, co, diamond_codegen)
146  : nullptr;
147  if (window_row_ptr) {
148  agg_out_ptr_w_idx =
149  std::make_tuple(window_row_ptr, std::get<1>(agg_out_ptr_w_idx_in));
150  if (window_function_is_aggregate(window_func->getKind())) {
151  out_row_idx = window_row_ptr;
152  }
153  }
154 
155  llvm::Value* str_target_lv{nullptr};
156  if (target_lvs.size() == 3 && !target_has_geo(target_info)) {
157  // none encoding string, pop the packed pointer + length since
158  // it's only useful for IS NULL checks and assumed to be only
159  // two components (pointer and length) for the purpose of projection
160  str_target_lv = target_lvs.front();
161  target_lvs.erase(target_lvs.begin());
162  }
163  if (target_info.sql_type.is_geometry() && !varlen_projection) {
164  // Geo cols are expanded to the physical coord cols. Each physical coord col is an
165  // array. Ensure that the target values generated match the number of agg
166  // functions before continuing
167  if (target_lvs.size() < agg_fn_names.size()) {
168  if (!uses_flatbuffer) {
169  CHECK_EQ(target_lvs.size(), agg_fn_names.size() / 2);
170  }
171  std::vector<llvm::Value*> new_target_lvs;
172  new_target_lvs.reserve(agg_fn_names.size());
173  for (const auto& target_lv : target_lvs) {
174  new_target_lvs.push_back(target_lv);
175  new_target_lvs.push_back(target_lv);
176  }
177  target_lvs = new_target_lvs;
178  }
179  }
180  if (target_lvs.size() < agg_fn_names.size()) {
181  if (!uses_flatbuffer) {
182  CHECK_EQ(size_t(1), target_lvs.size());
183  CHECK_EQ(size_t(2), agg_fn_names.size());
184  }
185  for (size_t i = 1; i < agg_fn_names.size(); ++i) {
186  target_lvs.push_back(target_lvs.front());
187  }
188  } else {
189  if (target_has_geo(target_info)) {
190  if (!target_info.is_agg && !varlen_projection) {
191  if (!uses_flatbuffer) {
192  CHECK_EQ(
193  static_cast<size_t>(2 * target_info.sql_type.get_physical_coord_cols()),
194  target_lvs.size());
195  }
196  CHECK_EQ(agg_fn_names.size(), target_lvs.size());
197  }
198  } else {
199  CHECK(str_target_lv || (agg_fn_names.size() == target_lvs.size()));
200  CHECK(target_lvs.size() == 1 || target_lvs.size() == 2);
201  }
202  }
203 
204  int32_t slot_index = base_slot_index;
205  CHECK_GE(slot_index, 0);
206  CHECK(is_group_by || static_cast<size_t>(slot_index) < agg_out_vec.size());
207 
208  uint32_t col_off{0};
209  if (co.device_type == ExecutorDeviceType::GPU && query_mem_desc.threadsShareMemory() &&
210  is_simple_count(target_info) &&
211  (!arg_expr || arg_expr->get_type_info().get_notnull())) {
212  CHECK_EQ(size_t(1), agg_fn_names.size());
213  const auto chosen_bytes = query_mem_desc.getPaddedSlotWidthBytes(slot_index);
214  llvm::Value* agg_col_ptr{nullptr};
215  if (is_group_by) {
216  if (query_mem_desc.didOutputColumnar()) {
217  col_off = query_mem_desc.getColOffInBytes(slot_index);
218  CHECK_EQ(size_t(0), col_off % chosen_bytes);
219  col_off /= chosen_bytes;
220  CHECK(std::get<1>(agg_out_ptr_w_idx));
221  auto offset =
222  LL_BUILDER.CreateAdd(std::get<1>(agg_out_ptr_w_idx), LL_INT(col_off));
223  auto* bit_cast = LL_BUILDER.CreateBitCast(
224  std::get<0>(agg_out_ptr_w_idx),
225  llvm::PointerType::get(get_int_type((chosen_bytes << 3), LL_CONTEXT), 0));
226  agg_col_ptr = LL_BUILDER.CreateGEP(
227  bit_cast->getType()->getScalarType()->getPointerElementType(),
228  bit_cast,
229  offset);
230  } else {
231  col_off = query_mem_desc.getColOnlyOffInBytes(slot_index);
232  CHECK_EQ(size_t(0), col_off % chosen_bytes);
233  col_off /= chosen_bytes;
234  auto* bit_cast = LL_BUILDER.CreateBitCast(
235  std::get<0>(agg_out_ptr_w_idx),
236  llvm::PointerType::get(get_int_type((chosen_bytes << 3), LL_CONTEXT), 0));
237  agg_col_ptr = LL_BUILDER.CreateGEP(
238  bit_cast->getType()->getScalarType()->getPointerElementType(),
239  bit_cast,
240  LL_INT(col_off));
241  }
242  }
243 
244  if (chosen_bytes != sizeof(int32_t)) {
245  CHECK_EQ(8, chosen_bytes);
246  if (g_bigint_count) {
247  const auto acc_i64 = LL_BUILDER.CreateBitCast(
248  is_group_by ? agg_col_ptr : agg_out_vec[slot_index],
249  llvm::PointerType::get(get_int_type(64, LL_CONTEXT), 0));
250  if (gpu_smem_context.isSharedMemoryUsed()) {
251  group_by_and_agg->emitCall(
252  "agg_count_shared", std::vector<llvm::Value*>{acc_i64, LL_INT(int64_t(1))});
253  } else {
254  LL_BUILDER.CreateAtomicRMW(llvm::AtomicRMWInst::Add,
255  acc_i64,
256  LL_INT(int64_t(1)),
257 #if LLVM_VERSION_MAJOR > 12
258  LLVM_ALIGN(8),
259 #endif
260  llvm::AtomicOrdering::Monotonic);
261  }
262  } else {
263  auto acc_i32 = LL_BUILDER.CreateBitCast(
264  is_group_by ? agg_col_ptr : agg_out_vec[slot_index],
265  llvm::PointerType::get(get_int_type(32, LL_CONTEXT), 0));
266  if (gpu_smem_context.isSharedMemoryUsed()) {
267  acc_i32 = LL_BUILDER.CreatePointerCast(
268  acc_i32, llvm::Type::getInt32PtrTy(LL_CONTEXT, 3));
269  }
270  LL_BUILDER.CreateAtomicRMW(llvm::AtomicRMWInst::Add,
271  acc_i32,
272  LL_INT(1),
273 #if LLVM_VERSION_MAJOR > 12
274  LLVM_ALIGN(4),
275 #endif
276  llvm::AtomicOrdering::Monotonic);
277  }
278  } else {
279  const auto acc_i32 = (is_group_by ? agg_col_ptr : agg_out_vec[slot_index]);
280  if (gpu_smem_context.isSharedMemoryUsed()) {
281  // Atomic operation on address space level 3 (Shared):
282  const auto shared_acc_i32 = LL_BUILDER.CreatePointerCast(
283  acc_i32, llvm::Type::getInt32PtrTy(LL_CONTEXT, 3));
284  LL_BUILDER.CreateAtomicRMW(llvm::AtomicRMWInst::Add,
285  shared_acc_i32,
286  LL_INT(1),
287 #if LLVM_VERSION_MAJOR > 12
288  LLVM_ALIGN(4),
289 #endif
290  llvm::AtomicOrdering::Monotonic);
291  } else {
292  LL_BUILDER.CreateAtomicRMW(llvm::AtomicRMWInst::Add,
293  acc_i32,
294  LL_INT(1),
295 #if LLVM_VERSION_MAJOR > 12
296  LLVM_ALIGN(4),
297 #endif
298  llvm::AtomicOrdering::Monotonic);
299  }
300  }
301  return;
302  }
303 
304  codegenAggregate(group_by_and_agg,
305  executor,
306  query_mem_desc,
307  co,
308  target_lvs,
309  agg_out_ptr_w_idx,
310  agg_out_vec,
311  output_buffer_byte_stream,
312  out_row_idx,
313  varlen_output_buffer,
314  slot_index);
315 }
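The early-return branch above is a fast path for a not-null COUNT target on GPU when threads share the output buffer: instead of calling a runtime aggregate function per row, it emits a single atomic increment of the count slot. The helper below is a hypothetical, runtime-equivalent sketch of the address arithmetic and atomic update that the generated IR performs; it is illustrative only and not part of the code base (group_buffer and row_idx stand in for std::get<0>() and std::get<1>() of agg_out_ptr_w_idx, and the QueryMemoryDescriptor declaration is assumed to be in scope).

  #include <cstdint>

  // Sketch only: mirrors the pointer math of the columnar / row-wise cases above.
  void atomic_count_equivalent(int8_t* group_buffer,
                               int64_t row_idx,
                               const QueryMemoryDescriptor& query_mem_desc,
                               const size_t slot_index) {
    const auto chosen_bytes = query_mem_desc.getPaddedSlotWidthBytes(slot_index);
    const bool columnar = query_mem_desc.didOutputColumnar();
    // Columnar layout: column base offset plus row index; row-wise: offset within the row.
    const auto col_off_bytes = columnar ? query_mem_desc.getColOffInBytes(slot_index)
                                        : query_mem_desc.getColOnlyOffInBytes(slot_index);
    auto* slot_base = group_buffer + col_off_bytes;
    if (chosen_bytes == sizeof(int32_t)) {
      auto* slot = reinterpret_cast<int32_t*>(slot_base) + (columnar ? row_idx : 0);
      __atomic_fetch_add(slot, 1, __ATOMIC_RELAXED);  // "Monotonic" in LLVM terms
    } else {
      // 8-byte slots: a 64-bit add when g_bigint_count is set; otherwise the IR adds a
      // 32-bit 1 into the slot instead.
      auto* slot = reinterpret_cast<int64_t*>(slot_base) + (columnar ? row_idx : 0);
      __atomic_fetch_add(slot, int64_t{1}, __ATOMIC_RELAXED);
    }
  }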

void TargetExprCodegen::codegenAggregate ( GroupByAndAggregate *  group_by_and_agg,
Executor *  executor,
const QueryMemoryDescriptor &  query_mem_desc,
const CompilationOptions &  co,
const std::vector< llvm::Value * > &  target_lvs,
const std::tuple< llvm::Value *, llvm::Value * > &  agg_out_ptr_w_idx,
const std::vector< llvm::Value * > &  agg_out_vec,
llvm::Value *  output_buffer_byte_stream,
llvm::Value *  out_row_idx,
llvm::Value *  varlen_output_buffer,
int32_t  slot_index 
) const

Definition at line 317 of file TargetExprBuilder.cpp.

References agg_arg(), TargetInfo::agg_arg_type, anonymous_namespace{TargetExprBuilder.cpp}::agg_fn_base_names(), TargetInfo::agg_kind, AUTOMATIC_IR_METADATA, CHECK, CHECK_EQ, CHECK_GE, CHECK_LT, GroupByAndAggregate::checkErrorCode(), CodeGenerator::codegen(), GroupByAndAggregate::codegenAggColumnPtr(), GroupByAndAggregate::codegenApproxQuantile(), GroupByAndAggregate::codegenCountDistinct(), GroupByAndAggregate::codegenMode(), DiamondCodegen::cond_false_, DiamondCodegen::cond_true_, GroupByAndAggregate::convertNullIfAny(), CompilationOptions::device_type, QueryMemoryDescriptor::didOutputColumnar(), GroupByAndAggregate::emitCall(), get_arg_by_name(), get_compact_type(), SQLTypeInfo::get_compression(), get_int_type(), SQLTypeInfo::get_type(), WindowProjectNodeContext::getActiveWindowFunctionContext(), QueryMemoryDescriptor::getColOnlyOffInBytes(), QueryMemoryDescriptor::getLogicalSlotWidthBytes(), QueryMemoryDescriptor::getPaddedSlotWidthBytes(), GPU, is_agg_domain_range_equivalent(), TargetInfo::is_distinct, is_distinct_target(), is_group_by, anonymous_namespace{TargetExprBuilder.cpp}::is_simple_count(), anonymous_namespace{TargetExprBuilder.cpp}::is_varlen_projection(), QueryMemoryDescriptor::isLogicalSizedColumnsAllowed(), kAPPROX_QUANTILE, kAVG, kDOUBLE, kENCODING_GEOINT, kFLOAT, kMODE, kNULLT, kPOINT, kSINGLE_VALUE, kSUM_IF, LL_BUILDER, LL_CONTEXT, LL_INT, GroupByAndAggregate::needsUnnestDoublePatch(), numeric_type_name(), patch_agg_fname(), CodeGenerator::posArg(), ROW_FUNC, TargetInfo::skip_null_val, TargetInfo::sql_type, takes_float_argument(), target_expr, anonymous_namespace{TargetExprBuilder.cpp}::target_has_geo(), target_idx, target_info, QueryMemoryDescriptor::threadsShareMemory(), to_string(), QueryMemoryDescriptor::varlenOutputBufferElemSize(), QueryMemoryDescriptor::varlenOutputRowSizeToSlot(), and window_function_requires_peer_handling().

Referenced by codegen().

328  {
329  AUTOMATIC_IR_METADATA(executor->cgen_state_.get());
330  size_t target_lv_idx = 0;
331  const bool lazy_fetched{executor->plan_state_->isLazyFetchColumn(target_expr)};
332 
333  CodeGenerator code_generator(executor);
334 
335  const auto agg_fn_names = agg_fn_base_names(
336  target_info, is_varlen_projection(target_expr, target_info.sql_type));
337  auto arg_expr = agg_arg(target_expr);
338 
339  for (const auto& agg_base_name : agg_fn_names) {
340  if (target_info.is_distinct && arg_expr->get_type_info().is_array()) {
341  CHECK_EQ(static_cast<size_t>(query_mem_desc.getLogicalSlotWidthBytes(slot_index)),
342  sizeof(int64_t));
343  // TODO(miyu): check if buffer may be columnar here
344  CHECK(!query_mem_desc.didOutputColumnar());
345  const auto& elem_ti = arg_expr->get_type_info().get_elem_type();
346  uint32_t col_off{0};
347  if (is_group_by) {
348  const auto col_off_in_bytes = query_mem_desc.getColOnlyOffInBytes(slot_index);
349  CHECK_EQ(size_t(0), col_off_in_bytes % sizeof(int64_t));
350  col_off /= sizeof(int64_t);
351  }
352  executor->cgen_state_->emitExternalCall(
353  "agg_count_distinct_array_" + numeric_type_name(elem_ti),
354  llvm::Type::getVoidTy(LL_CONTEXT),
355  {is_group_by ? LL_BUILDER.CreateGEP(std::get<0>(agg_out_ptr_w_idx)
356  ->getType()
357  ->getScalarType()
358  ->getPointerElementType(),
359  std::get<0>(agg_out_ptr_w_idx),
360  LL_INT(col_off))
361  : agg_out_vec[slot_index],
362  target_lvs[target_lv_idx],
363  code_generator.posArg(arg_expr),
364  elem_ti.is_fp()
365  ? static_cast<llvm::Value*>(executor->cgen_state_->inlineFpNull(elem_ti))
366  : static_cast<llvm::Value*>(
367  executor->cgen_state_->inlineIntNull(elem_ti))});
368  ++slot_index;
369  ++target_lv_idx;
370  continue;
371  }
372 
373  llvm::Value* agg_col_ptr{nullptr};
374  const auto chosen_bytes =
375  static_cast<size_t>(query_mem_desc.getPaddedSlotWidthBytes(slot_index));
376  const auto& chosen_type = get_compact_type(target_info);
377  const auto& arg_type =
378  ((arg_expr && arg_expr->get_type_info().get_type() != kNULLT) &&
379  !target_info.is_distinct)
380  ? target_info.agg_arg_type
381  : target_info.sql_type;
382  const bool is_fp_arg =
383  !lazy_fetched && arg_type.get_type() != kNULLT && arg_type.is_fp();
384  if (is_group_by) {
385  agg_col_ptr = group_by_and_agg->codegenAggColumnPtr(output_buffer_byte_stream,
386  out_row_idx,
387  agg_out_ptr_w_idx,
388  query_mem_desc,
389  chosen_bytes,
390  slot_index,
391  target_idx);
392  CHECK(agg_col_ptr);
393  agg_col_ptr->setName("agg_col_ptr");
394  }
395 
396  if (is_varlen_projection(target_expr, target_info.sql_type)) {
397  CHECK(!query_mem_desc.didOutputColumnar());
398 
400  CHECK_LT(target_lv_idx, target_lvs.size());
401  CHECK(varlen_output_buffer);
402  auto target_lv = target_lvs[target_lv_idx];
403 
404  std::string agg_fname_suffix = "";
405  if (co.device_type == ExecutorDeviceType::GPU &&
406  query_mem_desc.threadsShareMemory()) {
407  agg_fname_suffix += "_shared";
408  }
409 
410  // first write the varlen data into the varlen buffer and get the pointer location
411  // into the varlen buffer
412  auto& builder = executor->cgen_state_->ir_builder_;
413  auto orig_bb = builder.GetInsertBlock();
414  auto target_ptr_type = llvm::dyn_cast<llvm::PointerType>(target_lv->getType());
415  CHECK(target_ptr_type) << "Varlen projections expect a pointer input.";
416  auto is_nullptr =
417  builder.CreateICmp(llvm::CmpInst::ICMP_EQ,
418  target_lv,
419  llvm::ConstantPointerNull::get(llvm::PointerType::get(
420  target_ptr_type->getPointerElementType(), 0)));
421  llvm::BasicBlock* true_bb{nullptr};
422  {
423  DiamondCodegen nullcheck_diamond(
424  is_nullptr, executor, false, "varlen_null_check", nullptr, false);
425  // maintain a reference to the true bb, overriding the diamond codegen destructor
426  true_bb = nullcheck_diamond.cond_true_;
427  // if not null, process the pointer and insert it into the varlen buffer
428  builder.SetInsertPoint(nullcheck_diamond.cond_false_);
429  auto arr_ptr_lv = executor->cgen_state_->ir_builder_.CreateBitCast(
430  target_lv,
431  llvm::PointerType::get(get_int_type(8, executor->cgen_state_->context_), 0));
432  const int64_t chosen_bytes =
434  auto* arg = get_arg_by_name(ROW_FUNC, "old_total_matched");
435  const auto output_buffer_slot = LL_BUILDER.CreateZExt(
436  LL_BUILDER.CreateLoad(arg->getType()->getPointerElementType(), arg),
437  llvm::Type::getInt64Ty(LL_CONTEXT));
438  const auto varlen_buffer_row_sz = query_mem_desc.varlenOutputBufferElemSize();
439  CHECK(varlen_buffer_row_sz);
440  const auto output_buffer_slot_bytes = LL_BUILDER.CreateAdd(
441  LL_BUILDER.CreateMul(output_buffer_slot,
442  executor->cgen_state_->llInt(
443  static_cast<int64_t>(*varlen_buffer_row_sz))),
444  executor->cgen_state_->llInt(static_cast<int64_t>(
445  query_mem_desc.varlenOutputRowSizeToSlot(slot_index))));
446 
447  std::vector<llvm::Value*> varlen_agg_args{
448  executor->castToIntPtrTyIn(varlen_output_buffer, 8),
449  output_buffer_slot_bytes,
450  arr_ptr_lv,
451  executor->cgen_state_->llInt(chosen_bytes)};
452  auto varlen_offset_ptr =
453  group_by_and_agg->emitCall(agg_base_name + agg_fname_suffix, varlen_agg_args);
454 
455  // then write that pointer location into the 64 bit slot in the output buffer
456  auto varlen_offset_int = LL_BUILDER.CreatePtrToInt(
457  varlen_offset_ptr, llvm::Type::getInt64Ty(LL_CONTEXT));
458  builder.CreateBr(nullcheck_diamond.cond_true_);
459 
460  // use the true block to do the output buffer insertion regardless of nullness
461  builder.SetInsertPoint(nullcheck_diamond.cond_true_);
462  auto output_phi =
463  builder.CreatePHI(llvm::Type::getInt64Ty(executor->cgen_state_->context_), 2);
464  output_phi->addIncoming(varlen_offset_int, nullcheck_diamond.cond_false_);
465  output_phi->addIncoming(executor->cgen_state_->llInt(static_cast<int64_t>(0)),
466  orig_bb);
467 
468  std::vector<llvm::Value*> agg_args{agg_col_ptr, output_phi};
469  group_by_and_agg->emitCall("agg_id" + agg_fname_suffix, agg_args);
470  }
471  CHECK(true_bb);
472  builder.SetInsertPoint(true_bb);
473 
474  ++slot_index;
475  ++target_lv_idx;
476  continue;
477  }
478 
479  const bool float_argument_input = takes_float_argument(target_info);
480  const bool is_count_in_avg = target_info.agg_kind == kAVG && target_lv_idx == 1;
481  // The count component of an average should never be compacted.
482  const auto agg_chosen_bytes =
483  float_argument_input && !is_count_in_avg ? sizeof(float) : chosen_bytes;
484  if (float_argument_input) {
485  CHECK_GE(chosen_bytes, sizeof(float));
486  }
487 
488  auto target_lv = target_lvs[target_lv_idx];
489  const auto needs_unnest_double_patch = group_by_and_agg->needsUnnestDoublePatch(
490  target_lv, agg_base_name, query_mem_desc.threadsShareMemory(), co);
491  const auto need_skip_null = !needs_unnest_double_patch && target_info.skip_null_val;
492  if (!needs_unnest_double_patch) {
493  if (need_skip_null && !is_agg_domain_range_equivalent(target_info.agg_kind)) {
494  target_lv = group_by_and_agg->convertNullIfAny(arg_type, target_info, target_lv);
495  } else if (is_fp_arg) {
496  target_lv = executor->castToFP(target_lv, arg_type, target_info.sql_type);
497  }
498  if (!dynamic_cast<const Analyzer::AggExpr*>(target_expr) || arg_expr) {
499  target_lv =
500  executor->cgen_state_->castToTypeIn(target_lv, (agg_chosen_bytes << 3));
501  }
502  }
503 
504  const bool is_simple_count_target = is_simple_count(target_info);
505  llvm::Value* str_target_lv{nullptr};
506  if (target_lvs.size() == 3 && !target_has_geo(target_info)) {
507  // none encoding string
508  str_target_lv = target_lvs.front();
509  }
510  std::vector<llvm::Value*> agg_args{
511  executor->castToIntPtrTyIn((is_group_by ? agg_col_ptr : agg_out_vec[slot_index]),
512  (agg_chosen_bytes << 3)),
513  (is_simple_count_target && !arg_expr)
514  ? (agg_chosen_bytes == sizeof(int32_t) ? LL_INT(int32_t(0))
515  : LL_INT(int64_t(0)))
516  : (is_simple_count_target && arg_expr && str_target_lv ? str_target_lv
517  : target_lv)};
518  if (query_mem_desc.isLogicalSizedColumnsAllowed()) {
519  if (is_simple_count_target && arg_expr && str_target_lv) {
520  agg_args[1] =
521  agg_chosen_bytes == sizeof(int32_t) ? LL_INT(int32_t(0)) : LL_INT(int64_t(0));
522  }
523  }
524  std::string agg_fname{agg_base_name};
525  if (is_fp_arg) {
526  if (!lazy_fetched) {
527  if (agg_chosen_bytes == sizeof(float)) {
528  CHECK_EQ(arg_type.get_type(), kFLOAT);
529  agg_fname += "_float";
530  } else {
531  CHECK_EQ(agg_chosen_bytes, sizeof(double));
532  agg_fname += "_double";
533  }
534  }
535  } else if (agg_chosen_bytes == sizeof(int32_t)) {
536  agg_fname += "_int32";
537  } else if (agg_chosen_bytes == sizeof(int16_t) &&
538  query_mem_desc.didOutputColumnar()) {
539  agg_fname += "_int16";
540  } else if (agg_chosen_bytes == sizeof(int8_t) && query_mem_desc.didOutputColumnar()) {
541  agg_fname += "_int8";
542  }
543 
544  if (is_distinct_target(target_info)) {
545  CHECK_EQ(agg_chosen_bytes, sizeof(int64_t));
546  CHECK(!chosen_type.is_fp());
547  group_by_and_agg->codegenCountDistinct(
548  target_idx, target_expr, agg_args, query_mem_desc, co.device_type);
549  } else if (target_info.agg_kind == kAPPROX_QUANTILE) {
550  CHECK_EQ(agg_chosen_bytes, sizeof(int64_t));
551  group_by_and_agg->codegenApproxQuantile(
552  target_idx, target_expr, agg_args, query_mem_desc, co.device_type);
553  } else if (target_info.agg_kind == kMODE) {
554  group_by_and_agg->codegenMode(
555  target_idx, target_expr, agg_args, query_mem_desc, co.device_type);
556  } else {
557  const auto& arg_ti = target_info.agg_arg_type;
558  if (need_skip_null && !arg_ti.is_geometry()) {
559  agg_fname += "_skip_val";
560  }
561 
562  if (target_info.agg_kind == kSINGLE_VALUE ||
563  (need_skip_null && !arg_ti.is_geometry())) {
564  llvm::Value* null_in_lv{nullptr};
565  if (arg_ti.is_fp()) {
566  null_in_lv = executor->cgen_state_->inlineFpNull(arg_ti);
567  } else {
568  null_in_lv = executor->cgen_state_->inlineIntNull(
569  is_agg_domain_range_equivalent(target_info.agg_kind)
570  ? arg_ti
571  : target_info.sql_type);
572  }
573  CHECK(null_in_lv);
574  auto null_lv =
575  executor->cgen_state_->castToTypeIn(null_in_lv, (agg_chosen_bytes << 3));
576  agg_args.push_back(null_lv);
577  }
578  if (target_info.agg_kind == kSUM_IF) {
579  const auto agg_expr = dynamic_cast<const Analyzer::AggExpr*>(target_expr);
580  auto cond_expr_lv =
581  code_generator.codegen(agg_expr->get_arg1().get(), true, co).front();
582  auto cond_lv = executor->codegenConditionalAggregateCondValSelector(
583  cond_expr_lv, kSUM_IF, co);
584  agg_args.push_back(cond_lv);
585  }
586  if (!target_info.is_distinct) {
587  if (co.device_type == ExecutorDeviceType::GPU &&
588  query_mem_desc.threadsShareMemory()) {
589  agg_fname += "_shared";
590  if (needs_unnest_double_patch) {
591  agg_fname = patch_agg_fname(agg_fname);
592  }
593  }
594  auto agg_fname_call_ret_lv = group_by_and_agg->emitCall(agg_fname, agg_args);
595 
596  if (agg_fname.find("checked") != std::string::npos) {
597  group_by_and_agg->checkErrorCode(agg_fname_call_ret_lv);
598  }
599  }
600  }
601  const auto window_func = dynamic_cast<const Analyzer::WindowFunction*>(target_expr);
602  // window function with framing has a different code path and codegen logic
603  if (window_func && !window_func->hasFraming() &&
604  window_function_requires_peer_handling(window_func)) {
605  const auto window_func_context =
606  WindowProjectNodeContext::getActiveWindowFunctionContext(executor);
607  const auto pending_outputs =
608  LL_INT(window_func_context->aggregateStatePendingOutputs());
609  executor->cgen_state_->emitExternalCall("add_window_pending_output",
610  llvm::Type::getVoidTy(LL_CONTEXT),
611  {agg_args.front(), pending_outputs});
612  const auto& window_func_ti = window_func->get_type_info();
613  std::string apply_window_pending_outputs_name = "apply_window_pending_outputs";
614  switch (window_func_ti.get_type()) {
615  case kFLOAT: {
616  apply_window_pending_outputs_name += "_float";
617  if (query_mem_desc.didOutputColumnar()) {
618  apply_window_pending_outputs_name += "_columnar";
619  }
620  break;
621  }
622  case kDOUBLE: {
623  apply_window_pending_outputs_name += "_double";
624  break;
625  }
626  default: {
627  apply_window_pending_outputs_name += "_int";
628  if (query_mem_desc.didOutputColumnar()) {
629  apply_window_pending_outputs_name +=
630  std::to_string(window_func_ti.get_size() * 8);
631  } else {
632  apply_window_pending_outputs_name += "64";
633  }
634  break;
635  }
636  }
637  const auto partition_end =
638  LL_INT(reinterpret_cast<int64_t>(window_func_context->partitionEnd()));
639  executor->cgen_state_->emitExternalCall(apply_window_pending_outputs_name,
640  llvm::Type::getVoidTy(LL_CONTEXT),
641  {pending_outputs,
642  target_lvs.front(),
643  partition_end,
644  code_generator.posArg(nullptr)});
645  }
646 
647  ++slot_index;
648  ++target_lv_idx;
649  }
650 }
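For the regular (non-distinct, non-quantile, non-mode) aggregates, the loop above builds the name of the runtime aggregate function from pieces: the base name from agg_fn_base_names(), a width/type suffix, "_skip_val" when NULLs must be skipped, and "_shared" when GPU threads share the output buffer. The helper below is an illustrative sketch of that composition; it is a hypothetical function, not part of the code base, and simplified (the _int16/_int8 suffixes used for columnar outputs are omitted). A nullable SUM over a DOUBLE column executed with shared output buffers would, for example, resolve to something like "agg_sum_double_skip_val_shared".

  #include <cstddef>
  #include <cstdint>
  #include <string>

  // Sketch only: mirrors the suffix order used in codegenAggregate() above.
  std::string assemble_agg_fname(std::string agg_fname,        // base, e.g. "agg_sum"
                                 const bool is_fp_arg,         // floating-point argument?
                                 const size_t agg_chosen_bytes,
                                 const bool need_skip_null,    // NULL-skipping variant
                                 const bool shared_memory) {   // threads share the buffer
    if (is_fp_arg) {
      agg_fname += (agg_chosen_bytes == sizeof(float)) ? "_float" : "_double";
    } else if (agg_chosen_bytes == sizeof(int32_t)) {
      agg_fname += "_int32";
    }
    if (need_skip_null) {
      agg_fname += "_skip_val";
    }
    if (shared_memory) {
      agg_fname += "_shared";
    }
    return agg_fname;
  }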

Friends And Related Function Documentation

std::ostream& operator<< ( std::ostream &  os,
const TargetExprCodegen &  target_expr_codegen 
)
friend

Definition at line 993 of file TargetExprBuilder.cpp.

993  {
994  os << "(target_expr: " << target_expr_codegen.target_expr->toString()
995  << ", target_info: " << target_expr_codegen.target_info.toString()
996  << ", base_slot_index: " << target_expr_codegen.base_slot_index
997  << ", target_idx:" << target_expr_codegen.target_idx
998  << ", is_group_by: " << target_expr_codegen.is_group_by << ")";
999  return os;
1000 }
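A minimal usage sketch (hypothetical helper, not code from the repository) that renders the single-line form shown above, e.g. for logging while debugging code generation:

  #include <sstream>
  #include <string>
  #include <TargetExprBuilder.h>

  std::string debug_string(const TargetExprCodegen& tec) {
    // Produces "(target_expr: ..., target_info: ..., base_slot_index: ..., target_idx:..., is_group_by: ...)"
    std::ostringstream oss;
    oss << tec;
    return oss.str();
  }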

Member Data Documentation

int32_t TargetExprCodegen::base_slot_index

Definition at line 76 of file TargetExprBuilder.h.

Referenced by codegen(), and operator<<().

bool TargetExprCodegen::is_group_by

Referenced by codegen(), codegenAggregate(), and operator<<().

const Analyzer::Expr* TargetExprCodegen::target_expr

Definition at line 73 of file TargetExprBuilder.h.

Referenced by codegen(), codegenAggregate(), and operator<<().

size_t TargetExprCodegen::target_idx

Definition at line 77 of file TargetExprBuilder.h.

Referenced by codegen(), codegenAggregate(), and operator<<().

TargetInfo TargetExprCodegen::target_info

Referenced by codegen(), codegenAggregate(), and operator<<().

The documentation for this struct was generated from the following files:

TargetExprBuilder.h
TargetExprBuilder.cpp