OmniSciDB  72180abbfe
 All Classes Namespaces Files Functions Variables Typedefs Enumerations Enumerator Friends Macros Pages
TargetExprCodegen Struct Reference

#include <TargetExprBuilder.h>

+ Collaboration diagram for TargetExprCodegen:

Public Member Functions

 TargetExprCodegen (const Analyzer::Expr *target_expr, TargetInfo &target_info, const int32_t base_slot_index, const size_t target_idx, const bool is_group_by)
 
void codegen (GroupByAndAggregate *group_by_and_agg, Executor *executor, const QueryMemoryDescriptor &query_mem_desc, const CompilationOptions &co, const GpuSharedMemoryContext &gpu_smem_context, const std::tuple< llvm::Value *, llvm::Value * > &agg_out_ptr_w_idx, const std::vector< llvm::Value * > &agg_out_vec, llvm::Value *output_buffer_byte_stream, llvm::Value *out_row_idx, GroupByAndAggregate::DiamondCodegen &diamond_codegen, GroupByAndAggregate::DiamondCodegen *sample_cfg=nullptr) const
 
void codegenAggregate (GroupByAndAggregate *group_by_and_agg, Executor *executor, const QueryMemoryDescriptor &query_mem_desc, const CompilationOptions &co, const std::vector< llvm::Value * > &target_lvs, const std::tuple< llvm::Value *, llvm::Value * > &agg_out_ptr_w_idx, const std::vector< llvm::Value * > &agg_out_vec, llvm::Value *output_buffer_byte_stream, llvm::Value *out_row_idx, int32_t slot_index) const
 

Public Attributes

const Analyzer::Expr * target_expr
 
TargetInfo target_info
 
int32_t base_slot_index
 
size_t target_idx
 
bool is_group_by
 

Detailed Description

Definition at line 33 of file TargetExprBuilder.h.

Constructor & Destructor Documentation

TargetExprCodegen::TargetExprCodegen ( const Analyzer::Expr *  target_expr,
TargetInfo target_info,
const int32_t  base_slot_index,
const size_t  target_idx,
const bool  is_group_by 
)
inline

Definition at line 34 of file TargetExprBuilder.h.

39  : target_expr(target_expr)
40  , target_info(target_info)
const Analyzer::Expr * target_expr

Member Function Documentation

void TargetExprCodegen::codegen ( GroupByAndAggregate *  group_by_and_agg,
Executor *  executor,
const QueryMemoryDescriptor &  query_mem_desc,
const CompilationOptions &  co,
const GpuSharedMemoryContext &  gpu_smem_context,
const std::tuple< llvm::Value *, llvm::Value * > &  agg_out_ptr_w_idx,
const std::vector< llvm::Value * > &  agg_out_vec,
llvm::Value *  output_buffer_byte_stream,
llvm::Value *  out_row_idx,
GroupByAndAggregate::DiamondCodegen &  diamond_codegen,
GroupByAndAggregate::DiamondCodegen *  sample_cfg = nullptr 
) const

Definition at line 92 of file TargetExprBuilder.cpp.

References agg_arg(), anonymous_namespace{TargetExprBuilder.cpp}::agg_fn_base_names(), base_slot_index, CHECK(), CHECK_EQ, CHECK_GE, GroupByAndAggregate::codegenAggArg(), codegenAggregate(), GroupByAndAggregate::codegenWindowRowPointer(), CompilationOptions::device_type, QueryMemoryDescriptor::didOutputColumnar(), GroupByAndAggregate::emitCall(), g_bigint_count, get_int_type(), SQLTypeInfo::get_physical_coord_cols(), QueryMemoryDescriptor::getColOffInBytes(), QueryMemoryDescriptor::getColOnlyOffInBytes(), QueryMemoryDescriptor::getPaddedSlotWidthBytes(), GPU, TargetInfo::is_agg, SQLTypeInfo::is_geometry(), is_group_by, anonymous_namespace{TargetExprBuilder.cpp}::is_simple_count(), GpuSharedMemoryContext::isSharedMemoryUsed(), LL_BUILDER, LL_CONTEXT, LL_INT, WindowProjectNodeContext::resetWindowFunctionContext(), TargetInfo::sql_type, target_expr, anonymous_namespace{TargetExprBuilder.cpp}::target_has_geo(), target_idx, target_info, QueryMemoryDescriptor::threadsShareMemory(), and window_function_is_aggregate().

103  {
104  CHECK(group_by_and_agg);
105  CHECK(executor);
106 
107  auto agg_out_ptr_w_idx = agg_out_ptr_w_idx_in;
108  const auto arg_expr = agg_arg(target_expr);
109 
110  const auto agg_fn_names = agg_fn_base_names(target_info);
111  const auto window_func = dynamic_cast<const Analyzer::WindowFunction*>(target_expr);
113  auto target_lvs =
114  window_func
115  ? std::vector<llvm::Value*>{executor->codegenWindowFunction(target_idx, co)}
116  : group_by_and_agg->codegenAggArg(target_expr, co);
117  const auto window_row_ptr = window_func
118  ? group_by_and_agg->codegenWindowRowPointer(
119  window_func, query_mem_desc, co, diamond_codegen)
120  : nullptr;
121  if (window_row_ptr) {
122  agg_out_ptr_w_idx =
123  std::make_tuple(window_row_ptr, std::get<1>(agg_out_ptr_w_idx_in));
124  if (window_function_is_aggregate(window_func->getKind())) {
125  out_row_idx = window_row_ptr;
126  }
127  }
128 
129  llvm::Value* str_target_lv{nullptr};
130  if (target_lvs.size() == 3 && !target_has_geo(target_info)) {
131  // none encoding string, pop the packed pointer + length since
132  // it's only useful for IS NULL checks and assumed to be only
133  // two components (pointer and length) for the purpose of projection
134  str_target_lv = target_lvs.front();
135  target_lvs.erase(target_lvs.begin());
136  }
138  // Geo cols are expanded to the physical coord cols. Each physical coord col is an
139  // array. Ensure that the target values generated match the number of agg
140  // functions before continuing
141  if (target_lvs.size() < agg_fn_names.size()) {
142  CHECK_EQ(target_lvs.size(), agg_fn_names.size() / 2);
143  std::vector<llvm::Value*> new_target_lvs;
144  new_target_lvs.reserve(agg_fn_names.size());
145  for (const auto& target_lv : target_lvs) {
146  new_target_lvs.push_back(target_lv);
147  new_target_lvs.push_back(target_lv);
148  }
149  target_lvs = new_target_lvs;
150  }
151  }
152  if (target_lvs.size() < agg_fn_names.size()) {
153  CHECK_EQ(size_t(1), target_lvs.size());
154  CHECK_EQ(size_t(2), agg_fn_names.size());
155  for (size_t i = 1; i < agg_fn_names.size(); ++i) {
156  target_lvs.push_back(target_lvs.front());
157  }
158  } else {
160  if (!target_info.is_agg) {
161  CHECK_EQ(static_cast<size_t>(2 * target_info.sql_type.get_physical_coord_cols()),
162  target_lvs.size());
163  CHECK_EQ(agg_fn_names.size(), target_lvs.size());
164  }
165  } else {
166  CHECK(str_target_lv || (agg_fn_names.size() == target_lvs.size()));
167  CHECK(target_lvs.size() == 1 || target_lvs.size() == 2);
168  }
169  }
170 
171  int32_t slot_index = base_slot_index;
172  CHECK_GE(slot_index, 0);
173  CHECK(is_group_by || static_cast<size_t>(slot_index) < agg_out_vec.size());
174 
175  uint32_t col_off{0};
176  if (co.device_type == ExecutorDeviceType::GPU && query_mem_desc.threadsShareMemory() &&
178  (!arg_expr || arg_expr->get_type_info().get_notnull())) {
179  CHECK_EQ(size_t(1), agg_fn_names.size());
180  const auto chosen_bytes = query_mem_desc.getPaddedSlotWidthBytes(slot_index);
181  llvm::Value* agg_col_ptr{nullptr};
182  if (is_group_by) {
183  if (query_mem_desc.didOutputColumnar()) {
184  col_off = query_mem_desc.getColOffInBytes(slot_index);
185  CHECK_EQ(size_t(0), col_off % chosen_bytes);
186  col_off /= chosen_bytes;
187  CHECK(std::get<1>(agg_out_ptr_w_idx));
188  auto offset =
189  LL_BUILDER.CreateAdd(std::get<1>(agg_out_ptr_w_idx), LL_INT(col_off));
190  agg_col_ptr = LL_BUILDER.CreateGEP(
191  LL_BUILDER.CreateBitCast(
192  std::get<0>(agg_out_ptr_w_idx),
193  llvm::PointerType::get(get_int_type((chosen_bytes << 3), LL_CONTEXT), 0)),
194  offset);
195  } else {
196  col_off = query_mem_desc.getColOnlyOffInBytes(slot_index);
197  CHECK_EQ(size_t(0), col_off % chosen_bytes);
198  col_off /= chosen_bytes;
199  agg_col_ptr = LL_BUILDER.CreateGEP(
200  LL_BUILDER.CreateBitCast(
201  std::get<0>(agg_out_ptr_w_idx),
202  llvm::PointerType::get(get_int_type((chosen_bytes << 3), LL_CONTEXT), 0)),
203  LL_INT(col_off));
204  }
205  }
206 
207  if (chosen_bytes != sizeof(int32_t)) {
208  CHECK_EQ(8, chosen_bytes);
209  if (g_bigint_count) {
210  const auto acc_i64 = LL_BUILDER.CreateBitCast(
211  is_group_by ? agg_col_ptr : agg_out_vec[slot_index],
212  llvm::PointerType::get(get_int_type(64, LL_CONTEXT), 0));
213  if (gpu_smem_context.isSharedMemoryUsed()) {
214  group_by_and_agg->emitCall(
215  "agg_count_shared", std::vector<llvm::Value*>{acc_i64, LL_INT(int64_t(1))});
216  } else {
217  LL_BUILDER.CreateAtomicRMW(llvm::AtomicRMWInst::Add,
218  acc_i64,
219  LL_INT(int64_t(1)),
220  llvm::AtomicOrdering::Monotonic);
221  }
222  } else {
223  auto acc_i32 = LL_BUILDER.CreateBitCast(
224  is_group_by ? agg_col_ptr : agg_out_vec[slot_index],
225  llvm::PointerType::get(get_int_type(32, LL_CONTEXT), 0));
226  if (gpu_smem_context.isSharedMemoryUsed()) {
227  acc_i32 = LL_BUILDER.CreatePointerCast(
228  acc_i32, llvm::Type::getInt32PtrTy(LL_CONTEXT, 3));
229  }
230  LL_BUILDER.CreateAtomicRMW(llvm::AtomicRMWInst::Add,
231  acc_i32,
232  LL_INT(1),
233  llvm::AtomicOrdering::Monotonic);
234  }
235  } else {
236  const auto acc_i32 = (is_group_by ? agg_col_ptr : agg_out_vec[slot_index]);
237  if (gpu_smem_context.isSharedMemoryUsed()) {
238  // Atomic operation on address space level 3 (Shared):
239  const auto shared_acc_i32 = LL_BUILDER.CreatePointerCast(
240  acc_i32, llvm::Type::getInt32PtrTy(LL_CONTEXT, 3));
241  LL_BUILDER.CreateAtomicRMW(llvm::AtomicRMWInst::Add,
242  shared_acc_i32,
243  LL_INT(1),
244  llvm::AtomicOrdering::Monotonic);
245  } else {
246  LL_BUILDER.CreateAtomicRMW(llvm::AtomicRMWInst::Add,
247  acc_i32,
248  LL_INT(1),
249  llvm::AtomicOrdering::Monotonic);
250  }
251  }
252  return;
253  }
254 
255  codegenAggregate(group_by_and_agg,
256  executor,
257  query_mem_desc,
258  co,
259  target_lvs,
260  agg_out_ptr_w_idx,
261  agg_out_vec,
262  output_buffer_byte_stream,
263  out_row_idx,
264  slot_index);
265 }
#define LL_BUILDER
const Analyzer::Expr * agg_arg(const Analyzer::Expr *expr)
#define CHECK_EQ(x, y)
Definition: Logger.h:205
bool target_has_geo(const TargetInfo &target_info)
void codegenAggregate(GroupByAndAggregate *group_by_and_agg, Executor *executor, const QueryMemoryDescriptor &query_mem_desc, const CompilationOptions &co, const std::vector< llvm::Value * > &target_lvs, const std::tuple< llvm::Value *, llvm::Value * > &agg_out_ptr_w_idx, const std::vector< llvm::Value * > &agg_out_vec, llvm::Value *output_buffer_byte_stream, llvm::Value *out_row_idx, int32_t slot_index) const
std::vector< std::string > agg_fn_base_names(const TargetInfo &target_info)
SQLTypeInfo sql_type
Definition: TargetInfo.h:42
#define CHECK_GE(x, y)
Definition: Logger.h:210
llvm::Value * emitCall(const std::string &fname, const std::vector< llvm::Value * > &args)
llvm::Type * get_int_type(const int width, llvm::LLVMContext &context)
size_t getColOnlyOffInBytes(const size_t col_idx) const
bool is_agg
Definition: TargetInfo.h:40
CHECK(cgen_state)
#define LL_INT(v)
bool g_bigint_count
#define LL_CONTEXT
const int8_t getPaddedSlotWidthBytes(const size_t slot_idx) const
ExecutorDeviceType device_type
bool window_function_is_aggregate(const SqlWindowFunctionKind kind)
Definition: WindowContext.h:42
const Analyzer::Expr * target_expr
std::vector< llvm::Value * > codegenAggArg(const Analyzer::Expr *target_expr, const CompilationOptions &co)
llvm::Value * codegenWindowRowPointer(const Analyzer::WindowFunction *window_func, const QueryMemoryDescriptor &query_mem_desc, const CompilationOptions &co, DiamondCodegen &diamond_codegen)
bool is_simple_count(const TargetInfo &target_info)
bool is_geometry() const
Definition: sqltypes.h:420
static void resetWindowFunctionContext(Executor *executor)
int get_physical_coord_cols() const
Definition: sqltypes.h:293
size_t getColOffInBytes(const size_t col_idx) const

+ Here is the call graph for this function:

void TargetExprCodegen::codegenAggregate ( GroupByAndAggregate *  group_by_and_agg,
Executor *  executor,
const QueryMemoryDescriptor &  query_mem_desc,
const CompilationOptions &  co,
const std::vector< llvm::Value * > &  target_lvs,
const std::tuple< llvm::Value *, llvm::Value * > &  agg_out_ptr_w_idx,
const std::vector< llvm::Value * > &  agg_out_vec,
llvm::Value *  output_buffer_byte_stream,
llvm::Value *  out_row_idx,
int32_t  slot_index 

Definition at line 267 of file TargetExprBuilder.cpp.

References agg_arg(), TargetInfo::agg_arg_type, anonymous_namespace{TargetExprBuilder.cpp}::agg_fn_base_names(), TargetInfo::agg_kind, CHECK(), CHECK_EQ, CHECK_GE, GroupByAndAggregate::checkErrorCode(), GroupByAndAggregate::codegenAggColumnPtr(), GroupByAndAggregate::codegenCountDistinct(), GroupByAndAggregate::convertNullIfAny(), CompilationOptions::device_type, QueryMemoryDescriptor::didOutputColumnar(), GroupByAndAggregate::emitCall(), get_compact_type(), SQLTypeInfo::get_type(), WindowProjectNodeContext::getActiveWindowFunctionContext(), QueryMemoryDescriptor::getColOnlyOffInBytes(), QueryMemoryDescriptor::getLogicalSlotWidthBytes(), QueryMemoryDescriptor::getPaddedSlotWidthBytes(), GPU, is_agg_domain_range_equivalent(), TargetInfo::is_distinct, is_distinct_target(), is_group_by, anonymous_namespace{TargetExprBuilder.cpp}::is_simple_count(), QueryMemoryDescriptor::isLogicalSizedColumnsAllowed(), kAVG, kDOUBLE, kFLOAT, kNULLT, kSINGLE_VALUE, LL_BUILDER, LL_CONTEXT, LL_INT, GroupByAndAggregate::needsUnnestDoublePatch(), numeric_type_name(), patch_agg_fname(), CodeGenerator::posArg(), TargetInfo::skip_null_val, TargetInfo::sql_type, takes_float_argument(), target_expr, anonymous_namespace{TargetExprBuilder.cpp}::target_has_geo(), target_idx, target_info, QueryMemoryDescriptor::threadsShareMemory(), to_string(), and window_function_requires_peer_handling().

Referenced by codegen().

277  {
278  size_t target_lv_idx = 0;
279  const bool lazy_fetched{executor->plan_state_->isLazyFetchColumn(target_expr)};
280 
281  CodeGenerator code_generator(executor);
282 
283  const auto agg_fn_names = agg_fn_base_names(target_info);
284  auto arg_expr = agg_arg(target_expr);
285 
286  for (const auto& agg_base_name : agg_fn_names) {
287  if (target_info.is_distinct && arg_expr->get_type_info().is_array()) {
288  CHECK_EQ(static_cast<size_t>(query_mem_desc.getLogicalSlotWidthBytes(slot_index)),
289  sizeof(int64_t));
290  // TODO(miyu): check if buffer may be columnar here
291  CHECK(!query_mem_desc.didOutputColumnar());
292  const auto& elem_ti = arg_expr->get_type_info().get_elem_type();
293  uint32_t col_off{0};
294  if (is_group_by) {
295  const auto col_off_in_bytes = query_mem_desc.getColOnlyOffInBytes(slot_index);
296  CHECK_EQ(size_t(0), col_off_in_bytes % sizeof(int64_t));
297  col_off /= sizeof(int64_t);
298  }
299  executor->cgen_state_->emitExternalCall(
300  "agg_count_distinct_array_" + numeric_type_name(elem_ti),
301  llvm::Type::getVoidTy(LL_CONTEXT),
302  {is_group_by
303  ? LL_BUILDER.CreateGEP(std::get<0>(agg_out_ptr_w_idx), LL_INT(col_off))
304  : agg_out_vec[slot_index],
305  target_lvs[target_lv_idx],
306  code_generator.posArg(arg_expr),
307  elem_ti.is_fp()
308  ? static_cast<llvm::Value*>(executor->cgen_state_->inlineFpNull(elem_ti))
309  : static_cast<llvm::Value*>(
310  executor->cgen_state_->inlineIntNull(elem_ti))});
311  ++slot_index;
312  ++target_lv_idx;
313  continue;
314  }
315 
316  llvm::Value* agg_col_ptr{nullptr};
317  const auto chosen_bytes =
318  static_cast<size_t>(query_mem_desc.getPaddedSlotWidthBytes(slot_index));
319  const auto& chosen_type = get_compact_type(target_info);
320  const auto& arg_type =
321  ((arg_expr && arg_expr->get_type_info().get_type() != kNULLT) &&
325  const bool is_fp_arg =
326  !lazy_fetched && arg_type.get_type() != kNULLT && arg_type.is_fp();
327  if (is_group_by) {
328  agg_col_ptr = group_by_and_agg->codegenAggColumnPtr(output_buffer_byte_stream,
329  out_row_idx,
330  agg_out_ptr_w_idx,
331  query_mem_desc,
332  chosen_bytes,
333  slot_index,
334  target_idx);
335  CHECK(agg_col_ptr);
336  agg_col_ptr->setName("agg_col_ptr");
337  }
338 
339  const bool float_argument_input = takes_float_argument(target_info);
340  const bool is_count_in_avg = target_info.agg_kind == kAVG && target_lv_idx == 1;
341  // The count component of an average should never be compacted.
342  const auto agg_chosen_bytes =
343  float_argument_input && !is_count_in_avg ? sizeof(float) : chosen_bytes;
344  if (float_argument_input) {
345  CHECK_GE(chosen_bytes, sizeof(float));
346  }
347 
348  auto target_lv = target_lvs[target_lv_idx];
349  const auto needs_unnest_double_patch = group_by_and_agg->needsUnnestDoublePatch(
350  target_lv, agg_base_name, query_mem_desc.threadsShareMemory(), co);
351  const auto need_skip_null = !needs_unnest_double_patch && target_info.skip_null_val;
352  if (!needs_unnest_double_patch) {
353  if (need_skip_null && !is_agg_domain_range_equivalent(target_info.agg_kind)) {
354  target_lv = group_by_and_agg->convertNullIfAny(arg_type, target_info, target_lv);
355  } else if (is_fp_arg) {
356  target_lv = executor->castToFP(target_lv);
357  }
358  if (!dynamic_cast<const Analyzer::AggExpr*>(target_expr) || arg_expr) {
359  target_lv =
360  executor->cgen_state_->castToTypeIn(target_lv, (agg_chosen_bytes << 3));
361  }
362  }
363 
364  const bool is_simple_count_target = is_simple_count(target_info);
365  llvm::Value* str_target_lv{nullptr};
366  if (target_lvs.size() == 3 && !target_has_geo(target_info)) {
367  // none encoding string
368  str_target_lv = target_lvs.front();
369  }
370  std::vector<llvm::Value*> agg_args{
371  executor->castToIntPtrTyIn((is_group_by ? agg_col_ptr : agg_out_vec[slot_index]),
372  (agg_chosen_bytes << 3)),
373  (is_simple_count_target && !arg_expr)
374  ? (agg_chosen_bytes == sizeof(int32_t) ? LL_INT(int32_t(0))
375  : LL_INT(int64_t(0)))
376  : (is_simple_count_target && arg_expr && str_target_lv ? str_target_lv
377  : target_lv)};
378  if (query_mem_desc.isLogicalSizedColumnsAllowed()) {
379  if (is_simple_count_target && arg_expr && str_target_lv) {
380  agg_args[1] =
381  agg_chosen_bytes == sizeof(int32_t) ? LL_INT(int32_t(0)) : LL_INT(int64_t(0));
382  }
383  }
384  std::string agg_fname{agg_base_name};
385  if (is_fp_arg) {
386  if (!lazy_fetched) {
387  if (agg_chosen_bytes == sizeof(float)) {
388  CHECK_EQ(arg_type.get_type(), kFLOAT);
389  agg_fname += "_float";
390  } else {
391  CHECK_EQ(agg_chosen_bytes, sizeof(double));
392  agg_fname += "_double";
393  }
394  }
395  } else if (agg_chosen_bytes == sizeof(int32_t)) {
396  agg_fname += "_int32";
397  } else if (agg_chosen_bytes == sizeof(int16_t) &&
398  query_mem_desc.didOutputColumnar()) {
399  agg_fname += "_int16";
400  } else if (agg_chosen_bytes == sizeof(int8_t) && query_mem_desc.didOutputColumnar()) {
401  agg_fname += "_int8";
402  }
403 
405  CHECK_EQ(agg_chosen_bytes, sizeof(int64_t));
406  CHECK(!chosen_type.is_fp());
407  group_by_and_agg->codegenCountDistinct(
408  target_idx, target_expr, agg_args, query_mem_desc, co.device_type);
409  } else {
410  const auto& arg_ti = target_info.agg_arg_type;
411  if (need_skip_null && !arg_ti.is_geometry()) {
412  agg_fname += "_skip_val";
413  }
414 
416  (need_skip_null && !arg_ti.is_geometry())) {
417  llvm::Value* null_in_lv{nullptr};
418  if (arg_ti.is_fp()) {
419  null_in_lv =
420  static_cast<llvm::Value*>(executor->cgen_state_->inlineFpNull(arg_ti));
421  } else {
422  null_in_lv = static_cast<llvm::Value*>(executor->cgen_state_->inlineIntNull(
424  ? arg_ti
425  : target_info.sql_type));
426  }
427  CHECK(null_in_lv);
428  auto null_lv =
429  executor->cgen_state_->castToTypeIn(null_in_lv, (agg_chosen_bytes << 3));
430  agg_args.push_back(null_lv);
431  }
432  if (!target_info.is_distinct) {
434  query_mem_desc.threadsShareMemory()) {
435  agg_fname += "_shared";
436  if (needs_unnest_double_patch) {
437  agg_fname = patch_agg_fname(agg_fname);
438  }
439  }
440  auto agg_fname_call_ret_lv = group_by_and_agg->emitCall(agg_fname, agg_args);
441 
442  if (agg_fname.find("checked") != std::string::npos) {
443  group_by_and_agg->checkErrorCode(agg_fname_call_ret_lv);
444  }
445  }
446  }
447  const auto window_func = dynamic_cast<const Analyzer::WindowFunction*>(target_expr);
448  if (window_func && window_function_requires_peer_handling(window_func)) {
449  const auto window_func_context =
451  const auto pending_outputs =
452  LL_INT(window_func_context->aggregateStatePendingOutputs());
453  executor->cgen_state_->emitExternalCall("add_window_pending_output",
454  llvm::Type::getVoidTy(LL_CONTEXT),
455  {agg_args.front(), pending_outputs});
456  const auto& window_func_ti = window_func->get_type_info();
457  std::string apply_window_pending_outputs_name = "apply_window_pending_outputs";
458  switch (window_func_ti.get_type()) {
459  case kFLOAT: {
460  apply_window_pending_outputs_name += "_float";
461  if (query_mem_desc.didOutputColumnar()) {
462  apply_window_pending_outputs_name += "_columnar";
463  }
464  break;
465  }
466  case kDOUBLE: {
467  apply_window_pending_outputs_name += "_double";
468  break;
469  }
470  default: {
471  apply_window_pending_outputs_name += "_int";
472  if (query_mem_desc.didOutputColumnar()) {
473  apply_window_pending_outputs_name +=
474  std::to_string(window_func_ti.get_size() * 8);
475  } else {
476  apply_window_pending_outputs_name += "64";
477  }
478  break;
479  }
480  }
481  const auto partition_end =
482  LL_INT(reinterpret_cast<int64_t>(window_func_context->partitionEnd()));
483  executor->cgen_state_->emitExternalCall(apply_window_pending_outputs_name,
484  llvm::Type::getVoidTy(LL_CONTEXT),
485  {pending_outputs,
486  target_lvs.front(),
487  partition_end,
488  code_generator.posArg(nullptr)});
489  }
490 
491  ++slot_index;
492  ++target_lv_idx;
493  }
494 }
#define LL_BUILDER
const Analyzer::Expr * agg_arg(const Analyzer::Expr *expr)
#define CHECK_EQ(x, y)
Definition: Logger.h:205
bool target_has_geo(const TargetInfo &target_info)
std::vector< std::string > agg_fn_base_names(const TargetInfo &target_info)
llvm::Value * codegenAggColumnPtr(llvm::Value *output_buffer_byte_stream, llvm::Value *out_row_idx, const std::tuple< llvm::Value *, llvm::Value * > &agg_out_ptr_w_idx, const QueryMemoryDescriptor &query_mem_desc, const size_t chosen_bytes, const size_t agg_out_off, const size_t target_idx)
: returns the pointer to where the aggregation should be stored.
bool isLogicalSizedColumnsAllowed() const
SQLTypeInfo sql_type
Definition: TargetInfo.h:42
#define CHECK_GE(x, y)
Definition: Logger.h:210
llvm::Value * emitCall(const std::string &fname, const std::vector< llvm::Value * > &args)
void checkErrorCode(llvm::Value *retCode)
bool needsUnnestDoublePatch(llvm::Value *val_ptr, const std::string &agg_base_name, const bool threads_share_memory, const CompilationOptions &co) const
bool takes_float_argument(const TargetInfo &target_info)
Definition: TargetInfo.h:121
HOST DEVICE SQLTypes get_type() const
Definition: sqltypes.h:257
bool skip_null_val
Definition: TargetInfo.h:44
static WindowFunctionContext * getActiveWindowFunctionContext(Executor *executor)
std::string to_string(char const *&&v)
SQLTypeInfo agg_arg_type
Definition: TargetInfo.h:43
std::string patch_agg_fname(const std::string &agg_name)
size_t getColOnlyOffInBytes(const size_t col_idx) const
const SQLTypeInfo get_compact_type(const TargetInfo &target)
CHECK(cgen_state)
#define LL_INT(v)
llvm::Value * convertNullIfAny(const SQLTypeInfo &arg_type, const TargetInfo &agg_info, llvm::Value *target)
#define LL_CONTEXT
bool is_distinct_target(const TargetInfo &target_info)
Definition: TargetInfo.h:117
void codegenCountDistinct(const size_t target_idx, const Analyzer::Expr *target_expr, std::vector< llvm::Value * > &agg_args, const QueryMemoryDescriptor &, const ExecutorDeviceType)
const int8_t getPaddedSlotWidthBytes(const size_t slot_idx) const
SQLAgg agg_kind
Definition: TargetInfo.h:41
ExecutorDeviceType device_type
const Analyzer::Expr * target_expr
bool window_function_requires_peer_handling(const Analyzer::WindowFunction *window_func)
bool is_simple_count(const TargetInfo &target_info)
std::string numeric_type_name(const SQLTypeInfo &ti)
Definition: Execute.h:137
bool is_distinct
Definition: TargetInfo.h:45
const int8_t getLogicalSlotWidthBytes(const size_t slot_idx) const
Definition: sqldefs.h:72
bool is_agg_domain_range_equivalent(const SQLAgg &agg_kind)
Definition: TargetInfo.h:52

+ Here is the call graph for this function:

+ Here is the caller graph for this function:

Member Data Documentation

int32_t TargetExprCodegen::base_slot_index

Definition at line 71 of file TargetExprBuilder.h.

Referenced by codegen().

const Analyzer::Expr* TargetExprCodegen::target_expr

Definition at line 68 of file TargetExprBuilder.h.

Referenced by codegen(), and codegenAggregate().

size_t TargetExprCodegen::target_idx

Definition at line 72 of file TargetExprBuilder.h.

Referenced by codegen(), and codegenAggregate().

TargetInfo TargetExprCodegen::target_info

The documentation for this struct was generated from the following files: