OmniSciDB  c1a53651b2
TargetExprBuilder.cpp
/*
 * Copyright 2022 HEAVY.AI, Inc.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

/**
 * @file    TargetExprBuilder.cpp
 * @brief   Helpers for codegen of target expressions.
 */

#include "TargetExprBuilder.h"

#include "CodeGenerator.h"
#include "Execute.h"
#include "GroupByAndAggregate.h"
#include "Logger/Logger.h"
#include "MaxwellCodegenPatch.h"
#include "OutputBufferInitialization.h"

#define LL_CONTEXT executor->cgen_state_->context_
#define LL_BUILDER executor->cgen_state_->ir_builder_
#define LL_BOOL(v) executor->ll_bool(v)
#define LL_INT(v) executor->cgen_state_->llInt(v)
#define LL_FP(v) executor->cgen_state_->llFp(v)
#define ROW_FUNC executor->cgen_state_->row_func_

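// The LL_* and ROW_FUNC macros above expand to codegen state hanging off an
// `executor` variable in scope (IR builder, LLVM context, inline constants),
// so they are only usable inside functions that bind an Executor* named
// `executor`, which every codegen entry point in this file does.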
namespace {

inline bool is_varlen_projection(const Analyzer::Expr* target_expr,
                                 const SQLTypeInfo& ti) {
  return dynamic_cast<const Analyzer::GeoExpr*>(target_expr) && ti.get_type() == kPOINT;
}

std::vector<std::string> agg_fn_base_names(const TargetInfo& target_info,
                                           const bool is_varlen_projection) {
  const auto& chosen_type = get_compact_type(target_info);
  if (is_varlen_projection) {
    // TODO: support other types here
    CHECK(chosen_type.is_geometry());
    return {"agg_id_varlen"};
  }
  if (!target_info.is_agg || target_info.agg_kind == kSAMPLE) {
    if (chosen_type.is_geometry()) {
      return std::vector<std::string>(2 * chosen_type.get_physical_coord_cols(),
                                      "agg_id");
    }
    if (chosen_type.is_varlen()) {
      // not a varlen projection (not creating new varlen outputs). Just store the pointer
      // and offset into the input buffer in the output slots.
      return {"agg_id", "agg_id"};
    }
    return {"agg_id"};
  }
  switch (target_info.agg_kind) {
    case kAVG:
      return {"agg_sum", "agg_count"};
    case kCOUNT:
      return {target_info.is_distinct ? "agg_count_distinct" : "agg_count"};
    case kCOUNT_IF:
      return {"agg_count_if"};
    case kMAX:
      return {"agg_max"};
    case kMIN:
      return {"agg_min"};
    case kSUM:
      return {"agg_sum"};
    case kSUM_IF:
      return {"agg_sum_if"};
    case kAPPROX_COUNT_DISTINCT:
      return {"agg_approximate_count_distinct"};
    case kAPPROX_QUANTILE:
      return {"agg_approx_quantile"};
    case kSINGLE_VALUE:
      return {"checked_single_agg_id"};
    case kSAMPLE:
      return {"agg_id"};
    case kMODE:
      return {"agg_mode_func"};
    default:
      UNREACHABLE() << "Unrecognized agg kind: " << target_info.agg_kind;
  }
  return {};
}
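
// For example, AVG(x) expands to {"agg_sum", "agg_count"} and therefore
// occupies two output slots, while COUNT(DISTINCT x) maps to the single
// runtime function "agg_count_distinct".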

bool is_columnar_projection(const QueryMemoryDescriptor& query_mem_desc) {
  return (query_mem_desc.getQueryDescriptionType() == QueryDescriptionType::Projection ||
          query_mem_desc.getQueryDescriptionType() ==
              QueryDescriptionType::TableFunction) &&
         query_mem_desc.didOutputColumnar();
}

bool is_simple_count(const TargetInfo& target_info) {
  return target_info.is_agg && shared::is_any<kCOUNT>(target_info.agg_kind) &&
         !target_info.is_distinct;
}

bool target_has_geo(const TargetInfo& target_info) {
  return target_info.is_agg ? target_info.agg_arg_type.is_geometry()
                            : target_info.sql_type.is_geometry();
}

} // namespace

void TargetExprCodegen::codegen(
    GroupByAndAggregate* group_by_and_agg,
    Executor* executor,
    const QueryMemoryDescriptor& query_mem_desc,
    const CompilationOptions& co,
    const GpuSharedMemoryContext& gpu_smem_context,
    const std::tuple<llvm::Value*, llvm::Value*>& agg_out_ptr_w_idx_in,
    const std::vector<llvm::Value*>& agg_out_vec,
    llvm::Value* output_buffer_byte_stream,
    llvm::Value* out_row_idx,
    llvm::Value* varlen_output_buffer,
    DiamondCodegen& diamond_codegen,
    DiamondCodegen* sample_cfg) const {
  CHECK(group_by_and_agg);
  CHECK(executor);
  AUTOMATIC_IR_METADATA(executor->cgen_state_.get());

  auto agg_out_ptr_w_idx = agg_out_ptr_w_idx_in;
  const auto arg_expr = agg_arg(target_expr);

  const bool varlen_projection = is_varlen_projection(target_expr, target_info.sql_type);
  const auto agg_fn_names = agg_fn_base_names(target_info, varlen_projection);
  const auto window_func = dynamic_cast<const Analyzer::WindowFunction*>(target_expr);
  WindowProjectNodeContext::resetWindowFunctionContext(executor);
  auto target_lvs =
      window_func
          ? std::vector<llvm::Value*>{executor->codegenWindowFunction(target_idx, co)}
          : group_by_and_agg->codegenAggArg(target_expr, co);
  const auto window_row_ptr = window_func
                                  ? group_by_and_agg->codegenWindowRowPointer(
                                        window_func, query_mem_desc, co, diamond_codegen)
                                  : nullptr;
  if (window_row_ptr) {
    agg_out_ptr_w_idx =
        std::make_tuple(window_row_ptr, std::get<1>(agg_out_ptr_w_idx_in));
    if (window_function_is_aggregate(window_func->getKind())) {
      out_row_idx = window_row_ptr;
    }
  }
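
  // Window function targets redirect the output pointer to the row pointer
  // produced by codegenWindowRowPointer above; for aggregate window functions
  // the output row index is redirected as well.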

  llvm::Value* str_target_lv{nullptr};
  if (target_lvs.size() == 3 && !target_has_geo(target_info)) {
    // none encoding string, pop the packed pointer + length since
    // it's only useful for IS NULL checks and assumed to be only
    // two components (pointer and length) for the purpose of projection
    str_target_lv = target_lvs.front();
    target_lvs.erase(target_lvs.begin());
  }
  if (target_info.sql_type.is_geometry() && !varlen_projection) {
    // Geo cols are expanded to the physical coord cols. Each physical coord col is an
    // array. Ensure that the target values generated match the number of agg
    // functions before continuing
    if (target_lvs.size() < agg_fn_names.size()) {
      CHECK_EQ(target_lvs.size(), agg_fn_names.size() / 2);
      std::vector<llvm::Value*> new_target_lvs;
      new_target_lvs.reserve(agg_fn_names.size());
      for (const auto& target_lv : target_lvs) {
        new_target_lvs.push_back(target_lv);
        new_target_lvs.push_back(target_lv);
      }
      target_lvs = new_target_lvs;
    }
  }
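  // Each physical coord column is an array occupying two output slots, which
  // is why agg_fn_base_names returned twice as many "agg_id" entries as there
  // are coord columns and why each coord value is duplicated above.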
  if (target_lvs.size() < agg_fn_names.size()) {
    CHECK_EQ(size_t(1), target_lvs.size());
    CHECK_EQ(size_t(2), agg_fn_names.size());
    for (size_t i = 1; i < agg_fn_names.size(); ++i) {
      target_lvs.push_back(target_lvs.front());
    }
  } else {
    if (target_has_geo(target_info)) {
      if (!target_info.is_agg && !varlen_projection) {
        CHECK_EQ(static_cast<size_t>(2 * target_info.sql_type.get_physical_coord_cols()),
                 target_lvs.size());
        CHECK_EQ(agg_fn_names.size(), target_lvs.size());
      }
    } else {
      CHECK(str_target_lv || (agg_fn_names.size() == target_lvs.size()));
      CHECK(target_lvs.size() == 1 || target_lvs.size() == 2);
    }
  }

  int32_t slot_index = base_slot_index;
  CHECK_GE(slot_index, 0);
  CHECK(is_group_by || static_cast<size_t>(slot_index) < agg_out_vec.size());

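  // Fast path: a non-nullable COUNT on GPU where threads share output memory
  // is lowered directly to an atomic increment of the output slot (or to an
  // agg_count_shared call when GPU shared memory is in use), bypassing the
  // generic aggregate dispatch below.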
  uint32_t col_off{0};
  if (co.device_type == ExecutorDeviceType::GPU && query_mem_desc.threadsShareMemory() &&
      is_simple_count(target_info) &&
      (!arg_expr || arg_expr->get_type_info().get_notnull())) {
    CHECK_EQ(size_t(1), agg_fn_names.size());
    const auto chosen_bytes = query_mem_desc.getPaddedSlotWidthBytes(slot_index);
    llvm::Value* agg_col_ptr{nullptr};
    if (is_group_by) {
      if (query_mem_desc.didOutputColumnar()) {
        col_off = query_mem_desc.getColOffInBytes(slot_index);
        CHECK_EQ(size_t(0), col_off % chosen_bytes);
        col_off /= chosen_bytes;
        CHECK(std::get<1>(agg_out_ptr_w_idx));
        auto offset =
            LL_BUILDER.CreateAdd(std::get<1>(agg_out_ptr_w_idx), LL_INT(col_off));
        auto* bit_cast = LL_BUILDER.CreateBitCast(
            std::get<0>(agg_out_ptr_w_idx),
            llvm::PointerType::get(get_int_type((chosen_bytes << 3), LL_CONTEXT), 0));
        agg_col_ptr = LL_BUILDER.CreateGEP(
            bit_cast->getType()->getScalarType()->getPointerElementType(),
            bit_cast,
            offset);
      } else {
        col_off = query_mem_desc.getColOnlyOffInBytes(slot_index);
        CHECK_EQ(size_t(0), col_off % chosen_bytes);
        col_off /= chosen_bytes;
        auto* bit_cast = LL_BUILDER.CreateBitCast(
            std::get<0>(agg_out_ptr_w_idx),
            llvm::PointerType::get(get_int_type((chosen_bytes << 3), LL_CONTEXT), 0));
        agg_col_ptr = LL_BUILDER.CreateGEP(
            bit_cast->getType()->getScalarType()->getPointerElementType(),
            bit_cast,
            LL_INT(col_off));
      }
    }

    if (chosen_bytes != sizeof(int32_t)) {
      CHECK_EQ(8, chosen_bytes);
      if (g_bigint_count) {
        const auto acc_i64 = LL_BUILDER.CreateBitCast(
            is_group_by ? agg_col_ptr : agg_out_vec[slot_index],
            llvm::PointerType::get(get_int_type(64, LL_CONTEXT), 0));
        if (gpu_smem_context.isSharedMemoryUsed()) {
          group_by_and_agg->emitCall(
              "agg_count_shared", std::vector<llvm::Value*>{acc_i64, LL_INT(int64_t(1))});
        } else {
          LL_BUILDER.CreateAtomicRMW(llvm::AtomicRMWInst::Add,
                                     acc_i64,
                                     LL_INT(int64_t(1)),
#if LLVM_VERSION_MAJOR > 12
                                     LLVM_ALIGN(8),
#endif
                                     llvm::AtomicOrdering::Monotonic);
        }
      } else {
        auto acc_i32 = LL_BUILDER.CreateBitCast(
            is_group_by ? agg_col_ptr : agg_out_vec[slot_index],
            llvm::PointerType::get(get_int_type(32, LL_CONTEXT), 0));
        if (gpu_smem_context.isSharedMemoryUsed()) {
          acc_i32 = LL_BUILDER.CreatePointerCast(
              acc_i32, llvm::Type::getInt32PtrTy(LL_CONTEXT, 3));
        }
        LL_BUILDER.CreateAtomicRMW(llvm::AtomicRMWInst::Add,
                                   acc_i32,
                                   LL_INT(1),
#if LLVM_VERSION_MAJOR > 12
                                   LLVM_ALIGN(4),
#endif
                                   llvm::AtomicOrdering::Monotonic);
      }
    } else {
      const auto acc_i32 = (is_group_by ? agg_col_ptr : agg_out_vec[slot_index]);
      if (gpu_smem_context.isSharedMemoryUsed()) {
        // Atomic operation on address space level 3 (Shared):
        const auto shared_acc_i32 = LL_BUILDER.CreatePointerCast(
            acc_i32, llvm::Type::getInt32PtrTy(LL_CONTEXT, 3));
        LL_BUILDER.CreateAtomicRMW(llvm::AtomicRMWInst::Add,
                                   shared_acc_i32,
                                   LL_INT(1),
#if LLVM_VERSION_MAJOR > 12
                                   LLVM_ALIGN(4),
#endif
                                   llvm::AtomicOrdering::Monotonic);
      } else {
        LL_BUILDER.CreateAtomicRMW(llvm::AtomicRMWInst::Add,
                                   acc_i32,
                                   LL_INT(1),
#if LLVM_VERSION_MAJOR > 12
                                   LLVM_ALIGN(4),
#endif
                                   llvm::AtomicOrdering::Monotonic);
      }
    }
    return;
  }
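  // In the 32-bit slot case the fast path above boils down to IR along the
  // lines of: atomicrmw add i32* %agg_col_ptr, i32 1 monotonic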

  codegenAggregate(group_by_and_agg,
                   executor,
                   query_mem_desc,
                   co,
                   target_lvs,
                   agg_out_ptr_w_idx,
                   agg_out_vec,
                   output_buffer_byte_stream,
                   out_row_idx,
                   varlen_output_buffer,
                   slot_index);
}

void TargetExprCodegen::codegenAggregate(
    GroupByAndAggregate* group_by_and_agg,
    Executor* executor,
    const QueryMemoryDescriptor& query_mem_desc,
    const CompilationOptions& co,
    const std::vector<llvm::Value*>& target_lvs,
    const std::tuple<llvm::Value*, llvm::Value*>& agg_out_ptr_w_idx,
    const std::vector<llvm::Value*>& agg_out_vec,
    llvm::Value* output_buffer_byte_stream,
    llvm::Value* out_row_idx,
    llvm::Value* varlen_output_buffer,
    int32_t slot_index) const {
  AUTOMATIC_IR_METADATA(executor->cgen_state_.get());
  size_t target_lv_idx = 0;
  const bool lazy_fetched{executor->plan_state_->isLazyFetchColumn(target_expr)};

  CodeGenerator code_generator(executor);

  const auto agg_fn_names = agg_fn_base_names(
      target_info, is_varlen_projection(target_expr, target_info.sql_type));
  auto arg_expr = agg_arg(target_expr);

  for (const auto& agg_base_name : agg_fn_names) {
    if (target_info.is_distinct && arg_expr->get_type_info().is_array()) {
      CHECK_EQ(static_cast<size_t>(query_mem_desc.getLogicalSlotWidthBytes(slot_index)),
               sizeof(int64_t));
      // TODO(miyu): check if buffer may be columnar here
      CHECK(!query_mem_desc.didOutputColumnar());
      const auto& elem_ti = arg_expr->get_type_info().get_elem_type();
      uint32_t col_off{0};
      if (is_group_by) {
        const auto col_off_in_bytes = query_mem_desc.getColOnlyOffInBytes(slot_index);
        CHECK_EQ(size_t(0), col_off_in_bytes % sizeof(int64_t));
        col_off /= sizeof(int64_t);
      }
      executor->cgen_state_->emitExternalCall(
          "agg_count_distinct_array_" + numeric_type_name(elem_ti),
          llvm::Type::getVoidTy(LL_CONTEXT),
          {is_group_by ? LL_BUILDER.CreateGEP(std::get<0>(agg_out_ptr_w_idx)
                                                  ->getType()
                                                  ->getScalarType()
                                                  ->getPointerElementType(),
                                              std::get<0>(agg_out_ptr_w_idx),
                                              LL_INT(col_off))
                       : agg_out_vec[slot_index],
           target_lvs[target_lv_idx],
           code_generator.posArg(arg_expr),
           elem_ti.is_fp()
               ? static_cast<llvm::Value*>(executor->cgen_state_->inlineFpNull(elem_ti))
               : static_cast<llvm::Value*>(
                     executor->cgen_state_->inlineIntNull(elem_ti))});
      ++slot_index;
      ++target_lv_idx;
      continue;
    }
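
    // COUNT(DISTINCT) over an array column is delegated entirely to the
    // agg_count_distinct_array_* runtime helpers, so the generic per-slot
    // handling below is skipped via the `continue` above.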

    llvm::Value* agg_col_ptr{nullptr};
    const auto chosen_bytes =
        static_cast<size_t>(query_mem_desc.getPaddedSlotWidthBytes(slot_index));
    const auto& chosen_type = get_compact_type(target_info);
    const auto& arg_type =
        ((arg_expr && arg_expr->get_type_info().get_type() != kNULLT) &&
         !target_info.is_distinct)
            ? target_info.agg_arg_type
            : target_info.sql_type;
    const bool is_fp_arg =
        !lazy_fetched && arg_type.get_type() != kNULLT && arg_type.is_fp();
    if (is_group_by) {
      agg_col_ptr = group_by_and_agg->codegenAggColumnPtr(output_buffer_byte_stream,
                                                          out_row_idx,
                                                          agg_out_ptr_w_idx,
                                                          query_mem_desc,
                                                          chosen_bytes,
                                                          slot_index,
                                                          target_idx);
      CHECK(agg_col_ptr);
      agg_col_ptr->setName("agg_col_ptr");
    }

    if (is_varlen_projection(target_expr, target_info.sql_type)) {
      CHECK(!query_mem_desc.didOutputColumnar());

      CHECK_EQ(target_info.sql_type.get_type(), kPOINT);
      CHECK_LT(target_lv_idx, target_lvs.size());
      CHECK(varlen_output_buffer);
      auto target_lv = target_lvs[target_lv_idx];

      std::string agg_fname_suffix = "";
      if (co.device_type == ExecutorDeviceType::GPU &&
          query_mem_desc.threadsShareMemory()) {
        agg_fname_suffix += "_shared";
      }

      // first write the varlen data into the varlen buffer and get the pointer location
      // into the varlen buffer
      auto& builder = executor->cgen_state_->ir_builder_;
      auto orig_bb = builder.GetInsertBlock();
      auto target_ptr_type = llvm::dyn_cast<llvm::PointerType>(target_lv->getType());
      CHECK(target_ptr_type) << "Varlen projections expect a pointer input.";
      auto is_nullptr =
          builder.CreateICmp(llvm::CmpInst::ICMP_EQ,
                             target_lv,
                             llvm::ConstantPointerNull::get(llvm::PointerType::get(
                                 target_ptr_type->getPointerElementType(), 0)));
      llvm::BasicBlock* true_bb{nullptr};
      {
        DiamondCodegen nullcheck_diamond(
            is_nullptr, executor, false, "varlen_null_check", nullptr, false);
        // maintain a reference to the true bb, overriding the diamond codegen destructor
        true_bb = nullcheck_diamond.cond_true_;
        // if not null, process the pointer and insert it into the varlen buffer
        builder.SetInsertPoint(nullcheck_diamond.cond_false_);
        auto arr_ptr_lv = executor->cgen_state_->ir_builder_.CreateBitCast(
            target_lv,
            llvm::PointerType::get(get_int_type(8, executor->cgen_state_->context_), 0));
        const int64_t chosen_bytes =
            target_info.sql_type.get_compression() == kENCODING_GEOINT ? 8 : 16;
        auto* arg = get_arg_by_name(ROW_FUNC, "old_total_matched");
        const auto output_buffer_slot = LL_BUILDER.CreateZExt(
            LL_BUILDER.CreateLoad(arg->getType()->getPointerElementType(), arg),
            llvm::Type::getInt64Ty(LL_CONTEXT));
        const auto varlen_buffer_row_sz = query_mem_desc.varlenOutputBufferElemSize();
        CHECK(varlen_buffer_row_sz);
        const auto output_buffer_slot_bytes = LL_BUILDER.CreateAdd(
            LL_BUILDER.CreateMul(output_buffer_slot,
                                 executor->cgen_state_->llInt(
                                     static_cast<int64_t>(*varlen_buffer_row_sz))),
            executor->cgen_state_->llInt(static_cast<int64_t>(
                query_mem_desc.varlenOutputRowSizeToSlot(slot_index))));

        std::vector<llvm::Value*> varlen_agg_args{
            executor->castToIntPtrTyIn(varlen_output_buffer, 8),
            output_buffer_slot_bytes,
            arr_ptr_lv,
            executor->cgen_state_->llInt(chosen_bytes)};
        auto varlen_offset_ptr =
            group_by_and_agg->emitCall(agg_base_name + agg_fname_suffix, varlen_agg_args);

        // then write that pointer location into the 64 bit slot in the output buffer
        auto varlen_offset_int = LL_BUILDER.CreatePtrToInt(
            varlen_offset_ptr, llvm::Type::getInt64Ty(LL_CONTEXT));
        builder.CreateBr(nullcheck_diamond.cond_true_);

        // use the true block to do the output buffer insertion regardless of nullness
        builder.SetInsertPoint(nullcheck_diamond.cond_true_);
        auto output_phi =
            builder.CreatePHI(llvm::Type::getInt64Ty(executor->cgen_state_->context_), 2);
        output_phi->addIncoming(varlen_offset_int, nullcheck_diamond.cond_false_);
        output_phi->addIncoming(executor->cgen_state_->llInt(static_cast<int64_t>(0)),
                                orig_bb);

        std::vector<llvm::Value*> agg_args{agg_col_ptr, output_phi};
        group_by_and_agg->emitCall("agg_id" + agg_fname_suffix, agg_args);
      }
      CHECK(true_bb);
      builder.SetInsertPoint(true_bb);

      ++slot_index;
      ++target_lv_idx;
      continue;
    }
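
    // Net effect of the varlen branch: a non-null geometry is appended to the
    // side varlen buffer, and the resulting offset (or 0 for NULL inputs, via
    // the phi node) is stored as a 64-bit value in the regular output slot.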

    const bool float_argument_input = takes_float_argument(target_info);
    const bool is_count_in_avg = target_info.agg_kind == kAVG && target_lv_idx == 1;
    // The count component of an average should never be compacted.
    const auto agg_chosen_bytes =
        float_argument_input && !is_count_in_avg ? sizeof(float) : chosen_bytes;
    if (float_argument_input) {
      CHECK_GE(chosen_bytes, sizeof(float));
    }

    auto target_lv = target_lvs[target_lv_idx];
    const auto needs_unnest_double_patch = group_by_and_agg->needsUnnestDoublePatch(
        target_lv, agg_base_name, query_mem_desc.threadsShareMemory(), co);
    const auto need_skip_null = !needs_unnest_double_patch && target_info.skip_null_val;
    if (!needs_unnest_double_patch) {
      if (need_skip_null && !is_agg_domain_range_equivalent(target_info.agg_kind)) {
        target_lv = group_by_and_agg->convertNullIfAny(arg_type, target_info, target_lv);
      } else if (is_fp_arg) {
        target_lv = executor->castToFP(target_lv, arg_type, target_info.sql_type);
      }
      if (!dynamic_cast<const Analyzer::AggExpr*>(target_expr) || arg_expr) {
        target_lv =
            executor->cgen_state_->castToTypeIn(target_lv, (agg_chosen_bytes << 3));
      }
    }

    const bool is_simple_count_target = is_simple_count(target_info);
    llvm::Value* str_target_lv{nullptr};
    if (target_lvs.size() == 3 && !target_has_geo(target_info)) {
      // none encoding string
      str_target_lv = target_lvs.front();
    }
    std::vector<llvm::Value*> agg_args{
        executor->castToIntPtrTyIn((is_group_by ? agg_col_ptr : agg_out_vec[slot_index]),
                                   (agg_chosen_bytes << 3)),
        (is_simple_count_target && !arg_expr)
            ? (agg_chosen_bytes == sizeof(int32_t) ? LL_INT(int32_t(0))
                                                   : LL_INT(int64_t(0)))
            : (is_simple_count_target && arg_expr && str_target_lv ? str_target_lv
                                                                   : target_lv)};
    if (query_mem_desc.isLogicalSizedColumnsAllowed()) {
      if (is_simple_count_target && arg_expr && str_target_lv) {
        agg_args[1] =
            agg_chosen_bytes == sizeof(int32_t) ? LL_INT(int32_t(0)) : LL_INT(int64_t(0));
      }
    }
    std::string agg_fname{agg_base_name};
    if (is_fp_arg) {
      if (!lazy_fetched) {
        if (agg_chosen_bytes == sizeof(float)) {
          CHECK_EQ(arg_type.get_type(), kFLOAT);
          agg_fname += "_float";
        } else {
          CHECK_EQ(agg_chosen_bytes, sizeof(double));
          agg_fname += "_double";
        }
      }
    } else if (agg_chosen_bytes == sizeof(int32_t)) {
      agg_fname += "_int32";
    } else if (agg_chosen_bytes == sizeof(int16_t) &&
               query_mem_desc.didOutputColumnar()) {
      agg_fname += "_int16";
    } else if (agg_chosen_bytes == sizeof(int8_t) && query_mem_desc.didOutputColumnar()) {
      agg_fname += "_int8";
    }

    if (is_distinct_target(target_info)) {
      CHECK_EQ(agg_chosen_bytes, sizeof(int64_t));
      CHECK(!chosen_type.is_fp());
      group_by_and_agg->codegenCountDistinct(
          target_idx, target_expr, agg_args, query_mem_desc, co.device_type);
    } else if (target_info.agg_kind == kAPPROX_QUANTILE) {
      CHECK_EQ(agg_chosen_bytes, sizeof(int64_t));
      group_by_and_agg->codegenApproxQuantile(
          target_idx, target_expr, agg_args, query_mem_desc, co.device_type);
    } else if (target_info.agg_kind == kMODE) {
      group_by_and_agg->codegenMode(
          target_idx, target_expr, agg_args, query_mem_desc, co.device_type);
    } else {
      const auto& arg_ti = target_info.agg_arg_type;
      if (need_skip_null && !arg_ti.is_geometry()) {
        agg_fname += "_skip_val";
      }

      if (target_info.agg_kind == kSINGLE_VALUE ||
          (need_skip_null && !arg_ti.is_geometry())) {
        llvm::Value* null_in_lv{nullptr};
        if (arg_ti.is_fp()) {
          null_in_lv = executor->cgen_state_->inlineFpNull(arg_ti);
        } else {
          null_in_lv = executor->cgen_state_->inlineIntNull(
              is_agg_domain_range_equivalent(target_info.agg_kind)
                  ? arg_ti
                  : target_info.sql_type);
        }
        CHECK(null_in_lv);
        auto null_lv =
            executor->cgen_state_->castToTypeIn(null_in_lv, (agg_chosen_bytes << 3));
        agg_args.push_back(null_lv);
      }
      if (target_info.agg_kind == kSUM_IF) {
        const auto agg_expr = dynamic_cast<const Analyzer::AggExpr*>(target_expr);
        auto cond_expr_lv =
            code_generator.codegen(agg_expr->get_arg1().get(), true, co).front();
        auto cond_lv = executor->codegenConditionalAggregateCondValSelector(
            cond_expr_lv, kSUM_IF, co);
        agg_args.push_back(cond_lv);
      }
      if (!target_info.is_distinct) {
        if (co.device_type == ExecutorDeviceType::GPU &&
            query_mem_desc.threadsShareMemory()) {
          agg_fname += "_shared";
          if (needs_unnest_double_patch) {
            agg_fname = patch_agg_fname(agg_fname);
          }
        }
        auto agg_fname_call_ret_lv = group_by_and_agg->emitCall(agg_fname, agg_args);

        if (agg_fname.find("checked") != std::string::npos) {
          group_by_and_agg->checkErrorCode(agg_fname_call_ret_lv);
        }
      }
    }
    const auto window_func = dynamic_cast<const Analyzer::WindowFunction*>(target_expr);
    // window function with framing has a different code path and codegen logic
    if (window_func && !window_func->hasFraming() &&
        window_function_requires_peer_handling(window_func)) {
      const auto window_func_context =
          WindowProjectNodeContext::getActiveWindowFunctionContext(executor);
      const auto pending_outputs =
          LL_INT(window_func_context->aggregateStatePendingOutputs());
      executor->cgen_state_->emitExternalCall("add_window_pending_output",
                                              llvm::Type::getVoidTy(LL_CONTEXT),
                                              {agg_args.front(), pending_outputs});
      const auto& window_func_ti = window_func->get_type_info();
      std::string apply_window_pending_outputs_name = "apply_window_pending_outputs";
      switch (window_func_ti.get_type()) {
        case kFLOAT: {
          apply_window_pending_outputs_name += "_float";
          if (query_mem_desc.didOutputColumnar()) {
            apply_window_pending_outputs_name += "_columnar";
          }
          break;
        }
        case kDOUBLE: {
          apply_window_pending_outputs_name += "_double";
          break;
        }
        default: {
          apply_window_pending_outputs_name += "_int";
          if (query_mem_desc.didOutputColumnar()) {
            apply_window_pending_outputs_name +=
                std::to_string(window_func_ti.get_size() * 8);
          } else {
            apply_window_pending_outputs_name += "64";
          }
          break;
        }
      }
      const auto partition_end =
          LL_INT(reinterpret_cast<int64_t>(window_func_context->partitionEnd()));
      executor->cgen_state_->emitExternalCall(apply_window_pending_outputs_name,
                                              llvm::Type::getVoidTy(LL_CONTEXT),
                                              {pending_outputs,
                                               target_lvs.front(),
                                               partition_end,
                                               code_generator.posArg(nullptr)});
    }

    ++slot_index;
    ++target_lv_idx;
  }
}
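
// The loop above composes runtime aggregate function names from the base name
// plus type, null-handling, and memory-scope suffixes; e.g. a nullable SUM over
// a DOUBLE column on GPU with shared output slots calls
// "agg_sum_double_skip_val_shared".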

void TargetExprCodegenBuilder::operator()(const Analyzer::Expr* target_expr,
                                          const Executor* executor,
                                          QueryMemoryDescriptor& query_mem_desc,
                                          const CompilationOptions& co) {
  AUTOMATIC_IR_METADATA(executor->cgen_state_.get());
  if (query_mem_desc.getPaddedSlotWidthBytes(slot_index_counter) == 0) {
    CHECK(!dynamic_cast<const Analyzer::AggExpr*>(target_expr));
    ++slot_index_counter;
    ++target_index_counter;
    return;
  }
  if (dynamic_cast<const Analyzer::UOper*>(target_expr) &&
      static_cast<const Analyzer::UOper*>(target_expr)->get_optype() == kUNNEST) {
    throw std::runtime_error("UNNEST not supported in the projection list yet.");
  }
  if ((executor->plan_state_->isLazyFetchColumn(target_expr) || !is_group_by) &&
      (static_cast<size_t>(query_mem_desc.getPaddedSlotWidthBytes(slot_index_counter)) <
       sizeof(int64_t)) &&
      !is_columnar_projection(query_mem_desc)) {
    // TODO(miyu): enable different byte width in the layout w/o padding
    throw CompilationRetryNoCompaction();
  }

  if (is_columnar_projection(query_mem_desc) &&
      executor->plan_state_->isLazyFetchColumn(target_expr)) {
    // For columnar outputs, we need to pad lazy fetched columns to 8 bytes to allow the
    // lazy fetch index to be placed in the column. The QueryMemoryDescriptor is created
    // before Lazy Fetch information is known, therefore we need to update the QMD with
    // the new slot size width bytes for these columns.
    query_mem_desc.setPaddedSlotWidthBytes(slot_index_counter, int8_t(8));
    CHECK_EQ(query_mem_desc.getPaddedSlotWidthBytes(slot_index_counter), int8_t(8));
  }

  auto target_info = get_target_info(target_expr, g_bigint_count);
  auto arg_expr = agg_arg(target_expr);
  if (arg_expr) {
    if (target_info.agg_kind == kSINGLE_VALUE || target_info.agg_kind == kSAMPLE ||
        target_info.agg_kind == kAPPROX_QUANTILE) {
      target_info.skip_null_val = false;
    } else if (query_mem_desc.getQueryDescriptionType() ==
                   QueryDescriptionType::NonGroupedAggregate &&
               !arg_expr->get_type_info().is_varlen()) {
      // TODO: COUNT is currently not null-aware for varlen types. Need to add proper code
      // generation for handling varlen nulls.
      target_info.skip_null_val = true;
    } else if (constrained_not_null(arg_expr, ra_exe_unit.quals)) {
      target_info.skip_null_val = false;
    }
  }

  if (!(query_mem_desc.getQueryDescriptionType() ==
        QueryDescriptionType::NonGroupedAggregate) &&
      (co.device_type == ExecutorDeviceType::GPU) && target_info.is_agg &&
      (target_info.agg_kind == kSAMPLE)) {
    sample_exprs_to_codegen.emplace_back(target_expr,
                                         target_info,
                                         slot_index_counter,
                                         target_index_counter++,
                                         is_group_by);
  } else {
    target_exprs_to_codegen.emplace_back(target_expr,
                                         target_info,
                                         slot_index_counter,
                                         target_index_counter++,
                                         is_group_by);
  }

  const auto agg_fn_names = agg_fn_base_names(
      target_info, is_varlen_projection(target_expr, target_info.sql_type));
  slot_index_counter += agg_fn_names.size();
}
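
// Note that slot_index_counter advances by the number of runtime aggregate
// functions for the target (e.g. two for AVG), not by one per target
// expression, keeping the slot layout consistent with agg_fn_base_names.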

namespace {

int64_t get_initial_agg_val(const TargetInfo& target_info,
                            const QueryMemoryDescriptor& query_mem_desc) {
  const bool is_group_by{query_mem_desc.isGroupBy()};
  if (target_info.agg_kind == kSAMPLE && target_info.sql_type.is_string() &&
      target_info.sql_type.get_compression() != kENCODING_NONE) {
    return get_agg_initial_val(target_info.agg_kind,
                               target_info.sql_type,
                               is_group_by,
                               query_mem_desc.getCompactByteWidth());
  }
  return 0;
}

} // namespace

void TargetExprCodegenBuilder::codegen(
    GroupByAndAggregate* group_by_and_agg,
    Executor* executor,
    const QueryMemoryDescriptor& query_mem_desc,
    const CompilationOptions& co,
    const GpuSharedMemoryContext& gpu_smem_context,
    const std::tuple<llvm::Value*, llvm::Value*>& agg_out_ptr_w_idx,
    const std::vector<llvm::Value*>& agg_out_vec,
    llvm::Value* output_buffer_byte_stream,
    llvm::Value* out_row_idx,
    llvm::Value* varlen_output_buffer,
    DiamondCodegen& diamond_codegen) const {
  CHECK(group_by_and_agg);
  CHECK(executor);
  AUTOMATIC_IR_METADATA(executor->cgen_state_.get());

  for (const auto& target_expr_codegen : target_exprs_to_codegen) {
    target_expr_codegen.codegen(group_by_and_agg,
                                executor,
                                query_mem_desc,
                                co,
                                gpu_smem_context,
                                agg_out_ptr_w_idx,
                                agg_out_vec,
                                output_buffer_byte_stream,
                                out_row_idx,
                                varlen_output_buffer,
                                diamond_codegen);
  }
  if (!sample_exprs_to_codegen.empty()) {
    codegenSampleExpressions(group_by_and_agg,
                             executor,
                             query_mem_desc,
                             co,
                             agg_out_ptr_w_idx,
                             agg_out_vec,
                             output_buffer_byte_stream,
                             out_row_idx,
                             diamond_codegen);
  }
}

void TargetExprCodegenBuilder::codegenSampleExpressions(
    GroupByAndAggregate* group_by_and_agg,
    Executor* executor,
    const QueryMemoryDescriptor& query_mem_desc,
    const CompilationOptions& co,
    const std::tuple<llvm::Value*, llvm::Value*>& agg_out_ptr_w_idx,
    const std::vector<llvm::Value*>& agg_out_vec,
    llvm::Value* output_buffer_byte_stream,
    llvm::Value* out_row_idx,
    DiamondCodegen& diamond_codegen) const {
  AUTOMATIC_IR_METADATA(executor->cgen_state_.get());
  CHECK(!sample_exprs_to_codegen.empty());
  CHECK(co.device_type == ExecutorDeviceType::GPU);
  if (sample_exprs_to_codegen.size() == 1 &&
      !sample_exprs_to_codegen.front().target_info.sql_type.is_varlen()) {
    codegenSingleSlotSampleExpression(group_by_and_agg,
                                      executor,
                                      query_mem_desc,
                                      co,
                                      agg_out_ptr_w_idx,
                                      agg_out_vec,
                                      output_buffer_byte_stream,
                                      out_row_idx,
                                      diamond_codegen);
  } else {
    codegenMultiSlotSampleExpressions(group_by_and_agg,
                                      executor,
                                      query_mem_desc,
                                      co,
                                      agg_out_ptr_w_idx,
                                      agg_out_vec,
                                      output_buffer_byte_stream,
                                      out_row_idx,
                                      diamond_codegen);
  }
}

void TargetExprCodegenBuilder::codegenSingleSlotSampleExpression(
    GroupByAndAggregate* group_by_and_agg,
    Executor* executor,
    const QueryMemoryDescriptor& query_mem_desc,
    const CompilationOptions& co,
    const std::tuple<llvm::Value*, llvm::Value*>& agg_out_ptr_w_idx,
    const std::vector<llvm::Value*>& agg_out_vec,
    llvm::Value* output_buffer_byte_stream,
    llvm::Value* out_row_idx,
    DiamondCodegen& diamond_codegen) const {
  AUTOMATIC_IR_METADATA(executor->cgen_state_.get());
  CHECK_EQ(size_t(1), sample_exprs_to_codegen.size());
  CHECK(!sample_exprs_to_codegen.front().target_info.sql_type.is_varlen());
  CHECK(co.device_type == ExecutorDeviceType::GPU);
  // no need for the atomic if we only have one SAMPLE target
  sample_exprs_to_codegen.front().codegen(group_by_and_agg,
                                          executor,
                                          query_mem_desc,
                                          co,
                                          {},
                                          agg_out_ptr_w_idx,
                                          agg_out_vec,
                                          output_buffer_byte_stream,
                                          out_row_idx,
                                          /*varlen_output_buffer=*/nullptr,
                                          diamond_codegen);
}

void TargetExprCodegenBuilder::codegenMultiSlotSampleExpressions(
    GroupByAndAggregate* group_by_and_agg,
    Executor* executor,
    const QueryMemoryDescriptor& query_mem_desc,
    const CompilationOptions& co,
    const std::tuple<llvm::Value*, llvm::Value*>& agg_out_ptr_w_idx,
    const std::vector<llvm::Value*>& agg_out_vec,
    llvm::Value* output_buffer_byte_stream,
    llvm::Value* out_row_idx,
    DiamondCodegen& diamond_codegen) const {
  AUTOMATIC_IR_METADATA(executor->cgen_state_.get());
  CHECK(sample_exprs_to_codegen.size() > 1 ||
        sample_exprs_to_codegen.front().target_info.sql_type.is_varlen());
  CHECK(co.device_type == ExecutorDeviceType::GPU);
  const auto& first_sample_expr = sample_exprs_to_codegen.front();
  auto target_lvs = group_by_and_agg->codegenAggArg(first_sample_expr.target_expr, co);
  CHECK_GE(target_lvs.size(), size_t(1));

  const auto init_val =
      get_initial_agg_val(first_sample_expr.target_info, query_mem_desc);

  llvm::Value* agg_col_ptr{nullptr};
  if (is_group_by) {
    const auto agg_column_size_bytes =
        query_mem_desc.isLogicalSizedColumnsAllowed() &&
                !first_sample_expr.target_info.sql_type.is_varlen()
            ? first_sample_expr.target_info.sql_type.get_size()
            : sizeof(int64_t);
    agg_col_ptr = group_by_and_agg->codegenAggColumnPtr(output_buffer_byte_stream,
                                                        out_row_idx,
                                                        agg_out_ptr_w_idx,
                                                        query_mem_desc,
                                                        agg_column_size_bytes,
                                                        first_sample_expr.base_slot_index,
                                                        first_sample_expr.target_idx);
  } else {
    CHECK_LT(static_cast<size_t>(first_sample_expr.base_slot_index), agg_out_vec.size());
    agg_col_ptr =
        executor->castToIntPtrTyIn(agg_out_vec[first_sample_expr.base_slot_index], 64);
  }

  auto sample_cas_lv =
      codegenSlotEmptyKey(agg_col_ptr, target_lvs, executor, query_mem_desc, init_val);

  DiamondCodegen sample_cfg(
      sample_cas_lv, executor, false, "sample_valcheck", &diamond_codegen, false);

  for (const auto& target_expr_codegen : sample_exprs_to_codegen) {
    target_expr_codegen.codegen(group_by_and_agg,
                                executor,
                                query_mem_desc,
                                co,
                                {},
                                agg_out_ptr_w_idx,
                                agg_out_vec,
                                output_buffer_byte_stream,
                                out_row_idx,
                                /*varlen_output_buffer=*/nullptr,
                                diamond_codegen,
                                &sample_cfg);
  }
}
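
// The compare-and-swap on the first sample slot acts as a claim on the whole
// row of SAMPLE slots: only when the slot still held its initial value does
// control enter the sample_valcheck diamond and write the remaining slots.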

llvm::Value* TargetExprCodegenBuilder::codegenSlotEmptyKey(
    llvm::Value* agg_col_ptr,
    std::vector<llvm::Value*>& target_lvs,
    Executor* executor,
    const QueryMemoryDescriptor& query_mem_desc,
    const int64_t init_val) const {
  AUTOMATIC_IR_METADATA(executor->cgen_state_.get());
  const auto& first_sample_expr = sample_exprs_to_codegen.front();
  const auto first_sample_slot_bytes =
      first_sample_expr.target_info.sql_type.is_varlen()
          ? sizeof(int64_t)
          : first_sample_expr.target_info.sql_type.get_size();
  llvm::Value* target_lv_casted{nullptr};
  // deciding whether proper casting is required for the first sample's slot:
  if (first_sample_expr.target_info.sql_type.is_varlen()) {
    target_lv_casted =
        LL_BUILDER.CreatePtrToInt(target_lvs.front(), llvm::Type::getInt64Ty(LL_CONTEXT));
  } else if (first_sample_expr.target_info.sql_type.is_fp()) {
    // Initialization value for SAMPLE on a float column should be 0
    CHECK_EQ(init_val, 0);
    if (query_mem_desc.isLogicalSizedColumnsAllowed()) {
      target_lv_casted = executor->cgen_state_->ir_builder_.CreateFPToSI(
          target_lvs.front(),
          first_sample_slot_bytes == sizeof(float) ? llvm::Type::getInt32Ty(LL_CONTEXT)
                                                   : llvm::Type::getInt64Ty(LL_CONTEXT));
    } else {
      target_lv_casted = executor->cgen_state_->ir_builder_.CreateFPToSI(
          target_lvs.front(), llvm::Type::getInt64Ty(LL_CONTEXT));
    }
  } else if (first_sample_slot_bytes != sizeof(int64_t) &&
             !query_mem_desc.isLogicalSizedColumnsAllowed()) {
    target_lv_casted =
        executor->cgen_state_->ir_builder_.CreateCast(llvm::Instruction::CastOps::SExt,
                                                      target_lvs.front(),
                                                      llvm::Type::getInt64Ty(LL_CONTEXT));
  } else {
    target_lv_casted = target_lvs.front();
  }

  std::string slot_empty_cas_func_name("slotEmptyKeyCAS");
  llvm::Value* init_val_lv{LL_INT(init_val)};
  if (query_mem_desc.isLogicalSizedColumnsAllowed() &&
      !first_sample_expr.target_info.sql_type.is_varlen()) {
    // add proper suffix to the function name:
    switch (first_sample_slot_bytes) {
      case 1:
        slot_empty_cas_func_name += "_int8";
        break;
      case 2:
        slot_empty_cas_func_name += "_int16";
        break;
      case 4:
        slot_empty_cas_func_name += "_int32";
        break;
      case 8:
        break;
      default:
        UNREACHABLE() << "Invalid slot size for slotEmptyKeyCAS function.";
        break;
    }
    if (first_sample_slot_bytes != sizeof(int64_t)) {
      init_val_lv = llvm::ConstantInt::get(
          get_int_type(first_sample_slot_bytes * 8, LL_CONTEXT), init_val);
    }
  }

  auto sample_cas_lv = executor->cgen_state_->emitExternalCall(
      slot_empty_cas_func_name,
      llvm::Type::getInt1Ty(executor->cgen_state_->context_),
      {agg_col_ptr, target_lv_casted, init_val_lv});
  return sample_cas_lv;
}
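
// With logical sized columns, the slot width selects among the runtime helpers
// slotEmptyKeyCAS (8 bytes, the default), slotEmptyKeyCAS_int32,
// slotEmptyKeyCAS_int16, and slotEmptyKeyCAS_int8; each returns an i1 that
// feeds the sample_valcheck diamond in codegenMultiSlotSampleExpressions above.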