OmniSciDB  cde582ebc3
TargetExprBuilder.cpp
/*
 * Copyright 2022 HEAVY.AI, Inc.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

/**
 * @file    TargetExprBuilder.cpp
 * @brief   Helpers for codegen of target expressions.
 */

#include "TargetExprBuilder.h"

#include "CodeGenerator.h"
#include "Execute.h"
#include "GroupByAndAggregate.h"
#include "Logger/Logger.h"
#include "MaxwellCodegenPatch.h"
#include "OutputBufferInitialization.h"

#define LL_CONTEXT executor->cgen_state_->context_
#define LL_BUILDER executor->cgen_state_->ir_builder_
#define LL_BOOL(v) executor->ll_bool(v)
#define LL_INT(v) executor->cgen_state_->llInt(v)
#define LL_FP(v) executor->cgen_state_->llFp(v)
#define ROW_FUNC executor->cgen_state_->row_func_

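// The macros above simply shorten common CgenState accesses: for example,
// LL_INT(int64_t(1)) materializes a 64-bit llvm::ConstantInt in the current LLVM
// context, and LL_BUILDER is the IRBuilder positioned at the current insertion point.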
namespace {

inline bool is_varlen_projection(const Analyzer::Expr* target_expr,
                                 const SQLTypeInfo& ti) {
  return dynamic_cast<const Analyzer::GeoExpr*>(target_expr) && ti.get_type() == kPOINT;
}

std::vector<std::string> agg_fn_base_names(const TargetInfo& target_info,
                                           const bool is_varlen_projection) {
  const auto& chosen_type = get_compact_type(target_info);
  if (is_varlen_projection) {
    // TODO: support other types here
    CHECK(chosen_type.is_geometry());
    return {"agg_id_varlen"};
  }
  if (!target_info.is_agg || target_info.agg_kind == kSAMPLE) {
    if (chosen_type.is_geometry()) {
      return std::vector<std::string>(2 * chosen_type.get_physical_coord_cols(),
                                      "agg_id");
    }
    if (chosen_type.is_varlen()) {
      // not a varlen projection (not creating new varlen outputs). Just store the
      // pointer and offset into the input buffer in the output slots.
      return {"agg_id", "agg_id"};
    }
    return {"agg_id"};
  }
  switch (target_info.agg_kind) {
    case kAVG:
      return {"agg_sum", "agg_count"};
    case kCOUNT:
      return {target_info.is_distinct ? "agg_count_distinct" : "agg_count"};
    case kMAX:
      return {"agg_max"};
    case kMIN:
      return {"agg_min"};
    case kSUM:
      return {"agg_sum"};
    case kAPPROX_COUNT_DISTINCT:
      return {"agg_approximate_count_distinct"};
    case kAPPROX_QUANTILE:
      return {"agg_approx_quantile"};
    case kSINGLE_VALUE:
      return {"checked_single_agg_id"};
    case kSAMPLE:
      return {"agg_id"};
    default:
      UNREACHABLE() << "Unrecognized agg kind: " << std::to_string(target_info.agg_kind);
  }
  return {};
}
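// Illustrative examples of the mapping above: an AVG target yields
// {"agg_sum", "agg_count"} (two output slots), COUNT(DISTINCT x) yields
// {"agg_count_distinct"}, and a projected POINT column yields one "agg_id" per
// pointer/size pair of its single physical coord column, i.e. {"agg_id", "agg_id"}.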

bool is_columnar_projection(const QueryMemoryDescriptor& query_mem_desc) {
  return (query_mem_desc.getQueryDescriptionType() == QueryDescriptionType::Projection ||
          query_mem_desc.getQueryDescriptionType() ==
              QueryDescriptionType::TableFunction) &&
         query_mem_desc.didOutputColumnar();
}

bool is_simple_count(const TargetInfo& target_info) {
  return target_info.is_agg && target_info.agg_kind == kCOUNT && !target_info.is_distinct;
}

bool target_has_geo(const TargetInfo& target_info) {
  return target_info.is_agg ? target_info.agg_arg_type.is_geometry()
                            : target_info.sql_type.is_geometry();
}

}  // namespace

void TargetExprCodegen::codegen(
    GroupByAndAggregate* group_by_and_agg,
    Executor* executor,
    const QueryMemoryDescriptor& query_mem_desc,
    const CompilationOptions& co,
    const GpuSharedMemoryContext& gpu_smem_context,
    const std::tuple<llvm::Value*, llvm::Value*>& agg_out_ptr_w_idx_in,
    const std::vector<llvm::Value*>& agg_out_vec,
    llvm::Value* output_buffer_byte_stream,
    llvm::Value* out_row_idx,
    llvm::Value* varlen_output_buffer,
    DiamondCodegen& diamond_codegen,
    DiamondCodegen* sample_cfg) const {
  CHECK(group_by_and_agg);
  CHECK(executor);
  AUTOMATIC_IR_METADATA(executor->cgen_state_.get());

  auto agg_out_ptr_w_idx = agg_out_ptr_w_idx_in;
  const auto arg_expr = agg_arg(target_expr);

  const bool varlen_projection = is_varlen_projection(target_expr, target_info.sql_type);
  const auto agg_fn_names = agg_fn_base_names(target_info, varlen_projection);
  const auto window_func = dynamic_cast<const Analyzer::WindowFunction*>(target_expr);

  auto target_lvs =
      window_func
          ? std::vector<llvm::Value*>{executor->codegenWindowFunction(target_idx, co)}
          : group_by_and_agg->codegenAggArg(target_expr, co);
  const auto window_row_ptr = window_func
                                  ? group_by_and_agg->codegenWindowRowPointer(
                                        window_func, query_mem_desc, co, diamond_codegen)
                                  : nullptr;
  if (window_row_ptr) {
    agg_out_ptr_w_idx =
        std::make_tuple(window_row_ptr, std::get<1>(agg_out_ptr_w_idx_in));
    if (window_function_is_aggregate(window_func->getKind())) {
      out_row_idx = window_row_ptr;
    }
  }

  llvm::Value* str_target_lv{nullptr};
  if (target_lvs.size() == 3 && !target_has_geo(target_info)) {
    // none encoding string, pop the packed pointer + length since
    // it's only useful for IS NULL checks and assumed to be only
    // two components (pointer and length) for the purpose of projection
    str_target_lv = target_lvs.front();
    target_lvs.erase(target_lvs.begin());
  }
  if (target_info.sql_type.is_geometry() && !varlen_projection) {
    // Geo cols are expanded to the physical coord cols. Each physical coord col is an
    // array. Ensure that the target values generated match the number of agg
    // functions before continuing
    if (target_lvs.size() < agg_fn_names.size()) {
      CHECK_EQ(target_lvs.size(), agg_fn_names.size() / 2);
      std::vector<llvm::Value*> new_target_lvs;
      new_target_lvs.reserve(agg_fn_names.size());
      for (const auto& target_lv : target_lvs) {
        new_target_lvs.push_back(target_lv);
        new_target_lvs.push_back(target_lv);
      }
      target_lvs = new_target_lvs;
    }
  }
  if (target_lvs.size() < agg_fn_names.size()) {
    CHECK_EQ(size_t(1), target_lvs.size());
    CHECK_EQ(size_t(2), agg_fn_names.size());
    for (size_t i = 1; i < agg_fn_names.size(); ++i) {
      target_lvs.push_back(target_lvs.front());
    }
  } else {
    if (target_has_geo(target_info)) {
      if (!target_info.is_agg && !varlen_projection) {
        CHECK_EQ(static_cast<size_t>(2 * target_info.sql_type.get_physical_coord_cols()),
                 target_lvs.size());
        CHECK_EQ(agg_fn_names.size(), target_lvs.size());
      }
    } else {
      CHECK(str_target_lv || (agg_fn_names.size() == target_lvs.size()));
      CHECK(target_lvs.size() == 1 || target_lvs.size() == 2);
    }
  }

  int32_t slot_index = base_slot_index;
  CHECK_GE(slot_index, 0);
  CHECK(is_group_by || static_cast<size_t>(slot_index) < agg_out_vec.size());

  uint32_t col_off{0};
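  // Fast path for a simple (non-distinct, non-nullable) COUNT on GPU when threads
  // share the output buffer: no null handling or value conversion is needed, so the
  // aggregate lowers to a single atomic increment of the 32- or 64-bit output slot.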
  if (co.device_type == ExecutorDeviceType::GPU && query_mem_desc.threadsShareMemory() &&
      is_simple_count(target_info) &&
      (!arg_expr || arg_expr->get_type_info().get_notnull())) {
    CHECK_EQ(size_t(1), agg_fn_names.size());
    const auto chosen_bytes = query_mem_desc.getPaddedSlotWidthBytes(slot_index);
    llvm::Value* agg_col_ptr{nullptr};
    if (is_group_by) {
      if (query_mem_desc.didOutputColumnar()) {
        col_off = query_mem_desc.getColOffInBytes(slot_index);
        CHECK_EQ(size_t(0), col_off % chosen_bytes);
        col_off /= chosen_bytes;
        CHECK(std::get<1>(agg_out_ptr_w_idx));
        auto offset =
            LL_BUILDER.CreateAdd(std::get<1>(agg_out_ptr_w_idx), LL_INT(col_off));
        auto* bit_cast = LL_BUILDER.CreateBitCast(
            std::get<0>(agg_out_ptr_w_idx),
            llvm::PointerType::get(get_int_type((chosen_bytes << 3), LL_CONTEXT), 0));
        agg_col_ptr = LL_BUILDER.CreateGEP(
            bit_cast->getType()->getScalarType()->getPointerElementType(),
            bit_cast,
            offset);
      } else {
        col_off = query_mem_desc.getColOnlyOffInBytes(slot_index);
        CHECK_EQ(size_t(0), col_off % chosen_bytes);
        col_off /= chosen_bytes;
        auto* bit_cast = LL_BUILDER.CreateBitCast(
            std::get<0>(agg_out_ptr_w_idx),
            llvm::PointerType::get(get_int_type((chosen_bytes << 3), LL_CONTEXT), 0));
        agg_col_ptr = LL_BUILDER.CreateGEP(
            bit_cast->getType()->getScalarType()->getPointerElementType(),
            bit_cast,
            LL_INT(col_off));
      }
    }

    if (chosen_bytes != sizeof(int32_t)) {
      CHECK_EQ(8, chosen_bytes);
      if (g_bigint_count) {
        const auto acc_i64 = LL_BUILDER.CreateBitCast(
            is_group_by ? agg_col_ptr : agg_out_vec[slot_index],
            llvm::PointerType::get(get_int_type(64, LL_CONTEXT), 0));
        if (gpu_smem_context.isSharedMemoryUsed()) {
          group_by_and_agg->emitCall(
              "agg_count_shared", std::vector<llvm::Value*>{acc_i64, LL_INT(int64_t(1))});
        } else {
          LL_BUILDER.CreateAtomicRMW(llvm::AtomicRMWInst::Add,
                                     acc_i64,
                                     LL_INT(int64_t(1)),
#if LLVM_VERSION_MAJOR > 12
                                     LLVM_ALIGN(8),
#endif
                                     llvm::AtomicOrdering::Monotonic);
        }
      } else {
        auto acc_i32 = LL_BUILDER.CreateBitCast(
            is_group_by ? agg_col_ptr : agg_out_vec[slot_index],
            llvm::PointerType::get(get_int_type(32, LL_CONTEXT), 0));
        if (gpu_smem_context.isSharedMemoryUsed()) {
          acc_i32 = LL_BUILDER.CreatePointerCast(
              acc_i32, llvm::Type::getInt32PtrTy(LL_CONTEXT, 3));
        }
        LL_BUILDER.CreateAtomicRMW(llvm::AtomicRMWInst::Add,
                                   acc_i32,
                                   LL_INT(1),
#if LLVM_VERSION_MAJOR > 12
                                   LLVM_ALIGN(4),
#endif
                                   llvm::AtomicOrdering::Monotonic);
      }
    } else {
      const auto acc_i32 = (is_group_by ? agg_col_ptr : agg_out_vec[slot_index]);
      if (gpu_smem_context.isSharedMemoryUsed()) {
        // Atomic operation on address space level 3 (Shared):
        const auto shared_acc_i32 = LL_BUILDER.CreatePointerCast(
            acc_i32, llvm::Type::getInt32PtrTy(LL_CONTEXT, 3));
        LL_BUILDER.CreateAtomicRMW(llvm::AtomicRMWInst::Add,
                                   shared_acc_i32,
                                   LL_INT(1),
#if LLVM_VERSION_MAJOR > 12
                                   LLVM_ALIGN(4),
#endif
                                   llvm::AtomicOrdering::Monotonic);
      } else {
        LL_BUILDER.CreateAtomicRMW(llvm::AtomicRMWInst::Add,
                                   acc_i32,
                                   LL_INT(1),
#if LLVM_VERSION_MAJOR > 12
                                   LLVM_ALIGN(4),
#endif
                                   llvm::AtomicOrdering::Monotonic);
      }
    }
    return;
  }

  codegenAggregate(group_by_and_agg,
                   executor,
                   query_mem_desc,
                   co,
                   target_lvs,
                   agg_out_ptr_w_idx,
                   agg_out_vec,
                   output_buffer_byte_stream,
                   out_row_idx,
                   varlen_output_buffer,
                   slot_index);
}

void TargetExprCodegen::codegenAggregate(
    GroupByAndAggregate* group_by_and_agg,
    Executor* executor,
    const QueryMemoryDescriptor& query_mem_desc,
    const CompilationOptions& co,
    const std::vector<llvm::Value*>& target_lvs,
    const std::tuple<llvm::Value*, llvm::Value*>& agg_out_ptr_w_idx,
    const std::vector<llvm::Value*>& agg_out_vec,
    llvm::Value* output_buffer_byte_stream,
    llvm::Value* out_row_idx,
    llvm::Value* varlen_output_buffer,
    int32_t slot_index) const {
  AUTOMATIC_IR_METADATA(executor->cgen_state_.get());
  size_t target_lv_idx = 0;
  const bool lazy_fetched{executor->plan_state_->isLazyFetchColumn(target_expr)};

  CodeGenerator code_generator(executor);

  const auto agg_fn_names = agg_fn_base_names(
      target_info, is_varlen_projection(target_expr, target_info.sql_type));
  auto arg_expr = agg_arg(target_expr);

  for (const auto& agg_base_name : agg_fn_names) {
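    // COUNT(DISTINCT x) over an array argument is handled out of line: a runtime
    // helper walks the array elements and inserts each one into the distinct-count
    // state, receiving the element null sentinel so null elements can be skipped.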
    if (target_info.is_distinct && arg_expr->get_type_info().is_array()) {
      CHECK_EQ(static_cast<size_t>(query_mem_desc.getLogicalSlotWidthBytes(slot_index)),
               sizeof(int64_t));
      // TODO(miyu): check if buffer may be columnar here
      CHECK(!query_mem_desc.didOutputColumnar());
      const auto& elem_ti = arg_expr->get_type_info().get_elem_type();
      uint32_t col_off{0};
      if (is_group_by) {
        const auto col_off_in_bytes = query_mem_desc.getColOnlyOffInBytes(slot_index);
        CHECK_EQ(size_t(0), col_off_in_bytes % sizeof(int64_t));
        col_off = col_off_in_bytes / sizeof(int64_t);
      }
      executor->cgen_state_->emitExternalCall(
          "agg_count_distinct_array_" + numeric_type_name(elem_ti),
          llvm::Type::getVoidTy(LL_CONTEXT),
          {is_group_by ? LL_BUILDER.CreateGEP(std::get<0>(agg_out_ptr_w_idx)
                                                  ->getType()
                                                  ->getScalarType()
                                                  ->getPointerElementType(),
                                              std::get<0>(agg_out_ptr_w_idx),
                                              LL_INT(col_off))
                       : agg_out_vec[slot_index],
           target_lvs[target_lv_idx],
           code_generator.posArg(arg_expr),
           elem_ti.is_fp()
               ? static_cast<llvm::Value*>(executor->cgen_state_->inlineFpNull(elem_ti))
               : static_cast<llvm::Value*>(
                     executor->cgen_state_->inlineIntNull(elem_ti))});
      ++slot_index;
      ++target_lv_idx;
      continue;
    }

    llvm::Value* agg_col_ptr{nullptr};
    const auto chosen_bytes =
        static_cast<size_t>(query_mem_desc.getPaddedSlotWidthBytes(slot_index));
    const auto& chosen_type = get_compact_type(target_info);
    const auto& arg_type =
        ((arg_expr && arg_expr->get_type_info().get_type() != kNULLT) &&
         !arg_expr->get_type_info().is_varlen())
            ? arg_expr->get_type_info()
            : target_info.sql_type;
    const bool is_fp_arg =
        !lazy_fetched && arg_type.get_type() != kNULLT && arg_type.is_fp();
    if (is_group_by) {
      agg_col_ptr = group_by_and_agg->codegenAggColumnPtr(output_buffer_byte_stream,
                                                          out_row_idx,
                                                          agg_out_ptr_w_idx,
                                                          query_mem_desc,
                                                          chosen_bytes,
                                                          slot_index,
                                                          target_idx);
      CHECK(agg_col_ptr);
      agg_col_ptr->setName("agg_col_ptr");
    }

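    // Varlen projection: a geo expression produces a freshly computed POINT with no
    // backing input buffer, so its coords are copied into the dedicated varlen output
    // buffer and the resulting offset is written to the regular 64-bit output slot.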
    if (is_varlen_projection(target_expr, target_info.sql_type)) {
      CHECK(!query_mem_desc.didOutputColumnar());

      CHECK_EQ(target_info.sql_type.get_type(), kPOINT);
      CHECK_LT(target_lv_idx, target_lvs.size());
      CHECK(varlen_output_buffer);
      auto target_lv = target_lvs[target_lv_idx];

      std::string agg_fname_suffix = "";
      if (co.device_type == ExecutorDeviceType::GPU &&
          query_mem_desc.threadsShareMemory()) {
        agg_fname_suffix += "_shared";
      }

      // first write the varlen data into the varlen buffer and get the pointer location
      // into the varlen buffer
      auto& builder = executor->cgen_state_->ir_builder_;
      auto orig_bb = builder.GetInsertBlock();
      auto target_ptr_type = llvm::dyn_cast<llvm::PointerType>(target_lv->getType());
      CHECK(target_ptr_type) << "Varlen projections expect a pointer input.";
      auto is_nullptr =
          builder.CreateICmp(llvm::CmpInst::ICMP_EQ,
                             target_lv,
                             llvm::ConstantPointerNull::get(llvm::PointerType::get(
                                 target_ptr_type->getPointerElementType(), 0)));
      llvm::BasicBlock* true_bb{nullptr};
      {
        DiamondCodegen nullcheck_diamond(
            is_nullptr, executor, false, "varlen_null_check", nullptr, false);
        // maintain a reference to the true bb, overriding the diamond codegen destructor
        true_bb = nullcheck_diamond.cond_true_;
        // if not null, process the pointer and insert it into the varlen buffer
        builder.SetInsertPoint(nullcheck_diamond.cond_false_);
        auto arr_ptr_lv = executor->cgen_state_->ir_builder_.CreateBitCast(
            target_lv,
            llvm::PointerType::get(get_int_type(8, executor->cgen_state_->context_), 0));
        const int64_t chosen_bytes =
            target_info.sql_type.get_compression() == kENCODING_GEOINT ? 8 : 16;
        auto* arg = get_arg_by_name(ROW_FUNC, "old_total_matched");
        const auto output_buffer_slot = LL_BUILDER.CreateZExt(
            LL_BUILDER.CreateLoad(arg->getType()->getPointerElementType(), arg),
            llvm::Type::getInt64Ty(LL_CONTEXT));
        const auto varlen_buffer_row_sz = query_mem_desc.varlenOutputBufferElemSize();
        CHECK(varlen_buffer_row_sz);
        const auto output_buffer_slot_bytes = LL_BUILDER.CreateAdd(
            LL_BUILDER.CreateMul(output_buffer_slot,
                                 executor->cgen_state_->llInt(
                                     static_cast<int64_t>(*varlen_buffer_row_sz))),
            executor->cgen_state_->llInt(static_cast<int64_t>(
                query_mem_desc.varlenOutputRowSizeToSlot(slot_index))));

        std::vector<llvm::Value*> varlen_agg_args{
            executor->castToIntPtrTyIn(varlen_output_buffer, 8),
            output_buffer_slot_bytes,
            arr_ptr_lv,
            executor->cgen_state_->llInt(chosen_bytes)};
        auto varlen_offset_ptr =
            group_by_and_agg->emitCall(agg_base_name + agg_fname_suffix, varlen_agg_args);

        // then write that pointer location into the 64 bit slot in the output buffer
        auto varlen_offset_int = LL_BUILDER.CreatePtrToInt(
            varlen_offset_ptr, llvm::Type::getInt64Ty(LL_CONTEXT));
        builder.CreateBr(nullcheck_diamond.cond_true_);

        // use the true block to do the output buffer insertion regardless of nullness
        builder.SetInsertPoint(nullcheck_diamond.cond_true_);
        auto output_phi =
            builder.CreatePHI(llvm::Type::getInt64Ty(executor->cgen_state_->context_), 2);
        output_phi->addIncoming(varlen_offset_int, nullcheck_diamond.cond_false_);
        output_phi->addIncoming(executor->cgen_state_->llInt(static_cast<int64_t>(0)),
                                orig_bb);
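        // The phi merges the two arms of the null-check diamond: the byte offset
        // returned by the varlen write on the not-null path, and 0 arriving from the
        // entry block for null inputs; agg_id below stores the merged value.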

        std::vector<llvm::Value*> agg_args{agg_col_ptr, output_phi};
        group_by_and_agg->emitCall("agg_id" + agg_fname_suffix, agg_args);
      }
      CHECK(true_bb);
      builder.SetInsertPoint(true_bb);

      ++slot_index;
      ++target_lv_idx;
      continue;
    }

    const bool float_argument_input = takes_float_argument(target_info);
    const bool is_count_in_avg = target_info.agg_kind == kAVG && target_lv_idx == 1;
    // The count component of an average should never be compacted.
    const auto agg_chosen_bytes =
        float_argument_input && !is_count_in_avg ? sizeof(float) : chosen_bytes;
    if (float_argument_input) {
      CHECK_GE(chosen_bytes, sizeof(float));
    }

    auto target_lv = target_lvs[target_lv_idx];
    const auto needs_unnest_double_patch = group_by_and_agg->needsUnnestDoublePatch(
        target_lv, agg_base_name, query_mem_desc.threadsShareMemory(), co);
    const auto need_skip_null = !needs_unnest_double_patch && target_info.skip_null_val;
    if (!needs_unnest_double_patch) {
      if (need_skip_null && !is_agg_domain_range_equivalent(target_info.agg_kind)) {
        target_lv = group_by_and_agg->convertNullIfAny(arg_type, target_info, target_lv);
      } else if (is_fp_arg) {
        target_lv = executor->castToFP(target_lv, arg_type, target_info.sql_type);
      }
      if (!dynamic_cast<const Analyzer::AggExpr*>(target_expr) || arg_expr) {
        target_lv =
            executor->cgen_state_->castToTypeIn(target_lv, (agg_chosen_bytes << 3));
      }
    }

    const bool is_simple_count_target = is_simple_count(target_info);
    llvm::Value* str_target_lv{nullptr};
    if (target_lvs.size() == 3 && !target_has_geo(target_info)) {
      // none encoding string
      str_target_lv = target_lvs.front();
    }
    std::vector<llvm::Value*> agg_args{
        executor->castToIntPtrTyIn((is_group_by ? agg_col_ptr : agg_out_vec[slot_index]),
                                   (agg_chosen_bytes << 3)),
        (is_simple_count_target && !arg_expr)
            ? (agg_chosen_bytes == sizeof(int32_t) ? LL_INT(int32_t(0))
                                                   : LL_INT(int64_t(0)))
            : (is_simple_count_target && arg_expr && str_target_lv ? str_target_lv
                                                                   : target_lv)};
    if (query_mem_desc.isLogicalSizedColumnsAllowed()) {
      if (is_simple_count_target && arg_expr && str_target_lv) {
        agg_args[1] =
            agg_chosen_bytes == sizeof(int32_t) ? LL_INT(int32_t(0)) : LL_INT(int64_t(0));
      }
    }
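    // The runtime aggregate implementation is selected by name composition: the base
    // name gains a type suffix (e.g. "agg_sum_double", "agg_max_int32"), then
    // "_skip_val" when nulls must be skipped, then "_shared" on GPU with shared
    // output buffers, e.g. "agg_max_double_skip_val_shared".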
    std::string agg_fname{agg_base_name};
    if (is_fp_arg) {
      if (!lazy_fetched) {
        if (agg_chosen_bytes == sizeof(float)) {
          CHECK_EQ(arg_type.get_type(), kFLOAT);
          agg_fname += "_float";
        } else {
          CHECK_EQ(agg_chosen_bytes, sizeof(double));
          agg_fname += "_double";
        }
      }
    } else if (agg_chosen_bytes == sizeof(int32_t)) {
      agg_fname += "_int32";
    } else if (agg_chosen_bytes == sizeof(int16_t) &&
               query_mem_desc.didOutputColumnar()) {
      agg_fname += "_int16";
    } else if (agg_chosen_bytes == sizeof(int8_t) && query_mem_desc.didOutputColumnar()) {
      agg_fname += "_int8";
    }

    if (is_distinct_target(target_info)) {
      CHECK_EQ(agg_chosen_bytes, sizeof(int64_t));
      CHECK(!chosen_type.is_fp());
      group_by_and_agg->codegenCountDistinct(
          target_idx, target_expr, agg_args, query_mem_desc, co.device_type);
    } else if (target_info.agg_kind == kAPPROX_QUANTILE) {
      CHECK_EQ(agg_chosen_bytes, sizeof(int64_t));
      group_by_and_agg->codegenApproxQuantile(
          target_idx, target_expr, agg_args, query_mem_desc, co.device_type);
    } else {
      const auto& arg_ti = target_info.agg_arg_type;
      if (need_skip_null && !arg_ti.is_geometry()) {
        agg_fname += "_skip_val";
      }

      if (target_info.agg_kind == kSINGLE_VALUE ||
          (need_skip_null && !arg_ti.is_geometry())) {
        llvm::Value* null_in_lv{nullptr};
        if (arg_ti.is_fp()) {
          null_in_lv =
              static_cast<llvm::Value*>(executor->cgen_state_->inlineFpNull(arg_ti));
        } else {
          null_in_lv = static_cast<llvm::Value*>(executor->cgen_state_->inlineIntNull(
              is_agg_domain_range_equivalent(target_info.agg_kind)
                  ? arg_ti
                  : target_info.sql_type));
        }
        CHECK(null_in_lv);
        auto null_lv =
            executor->cgen_state_->castToTypeIn(null_in_lv, (agg_chosen_bytes << 3));
        agg_args.push_back(null_lv);
      }
      if (!target_info.is_distinct) {
        if (co.device_type == ExecutorDeviceType::GPU &&
            query_mem_desc.threadsShareMemory()) {
          agg_fname += "_shared";
          if (needs_unnest_double_patch) {
            agg_fname = patch_agg_fname(agg_fname);
          }
        }
        auto agg_fname_call_ret_lv = group_by_and_agg->emitCall(agg_fname, agg_args);

        if (agg_fname.find("checked") != std::string::npos) {
          group_by_and_agg->checkErrorCode(agg_fname_call_ret_lv);
        }
      }
    }
    const auto window_func = dynamic_cast<const Analyzer::WindowFunction*>(target_expr);
    // window function with framing has a different code path and codegen logic
    if (window_func && !window_func->hasFraming() &&
        window_function_requires_peer_handling(window_func)) {
      const auto window_func_context =
          WindowProjectNodeContext::getActiveWindowFunctionContext(executor);
      const auto pending_outputs =
          LL_INT(window_func_context->aggregateStatePendingOutputs());
      executor->cgen_state_->emitExternalCall("add_window_pending_output",
                                              llvm::Type::getVoidTy(LL_CONTEXT),
                                              {agg_args.front(), pending_outputs});
      const auto& window_func_ti = window_func->get_type_info();
      std::string apply_window_pending_outputs_name = "apply_window_pending_outputs";
      switch (window_func_ti.get_type()) {
        case kFLOAT: {
          apply_window_pending_outputs_name += "_float";
          if (query_mem_desc.didOutputColumnar()) {
            apply_window_pending_outputs_name += "_columnar";
          }
          break;
        }
        case kDOUBLE: {
          apply_window_pending_outputs_name += "_double";
          break;
        }
        default: {
          apply_window_pending_outputs_name += "_int";
          if (query_mem_desc.didOutputColumnar()) {
            apply_window_pending_outputs_name +=
                std::to_string(window_func_ti.get_size() * 8);
          } else {
            apply_window_pending_outputs_name += "64";
          }
          break;
        }
      }
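      // e.g. a BIGINT window aggregate resolves to apply_window_pending_outputs_int64
      // for row-wise output, while a 4-byte integer slot in columnar layout resolves
      // to apply_window_pending_outputs_int32.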
      const auto partition_end =
          LL_INT(reinterpret_cast<int64_t>(window_func_context->partitionEnd()));
      executor->cgen_state_->emitExternalCall(apply_window_pending_outputs_name,
                                              llvm::Type::getVoidTy(LL_CONTEXT),
                                              {pending_outputs,
                                               target_lvs.front(),
                                               partition_end,
                                               code_generator.posArg(nullptr)});
    }

    ++slot_index;
    ++target_lv_idx;
  }
}

void TargetExprCodegenBuilder::operator()(const Analyzer::Expr* target_expr,
                                          const Executor* executor,
                                          QueryMemoryDescriptor& query_mem_desc,
                                          const CompilationOptions& co) {
  AUTOMATIC_IR_METADATA(executor->cgen_state_.get());
  if (query_mem_desc.getPaddedSlotWidthBytes(slot_index_counter) == 0) {
    CHECK(!dynamic_cast<const Analyzer::AggExpr*>(target_expr));
    ++slot_index_counter;
    ++target_index_counter;
    return;
  }
  if (dynamic_cast<const Analyzer::UOper*>(target_expr) &&
      static_cast<const Analyzer::UOper*>(target_expr)->get_optype() == kUNNEST) {
    throw std::runtime_error("UNNEST not supported in the projection list yet.");
  }
  if ((executor->plan_state_->isLazyFetchColumn(target_expr) || !is_group_by) &&
      (static_cast<size_t>(query_mem_desc.getPaddedSlotWidthBytes(slot_index_counter)) <
       sizeof(int64_t)) &&
      !is_columnar_projection(query_mem_desc)) {
    // TODO(miyu): enable different byte width in the layout w/o padding
    throw CompilationRetryNoCompaction();
  }

  if (is_columnar_projection(query_mem_desc) &&
      executor->plan_state_->isLazyFetchColumn(target_expr)) {
    // For columnar outputs, we need to pad lazy fetched columns to 8 bytes to allow the
    // lazy fetch index to be placed in the column. The QueryMemoryDescriptor is created
    // before Lazy Fetch information is known, therefore we need to update the QMD with
    // the new slot size width bytes for these columns.
    query_mem_desc.setPaddedSlotWidthBytes(slot_index_counter, int8_t(8));
    CHECK_EQ(query_mem_desc.getPaddedSlotWidthBytes(slot_index_counter), int8_t(8));
  }

  auto target_info = get_target_info(target_expr, g_bigint_count);
  auto arg_expr = agg_arg(target_expr);
  if (arg_expr) {
    if (target_info.agg_kind == kSINGLE_VALUE || target_info.agg_kind == kSAMPLE ||
        target_info.agg_kind == kAPPROX_QUANTILE) {
      target_info.skip_null_val = false;
    } else if (query_mem_desc.getQueryDescriptionType() ==
                   QueryDescriptionType::NonGroupedAggregate &&
               !arg_expr->get_type_info().is_varlen()) {
      // TODO: COUNT is currently not null-aware for varlen types. Need to add proper
      // code generation for handling varlen nulls.
      target_info.skip_null_val = true;
    } else if (constrained_not_null(arg_expr, ra_exe_unit.quals)) {
      target_info.skip_null_val = false;
    }
  }

  if (!(query_mem_desc.getQueryDescriptionType() ==
        QueryDescriptionType::NonGroupedAggregate) &&
      (co.device_type == ExecutorDeviceType::GPU) && target_info.is_agg &&
      (target_info.agg_kind == kSAMPLE)) {
    sample_exprs_to_codegen.emplace_back(target_expr,
                                         target_info,
                                         slot_index_counter,
                                         target_index_counter++,
                                         is_group_by);
  } else {
    target_exprs_to_codegen.emplace_back(target_expr,
                                         target_info,
                                         slot_index_counter,
                                         target_index_counter++,
                                         is_group_by);
  }

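  // Advance the slot counter by the number of physical output slots this target
  // consumes: AVG occupies two slots (agg_sum and agg_count), a simple MAX only one.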
  const auto agg_fn_names = agg_fn_base_names(
      target_info, is_varlen_projection(target_expr, target_info.sql_type));
  slot_index_counter += agg_fn_names.size();
}

namespace {

int64_t get_initial_agg_val(const TargetInfo& target_info,
                            const QueryMemoryDescriptor& query_mem_desc) {
  const bool is_group_by{query_mem_desc.isGroupBy()};
  if (target_info.agg_kind == kSAMPLE && target_info.sql_type.is_string() &&
      target_info.sql_type.get_compression() != kENCODING_NONE) {
    return get_agg_initial_val(target_info.agg_kind,
                               target_info.sql_type,
                               is_group_by,
                               query_mem_desc.getCompactByteWidth());
  }
  return 0;
}

}  // namespace

void TargetExprCodegenBuilder::codegen(
    GroupByAndAggregate* group_by_and_agg,
    Executor* executor,
    const QueryMemoryDescriptor& query_mem_desc,
    const CompilationOptions& co,
    const GpuSharedMemoryContext& gpu_smem_context,
    const std::tuple<llvm::Value*, llvm::Value*>& agg_out_ptr_w_idx,
    const std::vector<llvm::Value*>& agg_out_vec,
    llvm::Value* output_buffer_byte_stream,
    llvm::Value* out_row_idx,
    llvm::Value* varlen_output_buffer,
    DiamondCodegen& diamond_codegen) const {
  CHECK(group_by_and_agg);
  CHECK(executor);
  AUTOMATIC_IR_METADATA(executor->cgen_state_.get());

  for (const auto& target_expr_codegen : target_exprs_to_codegen) {
    target_expr_codegen.codegen(group_by_and_agg,
                                executor,
                                query_mem_desc,
                                co,
                                gpu_smem_context,
                                agg_out_ptr_w_idx,
                                agg_out_vec,
                                output_buffer_byte_stream,
                                out_row_idx,
                                varlen_output_buffer,
                                diamond_codegen);
  }
  if (!sample_exprs_to_codegen.empty()) {
    codegenSampleExpressions(group_by_and_agg,
                             executor,
                             query_mem_desc,
                             co,
                             agg_out_ptr_w_idx,
                             agg_out_vec,
                             output_buffer_byte_stream,
                             out_row_idx,
                             diamond_codegen);
  }
}

void TargetExprCodegenBuilder::codegenSampleExpressions(
    GroupByAndAggregate* group_by_and_agg,
    Executor* executor,
    const QueryMemoryDescriptor& query_mem_desc,
    const CompilationOptions& co,
    const std::tuple<llvm::Value*, llvm::Value*>& agg_out_ptr_w_idx,
    const std::vector<llvm::Value*>& agg_out_vec,
    llvm::Value* output_buffer_byte_stream,
    llvm::Value* out_row_idx,
    DiamondCodegen& diamond_codegen) const {
  AUTOMATIC_IR_METADATA(executor->cgen_state_.get());
  CHECK(!sample_exprs_to_codegen.empty());
  CHECK(co.device_type == ExecutorDeviceType::GPU);
  if (sample_exprs_to_codegen.size() == 1 &&
      !sample_exprs_to_codegen.front().target_info.sql_type.is_varlen()) {
    codegenSingleSlotSampleExpression(group_by_and_agg,
                                      executor,
                                      query_mem_desc,
                                      co,
                                      agg_out_ptr_w_idx,
                                      agg_out_vec,
                                      output_buffer_byte_stream,
                                      out_row_idx,
                                      diamond_codegen);
  } else {
    codegenMultiSlotSampleExpressions(group_by_and_agg,
                                      executor,
                                      query_mem_desc,
                                      co,
                                      agg_out_ptr_w_idx,
                                      agg_out_vec,
                                      output_buffer_byte_stream,
                                      out_row_idx,
                                      diamond_codegen);
  }
}

void TargetExprCodegenBuilder::codegenSingleSlotSampleExpression(
    GroupByAndAggregate* group_by_and_agg,
    Executor* executor,
    const QueryMemoryDescriptor& query_mem_desc,
    const CompilationOptions& co,
    const std::tuple<llvm::Value*, llvm::Value*>& agg_out_ptr_w_idx,
    const std::vector<llvm::Value*>& agg_out_vec,
    llvm::Value* output_buffer_byte_stream,
    llvm::Value* out_row_idx,
    DiamondCodegen& diamond_codegen) const {
  AUTOMATIC_IR_METADATA(executor->cgen_state_.get());
  CHECK_EQ(size_t(1), sample_exprs_to_codegen.size());
  CHECK(!sample_exprs_to_codegen.front().target_info.sql_type.is_varlen());
  // no need for the atomic if we only have one SAMPLE target
  sample_exprs_to_codegen.front().codegen(group_by_and_agg,
                                          executor,
                                          query_mem_desc,
                                          co,
                                          {},
                                          agg_out_ptr_w_idx,
                                          agg_out_vec,
                                          output_buffer_byte_stream,
                                          out_row_idx,
                                          /*varlen_output_buffer=*/nullptr,
                                          diamond_codegen);
}

void TargetExprCodegenBuilder::codegenMultiSlotSampleExpressions(
    GroupByAndAggregate* group_by_and_agg,
    Executor* executor,
    const QueryMemoryDescriptor& query_mem_desc,
    const CompilationOptions& co,
    const std::tuple<llvm::Value*, llvm::Value*>& agg_out_ptr_w_idx,
    const std::vector<llvm::Value*>& agg_out_vec,
    llvm::Value* output_buffer_byte_stream,
    llvm::Value* out_row_idx,
    DiamondCodegen& diamond_codegen) const {
  AUTOMATIC_IR_METADATA(executor->cgen_state_.get());
  CHECK(sample_exprs_to_codegen.size() > 1 ||
        sample_exprs_to_codegen.front().target_info.sql_type.is_varlen());
  const auto& first_sample_expr = sample_exprs_to_codegen.front();
  auto target_lvs = group_by_and_agg->codegenAggArg(first_sample_expr.target_expr, co);
  CHECK_GE(target_lvs.size(), size_t(1));

  const auto init_val =
      get_initial_agg_val(first_sample_expr.target_info, query_mem_desc);

  llvm::Value* agg_col_ptr{nullptr};
  if (is_group_by) {
    const auto agg_column_size_bytes =
        query_mem_desc.isLogicalSizedColumnsAllowed() &&
                !first_sample_expr.target_info.sql_type.is_varlen()
            ? first_sample_expr.target_info.sql_type.get_size()
            : sizeof(int64_t);
    agg_col_ptr = group_by_and_agg->codegenAggColumnPtr(output_buffer_byte_stream,
                                                        out_row_idx,
                                                        agg_out_ptr_w_idx,
                                                        query_mem_desc,
                                                        agg_column_size_bytes,
                                                        first_sample_expr.base_slot_index,
                                                        first_sample_expr.target_idx);
  } else {
    CHECK_LT(static_cast<size_t>(first_sample_expr.base_slot_index), agg_out_vec.size());
    agg_col_ptr =
        executor->castToIntPtrTyIn(agg_out_vec[first_sample_expr.base_slot_index], 64);
  }

  auto sample_cas_lv =
      codegenSlotEmptyKey(agg_col_ptr, target_lvs, executor, query_mem_desc, init_val);

  DiamondCodegen sample_cfg(
      sample_cas_lv, executor, false, "sample_valcheck", &diamond_codegen, false);

  for (const auto& target_expr_codegen : sample_exprs_to_codegen) {
    target_expr_codegen.codegen(group_by_and_agg,
                                executor,
                                query_mem_desc,
                                co,
                                {},
                                agg_out_ptr_w_idx,
                                agg_out_vec,
                                output_buffer_byte_stream,
                                out_row_idx,
                                /*varlen_output_buffer=*/nullptr,
                                diamond_codegen,
                                &sample_cfg);
  }
}

llvm::Value* TargetExprCodegenBuilder::codegenSlotEmptyKey(
    llvm::Value* agg_col_ptr,
    std::vector<llvm::Value*>& target_lvs,
    Executor* executor,
    const QueryMemoryDescriptor& query_mem_desc,
    const int64_t init_val) const {
  AUTOMATIC_IR_METADATA(executor->cgen_state_.get());
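  // The first sample slot doubles as the guard: slotEmptyKeyCAS atomically swaps the
  // slot's empty sentinel (init_val) for the incoming value and returns true only for
  // the winning thread, so the value is first converted to an integer of the slot's
  // width below.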
  const auto& first_sample_expr = sample_exprs_to_codegen.front();
  const auto first_sample_slot_bytes =
      first_sample_expr.target_info.sql_type.is_varlen()
          ? sizeof(int64_t)
          : first_sample_expr.target_info.sql_type.get_size();
  llvm::Value* target_lv_casted{nullptr};
  // deciding whether proper casting is required for the first sample's slot:
  if (first_sample_expr.target_info.sql_type.is_varlen()) {
    target_lv_casted =
        LL_BUILDER.CreatePtrToInt(target_lvs.front(), llvm::Type::getInt64Ty(LL_CONTEXT));
  } else if (first_sample_expr.target_info.sql_type.is_fp()) {
    // Initialization value for SAMPLE on a float column should be 0
    CHECK_EQ(init_val, 0);
    if (query_mem_desc.isLogicalSizedColumnsAllowed()) {
      target_lv_casted = executor->cgen_state_->ir_builder_.CreateFPToSI(
          target_lvs.front(),
          first_sample_slot_bytes == sizeof(float) ? llvm::Type::getInt32Ty(LL_CONTEXT)
                                                   : llvm::Type::getInt64Ty(LL_CONTEXT));
    } else {
      target_lv_casted = executor->cgen_state_->ir_builder_.CreateFPToSI(
          target_lvs.front(), llvm::Type::getInt64Ty(LL_CONTEXT));
    }
  } else if (first_sample_slot_bytes != sizeof(int64_t) &&
             !query_mem_desc.isLogicalSizedColumnsAllowed()) {
    target_lv_casted =
        executor->cgen_state_->ir_builder_.CreateCast(llvm::Instruction::CastOps::SExt,
                                                      target_lvs.front(),
                                                      llvm::Type::getInt64Ty(LL_CONTEXT));
  } else {
    target_lv_casted = target_lvs.front();
  }

  std::string slot_empty_cas_func_name("slotEmptyKeyCAS");
  llvm::Value* init_val_lv{LL_INT(init_val)};
  if (query_mem_desc.isLogicalSizedColumnsAllowed() &&
      !first_sample_expr.target_info.sql_type.is_varlen()) {
    // add proper suffix to the function name:
    switch (first_sample_slot_bytes) {
      case 1:
        slot_empty_cas_func_name += "_int8";
        break;
      case 2:
        slot_empty_cas_func_name += "_int16";
        break;
      case 4:
        slot_empty_cas_func_name += "_int32";
        break;
      case 8:
        break;
      default:
        UNREACHABLE() << "Invalid slot size for slotEmptyKeyCAS function.";
        break;
    }
    if (first_sample_slot_bytes != sizeof(int64_t)) {
      init_val_lv = llvm::ConstantInt::get(
          get_int_type(first_sample_slot_bytes * 8, LL_CONTEXT), init_val);
    }
  }

  auto sample_cas_lv = executor->cgen_state_->emitExternalCall(
      slot_empty_cas_func_name,
      llvm::Type::getInt1Ty(executor->cgen_state_->context_),
      {agg_col_ptr, target_lv_casted, init_val_lv});
  return sample_cas_lv;
}