OmniSciDB 06b3bd477c
ResultSetReductionJIT.cpp
/*
 * Copyright 2019 OmniSci, Inc.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#include "ResultSetReductionJIT.h"

#include "CodeGenerator.h"
#include "DynamicWatchdog.h"
#include "Execute.h"
#include "IRCodegenUtils.h"

#include "Shared/likely.h"
#include "Shared/mapdpath.h"

#include <llvm/Bitcode/BitcodeReader.h>
#include <llvm/IR/Function.h>
#include <llvm/IR/IRBuilder.h>
#include <llvm/IR/Verifier.h>
#include <llvm/Support/SourceMgr.h>
#include <llvm/Support/raw_os_ostream.h>

extern std::unique_ptr<llvm::Module> g_rt_module;

namespace {

// Error code to be returned when the watchdog timer triggers during the reduction.
const int32_t WATCHDOG_ERROR{-1};
// Use the interpreter, not the JIT, for a number of entries lower than the threshold.
const size_t INTERP_THRESHOLD{25};

// Load the value stored at 'ptr' interpreted as 'ptr_type'.
Value* emit_load(Value* ptr, Type ptr_type, Function* function) {
  return function->add<Load>(
      function->add<Cast>(Cast::CastOp::BitCast, ptr, ptr_type, ""),
      ptr->label() + "_loaded");
}

// Load the value stored at 'ptr' as a 32-bit signed integer.
Value* emit_load_i32(Value* ptr, Function* function) {
  return emit_load(ptr, Type::Int32Ptr, function);
}

// Load the value stored at 'ptr' as a 64-bit signed integer.
Value* emit_load_i64(Value* ptr, Function* function) {
  return emit_load(ptr, Type::Int64Ptr, function);
}

// Read a 32- or 64-bit integer stored at 'ptr' and sign extend to 64-bit.
Value* emit_read_int_from_buff(Value* ptr, const int8_t compact_sz, Function* function) {
  switch (compact_sz) {
    case 8: {
      return emit_load_i64(ptr, function);
    }
    case 4: {
      const auto loaded_val = emit_load_i32(ptr, function);
      return function->add<Cast>(Cast::CastOp::SExt, loaded_val, Type::Int64, "");
    }
    default: {
      LOG(FATAL) << "Invalid byte width: " << compact_sz;
      return nullptr;
    }
  }
}
// Emit a runtime call to accumulate into the 'val_ptr' byte address the 'other_ptr'
// value when the type is specified as not null.
void emit_aggregate_one_value(const std::string& agg_kind,
                              Value* val_ptr,
                              Value* other_ptr,
                              const size_t chosen_bytes,
                              const TargetInfo& agg_info,
                              Function* ir_reduce_one_entry) {
  const auto sql_type = get_compact_type(agg_info);
  const auto dest_name = agg_kind + "_dest";
  if (sql_type.is_fp()) {
    if (chosen_bytes == sizeof(float)) {
      const auto agg = ir_reduce_one_entry->add<Cast>(
          Cast::CastOp::BitCast, val_ptr, Type::Int32Ptr, dest_name);
      const auto val = emit_load(other_ptr, Type::FloatPtr, ir_reduce_one_entry);
      ir_reduce_one_entry->add<Call>(
          "agg_" + agg_kind + "_float", std::vector<const Value*>{agg, val}, "");
    } else {
      CHECK_EQ(chosen_bytes, sizeof(double));
      const auto agg = ir_reduce_one_entry->add<Cast>(
          Cast::CastOp::BitCast, val_ptr, Type::Int64Ptr, dest_name);
      const auto val = emit_load(other_ptr, Type::DoublePtr, ir_reduce_one_entry);
      ir_reduce_one_entry->add<Call>(
          "agg_" + agg_kind + "_double", std::vector<const Value*>{agg, val}, "");
    }
  } else {
    if (chosen_bytes == sizeof(int32_t)) {
      const auto agg = ir_reduce_one_entry->add<Cast>(
          Cast::CastOp::BitCast, val_ptr, Type::Int32Ptr, dest_name);
      const auto val = emit_load(other_ptr, Type::Int32Ptr, ir_reduce_one_entry);
      ir_reduce_one_entry->add<Call>(
          "agg_" + agg_kind + "_int32", std::vector<const Value*>{agg, val}, "");
    } else {
      CHECK_EQ(chosen_bytes, sizeof(int64_t));
      const auto agg = ir_reduce_one_entry->add<Cast>(
          Cast::CastOp::BitCast, val_ptr, Type::Int64Ptr, dest_name);
      const auto val = emit_load(other_ptr, Type::Int64Ptr, ir_reduce_one_entry);
      ir_reduce_one_entry->add<Call>(
          "agg_" + agg_kind, std::vector<const Value*>{agg, val}, "");
    }
  }
}
// Same as above, but support nullable types as well.
void emit_aggregate_one_nullable_value(const std::string& agg_kind,
                                       Value* val_ptr,
                                       Value* other_ptr,
                                       const int64_t init_val,
                                       const size_t chosen_bytes,
                                       const TargetInfo& agg_info,
                                       Function* ir_reduce_one_entry) {
  const auto dest_name = agg_kind + "_dest";
  if (agg_info.skip_null_val) {
    const auto sql_type = get_compact_type(agg_info);
    if (sql_type.is_fp()) {
      if (chosen_bytes == sizeof(float)) {
        const auto agg = ir_reduce_one_entry->add<Cast>(
            Cast::CastOp::BitCast, val_ptr, Type::Int32Ptr, dest_name);
        const auto val = emit_load(other_ptr, Type::FloatPtr, ir_reduce_one_entry);
        const auto init_val_lv = ir_reduce_one_entry->addConstant<ConstantFP>(
            *reinterpret_cast<const float*>(may_alias_ptr(&init_val)), Type::Float);
        ir_reduce_one_entry->add<Call>("agg_" + agg_kind + "_float_skip_val",
                                       std::vector<const Value*>{agg, val, init_val_lv},
                                       "");
      } else {
        CHECK_EQ(chosen_bytes, sizeof(double));
        const auto agg = ir_reduce_one_entry->add<Cast>(
            Cast::CastOp::BitCast, val_ptr, Type::Int64Ptr, dest_name);
        const auto val = emit_load(other_ptr, Type::DoublePtr, ir_reduce_one_entry);
        const auto init_val_lv = ir_reduce_one_entry->addConstant<ConstantFP>(
            *reinterpret_cast<const double*>(may_alias_ptr(&init_val)), Type::Double);
        ir_reduce_one_entry->add<Call>("agg_" + agg_kind + "_double_skip_val",
                                       std::vector<const Value*>{agg, val, init_val_lv},
                                       "");
      }
    } else {
      if (chosen_bytes == sizeof(int32_t)) {
        const auto agg = ir_reduce_one_entry->add<Cast>(
            Cast::CastOp::BitCast, val_ptr, Type::Int32Ptr, dest_name);
        const auto val = emit_load(other_ptr, Type::Int32Ptr, ir_reduce_one_entry);
        const auto init_val_lv =
            ir_reduce_one_entry->addConstant<ConstantInt>(init_val, Type::Int32);
        ir_reduce_one_entry->add<Call>("agg_" + agg_kind + "_int32_skip_val",
                                       std::vector<const Value*>{agg, val, init_val_lv},
                                       "");
      } else {
        CHECK_EQ(chosen_bytes, sizeof(int64_t));
        const auto agg = ir_reduce_one_entry->add<Cast>(
            Cast::CastOp::BitCast, val_ptr, Type::Int64Ptr, dest_name);
        const auto val = emit_load(other_ptr, Type::Int64Ptr, ir_reduce_one_entry);
        const auto init_val_lv =
            ir_reduce_one_entry->addConstant<ConstantInt>(init_val, Type::Int64);
        ir_reduce_one_entry->add<Call>("agg_" + agg_kind + "_skip_val",
                                       std::vector<const Value*>{agg, val, init_val_lv},
                                       "");
      }
    }
  } else {
    emit_aggregate_one_value(
        agg_kind, val_ptr, other_ptr, chosen_bytes, agg_info, ir_reduce_one_entry);
  }
}
// Emit code to accumulate the 'other_ptr' count into the 'val_ptr' destination.
void emit_aggregate_one_count(Value* val_ptr,
                              Value* other_ptr,
                              const size_t chosen_bytes,
                              Function* ir_reduce_one_entry) {
  const auto dest_name = "count_dest";
  if (chosen_bytes == sizeof(int32_t)) {
    const auto agg = ir_reduce_one_entry->add<Cast>(
        Cast::CastOp::BitCast, val_ptr, Type::Int32Ptr, dest_name);
    const auto val = emit_load(other_ptr, Type::Int32Ptr, ir_reduce_one_entry);
    ir_reduce_one_entry->add<Call>(
        "agg_sum_int32", std::vector<const Value*>{agg, val}, "");
  } else {
    CHECK_EQ(chosen_bytes, sizeof(int64_t));
    const auto agg = ir_reduce_one_entry->add<Cast>(
        Cast::CastOp::BitCast, val_ptr, Type::Int64Ptr, dest_name);
    const auto val = emit_load(other_ptr, Type::Int64Ptr, ir_reduce_one_entry);
    ir_reduce_one_entry->add<Call>("agg_sum", std::vector<const Value*>{agg, val}, "");
  }
}
// Emit code to load the value stored at the 'other_pi8' as an integer of the given width
// 'chosen_bytes' and write it to the 'slot_pi8' destination only if necessary (the
// existing value at destination is the initialization value).
void emit_write_projection(Value* slot_pi8,
                           Value* other_pi8,
                           const int64_t init_val,
                           const size_t chosen_bytes,
                           Function* ir_reduce_one_entry) {
  const auto func_name = "write_projection_int" + std::to_string(chosen_bytes * 8);
  if (chosen_bytes == sizeof(int32_t)) {
    const auto proj_val = emit_load_i32(other_pi8, ir_reduce_one_entry);
    ir_reduce_one_entry->add<Call>(
        func_name,
        std::vector<const Value*>{
            slot_pi8,
            proj_val,
            ir_reduce_one_entry->addConstant<ConstantInt>(init_val, Type::Int64)},
        "");
  } else {
    CHECK_EQ(chosen_bytes, sizeof(int64_t));
    const auto proj_val = emit_load_i64(other_pi8, ir_reduce_one_entry);
    ir_reduce_one_entry->add<Call>(
        func_name,
        std::vector<const Value*>{
            slot_pi8,
            proj_val,
            ir_reduce_one_entry->addConstant<ConstantInt>(init_val, Type::Int64)},
        "");
  }
}
// Emit code to load the value stored at the 'other_pi8' as an integer of the given width
// 'chosen_bytes' and write it to the 'slot_pi8' destination only if necessary (the
// existing value at destination is the initialization value).
const Value* emit_checked_write_projection(Value* slot_pi8,
                                           Value* other_pi8,
                                           const int64_t init_val,
                                           const size_t chosen_bytes,
                                           Function* ir_reduce_one_entry) {
  if (chosen_bytes == sizeof(int32_t)) {
    const auto func_name = "checked_single_agg_id_int32";
    const auto proj_val = emit_load_i32(other_pi8, ir_reduce_one_entry);
    const auto slot_pi32 = ir_reduce_one_entry->add<Cast>(
        Cast::CastOp::BitCast, slot_pi8, Type::Int32Ptr, "");
    return ir_reduce_one_entry->add<Call>(
        func_name,
        Type::Int32,
        std::vector<const Value*>{
            slot_pi32,
            proj_val,
            ir_reduce_one_entry->addConstant<ConstantInt>(init_val, Type::Int32)},
        "");
  } else {
    const auto func_name = "checked_single_agg_id";
    CHECK_EQ(chosen_bytes, sizeof(int64_t));
    const auto proj_val = emit_load_i64(other_pi8, ir_reduce_one_entry);
    const auto slot_pi64 = ir_reduce_one_entry->add<Cast>(
        Cast::CastOp::BitCast, slot_pi8, Type::Int64Ptr, "");

    return ir_reduce_one_entry->add<Call>(
        func_name,
        Type::Int32,
        std::vector<const Value*>{
            slot_pi64,
            proj_val,
            ir_reduce_one_entry->addConstant<ConstantInt>(init_val, Type::Int64)},
        "");
  }
}
std::unique_ptr<Function> create_function(
    const std::string name,
    const std::vector<Function::NamedArg>& arg_types,
    const Type ret_type,
    const bool always_inline) {
  return std::make_unique<Function>(name, arg_types, ret_type, always_inline);
}

// Create the declaration for the 'is_empty_entry' function. Use private linkage since
// it's a helper only called from the generated code and mark it as always inline.
std::unique_ptr<Function> setup_is_empty_entry(ReductionCode* reduction_code) {
  return create_function(
      "is_empty_entry", {{"row_ptr", Type::Int8Ptr}}, Type::Int1, /*always_inline=*/true);
}
// Create the declaration for the 'reduce_one_entry' helper.
std::unique_ptr<Function> setup_reduce_one_entry(ReductionCode* reduction_code,
                                                 const QueryDescriptionType hash_type) {
  std::string this_ptr_name;
  std::string that_ptr_name;
  switch (hash_type) {
    case QueryDescriptionType::GroupByBaselineHash: {
      this_ptr_name = "this_targets_ptr";
      that_ptr_name = "that_targets_ptr";
      break;
    }
    case QueryDescriptionType::GroupByPerfectHash:
    case QueryDescriptionType::NonGroupedAggregate: {
      this_ptr_name = "this_row_ptr";
      that_ptr_name = "that_row_ptr";
      break;
    }
    default: {
      LOG(FATAL) << "Unexpected query description type";
    }
  }
  return create_function("reduce_one_entry",
                         {{this_ptr_name, Type::Int8Ptr},
                          {that_ptr_name, Type::Int8Ptr},
                          {"this_qmd", Type::VoidPtr},
                          {"that_qmd", Type::VoidPtr},
                          {"serialized_varlen_buffer_arg", Type::VoidPtr}},
                         Type::Int32,
                         /*always_inline=*/true);
}
// Create the declaration for the 'reduce_one_entry_idx' helper.
std::unique_ptr<Function> setup_reduce_one_entry_idx(ReductionCode* reduction_code) {
  return create_function("reduce_one_entry_idx",
                         {{"this_buff", Type::Int8Ptr},
                          {"that_buff", Type::Int8Ptr},
                          {"that_entry_idx", Type::Int32},
                          {"that_entry_count", Type::Int32},
                          {"this_qmd_handle", Type::VoidPtr},
                          {"that_qmd_handle", Type::VoidPtr},
                          {"serialized_varlen_buffer", Type::VoidPtr}},
                         Type::Int32,
                         /*always_inline=*/true);
}

// Create the declaration for the 'reduce_loop' entry point. Use external linkage, this is
// the public API of the generated code directly used from result set reduction.
std::unique_ptr<Function> setup_reduce_loop(ReductionCode* reduction_code) {
  return create_function("reduce_loop",
                         {{"this_buff", Type::Int8Ptr},
                          {"that_buff", Type::Int8Ptr},
                          {"start_index", Type::Int32},
                          {"end_index", Type::Int32},
                          {"that_entry_count", Type::Int32},
                          {"this_qmd_handle", Type::VoidPtr},
                          {"that_qmd_handle", Type::VoidPtr},
                          {"serialized_varlen_buffer", Type::VoidPtr}},
                         Type::Int32,
                         /*always_inline=*/false);
}
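// Create the LLVM declaration matching the signature of the given reduction IR
// 'function': build the parameter and return types, pick private or external linkage
// based on the always-inline flag, and name the arguments.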
llvm::Function* create_llvm_function(const Function* function,
                                     const CgenState* cgen_state) {
  auto& ctx = cgen_state->context_;
  std::vector<llvm::Type*> parameter_types;
  const auto& arg_types = function->arg_types();
  for (const auto& named_arg : arg_types) {
    CHECK(named_arg.type != Type::Void);
    parameter_types.push_back(llvm_type(named_arg.type, ctx));
  }
  const auto func_type = llvm::FunctionType::get(
      llvm_type(function->ret_type(), ctx), parameter_types, false);
  const auto linkage = function->always_inline() ? llvm::Function::PrivateLinkage
                                                 : llvm::Function::ExternalLinkage;
  auto func =
      llvm::Function::Create(func_type, linkage, function->name(), cgen_state->module_);
  const auto arg_it = func->arg_begin();
  for (size_t i = 0; i < arg_types.size(); ++i) {
    const auto arg = &*(arg_it + i);
    arg->setName(arg_types[i].name);
  }
  if (function->always_inline()) {
    mark_function_always_inline(func);
  }
  return func;
}
// Setup the reduction function and helpers declarations, create a module and a code
// generation state object.
ReductionCode setup_functions_ir(const QueryDescriptionType hash_type) {
  ReductionCode reduction_code{};
  reduction_code.ir_is_empty = setup_is_empty_entry(&reduction_code);
  reduction_code.ir_reduce_one_entry = setup_reduce_one_entry(&reduction_code, hash_type);
  reduction_code.ir_reduce_one_entry_idx = setup_reduce_one_entry_idx(&reduction_code);
  reduction_code.ir_reduce_loop = setup_reduce_loop(&reduction_code);
  return reduction_code;
}

bool is_aggregate_query(const QueryDescriptionType hash_type) {
  return hash_type == QueryDescriptionType::GroupByBaselineHash ||
         hash_type == QueryDescriptionType::GroupByPerfectHash ||
         hash_type == QueryDescriptionType::NonGroupedAggregate;
}
// Variable length sample fast path (no serialized variable length buffer).
void varlen_buffer_sample(int8_t* this_ptr1,
                          int8_t* this_ptr2,
                          const int8_t* that_ptr1,
                          const int8_t* that_ptr2,
                          const int64_t init_val) {
  const auto rhs_proj_col = *reinterpret_cast<const int64_t*>(that_ptr1);
  if (rhs_proj_col != init_val) {
    *reinterpret_cast<int64_t*>(this_ptr1) = rhs_proj_col;
  }
  CHECK(this_ptr2 && that_ptr2);
  *reinterpret_cast<int64_t*>(this_ptr2) = *reinterpret_cast<const int64_t*>(that_ptr2);
}

}  // namespace
extern "C" void serialized_varlen_buffer_sample(
    const void* serialized_varlen_buffer_handle,
    int8_t* this_ptr1,
    int8_t* this_ptr2,
    const int8_t* that_ptr1,
    const int8_t* that_ptr2,
    const int64_t init_val,
    const int64_t length_to_elems) {
  if (!serialized_varlen_buffer_handle) {
    varlen_buffer_sample(this_ptr1, this_ptr2, that_ptr1, that_ptr2, init_val);
    return;
  }
  const auto& serialized_varlen_buffer =
      *reinterpret_cast<const std::vector<std::string>*>(serialized_varlen_buffer_handle);
  if (!serialized_varlen_buffer.empty()) {
    const auto rhs_proj_col = *reinterpret_cast<const int64_t*>(that_ptr1);
    CHECK_LT(static_cast<size_t>(rhs_proj_col), serialized_varlen_buffer.size());
    const auto& varlen_bytes_str = serialized_varlen_buffer[rhs_proj_col];
    const auto str_ptr = reinterpret_cast<const int8_t*>(varlen_bytes_str.c_str());
    *reinterpret_cast<int64_t*>(this_ptr1) = reinterpret_cast<const int64_t>(str_ptr);
    *reinterpret_cast<int64_t*>(this_ptr2) =
        static_cast<int64_t>(varlen_bytes_str.size() / length_to_elems);
  } else {
    varlen_buffer_sample(this_ptr1, this_ptr2, that_ptr1, that_ptr2, init_val);
  }
}
// Wrappers to be called from the generated code, sharing implementation with the rest of
// the system.

extern "C" void count_distinct_set_union_jit_rt(const int64_t new_set_handle,
                                                const int64_t old_set_handle,
                                                const void* that_qmd_handle,
                                                const void* this_qmd_handle,
                                                const int64_t target_logical_idx) {
  const auto that_qmd = reinterpret_cast<const QueryMemoryDescriptor*>(that_qmd_handle);
  const auto this_qmd = reinterpret_cast<const QueryMemoryDescriptor*>(this_qmd_handle);
  const auto& new_count_distinct_desc =
      that_qmd->getCountDistinctDescriptor(target_logical_idx);
  const auto& old_count_distinct_desc =
      this_qmd->getCountDistinctDescriptor(target_logical_idx);
  CHECK(old_count_distinct_desc.impl_type_ != CountDistinctImplType::Invalid);
  CHECK(old_count_distinct_desc.impl_type_ == new_count_distinct_desc.impl_type_);
  count_distinct_set_union(
      new_set_handle, old_set_handle, new_count_distinct_desc, old_count_distinct_desc);
}
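// Look up (or insert) the key of the 'that' entry in the 'this' baseline hash buffer,
// returning the matching targets pointer and a flag telling whether the destination
// entry was empty before the lookup.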
extern "C" void get_group_value_reduction_rt(int8_t* groups_buffer,
                                             const int8_t* key,
                                             const uint32_t key_count,
                                             const void* this_qmd_handle,
                                             const int8_t* that_buff,
                                             const uint32_t that_entry_idx,
                                             const uint32_t that_entry_count,
                                             const uint32_t row_size_bytes,
                                             int64_t** buff_out,
                                             uint8_t* empty) {
  const auto& this_qmd = *reinterpret_cast<const QueryMemoryDescriptor*>(this_qmd_handle);
  const auto gvi = get_group_value_reduction(reinterpret_cast<int64_t*>(groups_buffer),
                                             this_qmd.getEntryCount(),
                                             reinterpret_cast<const int64_t*>(key),
                                             key_count,
                                             this_qmd.getEffectiveKeyWidth(),
                                             this_qmd,
                                             reinterpret_cast<const int64_t*>(that_buff),
                                             that_entry_idx,
                                             that_entry_count,
                                             row_size_bytes >> 3);
  *buff_out = gvi.first;
  *empty = gvi.second;
}
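// Sample the dynamic watchdog once every 64 calls; returns true when the watchdog is
// enabled and has triggered.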
extern "C" uint8_t check_watchdog_rt(const size_t sample_seed) {
  if (UNLIKELY(g_enable_dynamic_watchdog && (sample_seed & 0x3F) == 0 &&
               dynamic_watchdog())) {
    return true;
  }
  return false;
}
ResultSetReductionJIT::ResultSetReductionJIT(const QueryMemoryDescriptor& query_mem_desc,
                                             const std::vector<TargetInfo>& targets,
                                             const std::vector<int64_t>& target_init_vals)
    : query_mem_desc_(query_mem_desc)
    , targets_(targets)
    , target_init_vals_(target_init_vals) {}
// The code generated for a reduction between two result set buffers is structured in
// several functions and their IR is stored in the 'ReductionCode' structure. At a high
// level, the pseudocode is:
//
// func is_empty_func(row_ptr):
//   ...
//
// func reduce_func_baseline(this_ptr, that_ptr):
//   if is_empty_func(that_ptr):
//     return
//   for each target in the row:
//     reduce target from that_ptr into this_ptr
//
// func reduce_func_perfect_hash(this_ptr, that_ptr):
//   if is_empty_func(that_ptr):
//     return
//   for each target in the row:
//     reduce target from that_ptr into this_ptr
//
// func reduce_func_idx(this_buff, that_buff, that_entry_index):
//   that_ptr = that_result_set[that_entry_index]
//   # Retrieval of 'this_ptr' is different between perfect hash and baseline.
//   this_ptr = this_result_set[that_entry_index]
//                or
//              get_row(key(that_row_ptr), this_result_setBuffer)
//   reduce_func_[baseline|perfect_hash](this_ptr, that_ptr)
//
// func reduce_loop(this_buff, that_buff, start_entry_index, end_entry_index):
//   for that_entry_index in [start_entry_index, end_entry_index):
//     reduce_func_idx(this_buff, that_buff, that_entry_index)
ReductionCode ResultSetReductionJIT::codegen() const {
  const auto hash_type = query_mem_desc_.getQueryDescriptionType();
  if (query_mem_desc_.didOutputColumnar() || !is_aggregate_query(hash_type)) {
    return {};
  }
  auto reduction_code = setup_functions_ir(hash_type);
  isEmpty(reduction_code);
  switch (hash_type) {
    case QueryDescriptionType::GroupByPerfectHash:
    case QueryDescriptionType::NonGroupedAggregate: {
      reduceOneEntryNoCollisions(reduction_code);
      reduceOneEntryNoCollisionsIdx(reduction_code);
      break;
    }
    case QueryDescriptionType::GroupByBaselineHash: {
      reduceOneEntryBaseline(reduction_code);
      reduceOneEntryBaselineIdx(reduction_code);
      break;
    }
    default: {
      LOG(FATAL) << "Unexpected query description type";
    }
  }
  reduceLoop(reduction_code);
  // For small result sets, avoid native code generation and use the interpreter instead.
  if (query_mem_desc_.getEntryCount() < INTERP_THRESHOLD) {
    return reduction_code;
  }
  std::lock_guard<std::mutex> reduction_guard(ReductionCode::s_reduction_mutex);
  CodeCacheKey key{cacheKey()};
  const auto compilation_context = s_code_cache.get(key);
  if (compilation_context) {
    auto cpu_context =
        std::dynamic_pointer_cast<CpuCompilationContext>(compilation_context->first);
    CHECK(cpu_context);
    return {reinterpret_cast<ReductionCode::FuncPtr>(cpu_context->func()),
            nullptr,
            nullptr,
            nullptr,
            std::move(reduction_code.ir_is_empty),
            std::move(reduction_code.ir_reduce_one_entry),
            std::move(reduction_code.ir_reduce_one_entry_idx),
            std::move(reduction_code.ir_reduce_loop)};
  }
  reduction_code.cgen_state.reset(new CgenState({}, false));
  auto cgen_state = reduction_code.cgen_state.get();
  std::unique_ptr<llvm::Module> module = runtime_module_shallow_copy(cgen_state);
  cgen_state->module_ = module.get();
  auto ir_is_empty = create_llvm_function(reduction_code.ir_is_empty.get(), cgen_state);
  auto ir_reduce_one_entry =
      create_llvm_function(reduction_code.ir_reduce_one_entry.get(), cgen_state);
  auto ir_reduce_one_entry_idx =
      create_llvm_function(reduction_code.ir_reduce_one_entry_idx.get(), cgen_state);
  auto ir_reduce_loop =
      create_llvm_function(reduction_code.ir_reduce_loop.get(), cgen_state);
  std::unordered_map<const Function*, llvm::Function*> f;
  f.emplace(reduction_code.ir_is_empty.get(), ir_is_empty);
  f.emplace(reduction_code.ir_reduce_one_entry.get(), ir_reduce_one_entry);
  f.emplace(reduction_code.ir_reduce_one_entry_idx.get(), ir_reduce_one_entry_idx);
  f.emplace(reduction_code.ir_reduce_loop.get(), ir_reduce_loop);
  translate_function(reduction_code.ir_is_empty.get(), ir_is_empty, reduction_code, f);
  translate_function(
      reduction_code.ir_reduce_one_entry.get(), ir_reduce_one_entry, reduction_code, f);
  translate_function(reduction_code.ir_reduce_one_entry_idx.get(),
                     ir_reduce_one_entry_idx,
                     reduction_code,
                     f);
  translate_function(
      reduction_code.ir_reduce_loop.get(), ir_reduce_loop, reduction_code, f);
  reduction_code.llvm_reduce_loop = ir_reduce_loop;
  reduction_code.module = std::move(module);
  return finalizeReductionCode(std::move(reduction_code),
                               ir_is_empty,
                               ir_reduce_one_entry,
                               ir_reduce_one_entry_idx,
                               key);
}

void ResultSetReductionJIT::clearCache() {
  // Clear stub cache to avoid crash caused by non-deterministic static destructor order
  // of LLVM context and the cache.
  StubGenerator::clearCache();
  s_code_cache.clear();
  g_rt_module = nullptr;
}
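// Generate the body of 'is_empty_entry': for keyless hash layouts inspect the designated
// target slot, otherwise compare the first key column against the empty key sentinel
// (EMPTY_KEY_32 / EMPTY_KEY_64).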
void ResultSetReductionJIT::isEmpty(const ReductionCode& reduction_code) const {
  auto ir_is_empty = reduction_code.ir_is_empty.get();
  CHECK(is_aggregate_query(query_mem_desc_.getQueryDescriptionType()));
  CHECK(!query_mem_desc_.didOutputColumnar());
  Value* key{nullptr};
  Value* empty_key_val{nullptr};
  const auto keys_ptr = ir_is_empty->arg(0);
  if (query_mem_desc_.hasKeylessHash()) {
    CHECK(query_mem_desc_.getQueryDescriptionType() ==
          QueryDescriptionType::GroupByPerfectHash);
    CHECK_GE(query_mem_desc_.getTargetIdxForKey(), 0);
    CHECK_LT(static_cast<size_t>(query_mem_desc_.getTargetIdxForKey()),
             target_init_vals_.size());
    const int64_t target_slot_off =
        get_byteoff_of_slot(query_mem_desc_.getTargetIdxForKey(), query_mem_desc_);
    const auto slot_ptr = ir_is_empty->add<GetElementPtr>(
        keys_ptr,
        ir_is_empty->addConstant<ConstantInt>(target_slot_off, Type::Int32),
        "is_empty_slot_ptr");
    const auto compact_sz =
        query_mem_desc_.getPaddedSlotWidthBytes(query_mem_desc_.getTargetIdxForKey());
    key = emit_read_int_from_buff(slot_ptr, compact_sz, ir_is_empty);
    empty_key_val = ir_is_empty->addConstant<ConstantInt>(
        target_init_vals_[query_mem_desc_.getTargetIdxForKey()], Type::Int64);
  } else {
    switch (query_mem_desc_.getEffectiveKeyWidth()) {
      case 4: {
        CHECK(query_mem_desc_.getQueryDescriptionType() ==
              QueryDescriptionType::GroupByBaselineHash);
        key = emit_load_i32(keys_ptr, ir_is_empty);
        empty_key_val = ir_is_empty->addConstant<ConstantInt>(EMPTY_KEY_32, Type::Int32);
        break;
      }
      case 8: {
        key = emit_load_i64(keys_ptr, ir_is_empty);
        empty_key_val = ir_is_empty->addConstant<ConstantInt>(EMPTY_KEY_64, Type::Int64);
        break;
      }
      default:
        LOG(FATAL) << "Invalid key width";
    }
  }
  const auto ret =
      ir_is_empty->add<ICmp>(ICmp::Predicate::EQ, key, empty_key_val, "is_key_empty");
  ir_is_empty->add<Ret>(ret);
}
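// Generate the body of 'reduce_one_entry' for the perfect hash (row-wise) layout: return
// early if the 'that' row is empty, copy the key from the right hand side and then
// reduce the target slots.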
void ResultSetReductionJIT::reduceOneEntryNoCollisions(
    const ReductionCode& reduction_code) const {
  auto ir_reduce_one_entry = reduction_code.ir_reduce_one_entry.get();
  const auto this_row_ptr = ir_reduce_one_entry->arg(0);
  const auto that_row_ptr = ir_reduce_one_entry->arg(1);
  const auto that_is_empty =
      ir_reduce_one_entry->add<Call>(reduction_code.ir_is_empty.get(),
                                     std::vector<const Value*>{that_row_ptr},
                                     "that_is_empty");
  ir_reduce_one_entry->add<ReturnEarly>(
      that_is_empty, ir_reduce_one_entry->addConstant<ConstantInt>(0, Type::Int32), "");

  const auto key_bytes = get_key_bytes_rowwise(query_mem_desc_);
  if (key_bytes) {  // copy the key from right hand side
    ir_reduce_one_entry->add<MemCpy>(
        this_row_ptr,
        that_row_ptr,
        ir_reduce_one_entry->addConstant<ConstantInt>(key_bytes, Type::Int32));
  }

  const auto key_bytes_with_padding = align_to_int64(key_bytes);
  const auto key_bytes_lv =
      ir_reduce_one_entry->addConstant<ConstantInt>(key_bytes_with_padding, Type::Int32);
  const auto this_targets_start_ptr = ir_reduce_one_entry->add<GetElementPtr>(
      this_row_ptr, key_bytes_lv, "this_targets_start");
  const auto that_targets_start_ptr = ir_reduce_one_entry->add<GetElementPtr>(
      that_row_ptr, key_bytes_lv, "that_targets_start");

  reduceOneEntryTargetsNoCollisions(
      ir_reduce_one_entry, this_targets_start_ptr, that_targets_start_ptr);
}
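// Walk the target slots of a single row and emit the reduction of each one, keeping the
// running 'this'/'that' slot pointers and the initialization value index in sync; AVG
// and variable length SAMPLE targets consume two slots.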
void ResultSetReductionJIT::reduceOneEntryTargetsNoCollisions(
    Function* ir_reduce_one_entry,
    Value* this_targets_start_ptr,
    Value* that_targets_start_ptr) const {
  const auto& col_slot_context = query_mem_desc_.getColSlotContext();
  Value* this_targets_ptr = this_targets_start_ptr;
  Value* that_targets_ptr = that_targets_start_ptr;
  size_t init_agg_val_idx = 0;
  for (size_t target_logical_idx = 0; target_logical_idx < targets_.size();
       ++target_logical_idx) {
    const auto& target_info = targets_[target_logical_idx];
    const auto& slots_for_col = col_slot_context.getSlotsForCol(target_logical_idx);
    Value* this_ptr2{nullptr};
    Value* that_ptr2{nullptr};

    bool two_slot_target{false};
    if (target_info.is_agg &&
        (target_info.agg_kind == kAVG ||
         (target_info.agg_kind == kSAMPLE && target_info.sql_type.is_varlen()))) {
      // Note that this assumes if one of the slot pairs in a given target is an array,
      // all slot pairs are arrays. Currently this is true for all geo targets, but we
      // should better codify and store this information in the future
      two_slot_target = true;
    }

    for (size_t target_slot_idx = slots_for_col.front();
         target_slot_idx < slots_for_col.back() + 1;
         target_slot_idx += 2) {
      const auto slot_off_val = query_mem_desc_.getPaddedSlotWidthBytes(target_slot_idx);
      const auto slot_off =
          ir_reduce_one_entry->addConstant<ConstantInt>(slot_off_val, Type::Int32);
      if (UNLIKELY(two_slot_target)) {
        const auto desc = "target_" + std::to_string(target_logical_idx) + "_second_slot";
        this_ptr2 = ir_reduce_one_entry->add<GetElementPtr>(
            this_targets_ptr, slot_off, "this_" + desc);
        that_ptr2 = ir_reduce_one_entry->add<GetElementPtr>(
            that_targets_ptr, slot_off, "that_" + desc);
      }
      reduceOneSlot(this_targets_ptr,
                    this_ptr2,
                    that_targets_ptr,
                    that_ptr2,
                    target_info,
                    target_logical_idx,
                    target_slot_idx,
                    init_agg_val_idx,
                    slots_for_col.front(),
                    ir_reduce_one_entry);
      auto increment_agg_val_idx_maybe =
          [&init_agg_val_idx, &target_logical_idx, this](const int slot_count) {
            if (query_mem_desc_.targetGroupbyIndicesSize() == 0 ||
                query_mem_desc_.getTargetGroupbyIndex(target_logical_idx) < 0) {
              init_agg_val_idx += slot_count;
            }
          };
      if (target_logical_idx + 1 == targets_.size() &&
          target_slot_idx + 1 >= slots_for_col.back()) {
        break;
      }
      const auto next_desc =
          "target_" + std::to_string(target_logical_idx + 1) + "_first_slot";
      if (UNLIKELY(two_slot_target)) {
        increment_agg_val_idx_maybe(2);
        const auto two_slot_off = ir_reduce_one_entry->addConstant<ConstantInt>(
            slot_off_val + query_mem_desc_.getPaddedSlotWidthBytes(target_slot_idx + 1),
            Type::Int32);
        this_targets_ptr = ir_reduce_one_entry->add<GetElementPtr>(
            this_targets_ptr, two_slot_off, "this_" + next_desc);
        that_targets_ptr = ir_reduce_one_entry->add<GetElementPtr>(
            that_targets_ptr, two_slot_off, "that_" + next_desc);
      } else {
        increment_agg_val_idx_maybe(1);
        this_targets_ptr = ir_reduce_one_entry->add<GetElementPtr>(
            this_targets_ptr, slot_off, "this_" + next_desc);
        that_targets_ptr = ir_reduce_one_entry->add<GetElementPtr>(
            that_targets_ptr, slot_off, "that_" + next_desc);
      }
    }
  }
  ir_reduce_one_entry->add<Ret>(
      ir_reduce_one_entry->addConstant<ConstantInt>(0, Type::Int32));
}
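// Generate the body of 'reduce_one_entry' for the baseline hash layout: the arguments
// already point at the target slots of the matching entries, so only the per-target
// reduction is emitted here.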
void ResultSetReductionJIT::reduceOneEntryBaseline(
    const ReductionCode& reduction_code) const {
  auto ir_reduce_one_entry = reduction_code.ir_reduce_one_entry.get();
  const auto this_targets_ptr_arg = ir_reduce_one_entry->arg(0);
  const auto that_targets_ptr_arg = ir_reduce_one_entry->arg(1);
  Value* this_ptr1 = this_targets_ptr_arg;
  Value* that_ptr1 = that_targets_ptr_arg;
  size_t j = 0;
  size_t init_agg_val_idx = 0;
  for (size_t target_logical_idx = 0; target_logical_idx < targets_.size();
       ++target_logical_idx) {
    const auto& target_info = targets_[target_logical_idx];
    Value* this_ptr2{nullptr};
    Value* that_ptr2{nullptr};
    if (target_info.is_agg &&
        (target_info.agg_kind == kAVG ||
         (target_info.agg_kind == kSAMPLE && target_info.sql_type.is_varlen()))) {
      const auto desc = "target_" + std::to_string(target_logical_idx) + "_second_slot";
      const auto second_slot_rel_off =
          ir_reduce_one_entry->addConstant<ConstantInt>(sizeof(int64_t), Type::Int32);
      this_ptr2 = ir_reduce_one_entry->add<GetElementPtr>(
          this_ptr1, second_slot_rel_off, "this_" + desc);
      that_ptr2 = ir_reduce_one_entry->add<GetElementPtr>(
          that_ptr1, second_slot_rel_off, "that_" + desc);
    }
    reduceOneSlot(this_ptr1,
                  this_ptr2,
                  that_ptr1,
                  that_ptr2,
                  target_info,
                  target_logical_idx,
                  j,
                  init_agg_val_idx,
                  j,
                  ir_reduce_one_entry);
    if (target_logical_idx + 1 == targets_.size()) {
      break;
    }
    if (query_mem_desc_.targetGroupbyIndicesSize() == 0) {
      init_agg_val_idx = advance_slot(init_agg_val_idx, target_info, false);
    } else {
      if (query_mem_desc_.getTargetGroupbyIndex(target_logical_idx) < 0) {
        init_agg_val_idx = advance_slot(init_agg_val_idx, target_info, false);
      }
    }
    j = advance_slot(j, target_info, false);
    const auto next_desc =
        "target_" + std::to_string(target_logical_idx + 1) + "_first_slot";
    auto next_slot_rel_off = ir_reduce_one_entry->addConstant<ConstantInt>(
        init_agg_val_idx * sizeof(int64_t), Type::Int32);
    this_ptr1 = ir_reduce_one_entry->add<GetElementPtr>(
        this_targets_ptr_arg, next_slot_rel_off, next_desc);
    that_ptr1 = ir_reduce_one_entry->add<GetElementPtr>(
        that_targets_ptr_arg, next_slot_rel_off, next_desc);
  }
  ir_reduce_one_entry->add<Ret>(
      ir_reduce_one_entry->addConstant<ConstantInt>(0, Type::Int32));
}
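// Generate 'reduce_one_entry_idx' for the perfect hash layout: compute the row offset
// for the given entry index in both buffers and delegate to 'reduce_one_entry'.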
void ResultSetReductionJIT::reduceOneEntryNoCollisionsIdx(
    const ReductionCode& reduction_code) const {
  auto ir_reduce_one_entry_idx = reduction_code.ir_reduce_one_entry_idx.get();
  CHECK(query_mem_desc_.getQueryDescriptionType() ==
            QueryDescriptionType::GroupByPerfectHash ||
        query_mem_desc_.getQueryDescriptionType() ==
            QueryDescriptionType::NonGroupedAggregate);
  const auto this_buff = ir_reduce_one_entry_idx->arg(0);
  const auto that_buff = ir_reduce_one_entry_idx->arg(1);
  const auto entry_idx = ir_reduce_one_entry_idx->arg(2);
  const auto this_qmd_handle = ir_reduce_one_entry_idx->arg(4);
  const auto that_qmd_handle = ir_reduce_one_entry_idx->arg(5);
  const auto serialized_varlen_buffer_arg = ir_reduce_one_entry_idx->arg(6);
  const auto row_bytes = ir_reduce_one_entry_idx->addConstant<ConstantInt>(
      get_row_bytes(query_mem_desc_), Type::Int64);
  const auto entry_idx_64 = ir_reduce_one_entry_idx->add<Cast>(
      Cast::CastOp::SExt, entry_idx, Type::Int64, "entry_idx_64");
  const auto row_off_in_bytes = ir_reduce_one_entry_idx->add<BinaryOperator>(
      BinaryOperator::BinaryOp::Mul, entry_idx_64, row_bytes, "row_off_in_bytes");
  const auto this_row_ptr = ir_reduce_one_entry_idx->add<GetElementPtr>(
      this_buff, row_off_in_bytes, "this_row_ptr");
  const auto that_row_ptr = ir_reduce_one_entry_idx->add<GetElementPtr>(
      that_buff, row_off_in_bytes, "that_row_ptr");
  const auto reduce_rc = ir_reduce_one_entry_idx->add<Call>(
      reduction_code.ir_reduce_one_entry.get(),
      std::vector<const Value*>{this_row_ptr,
                                that_row_ptr,
                                this_qmd_handle,
                                that_qmd_handle,
                                serialized_varlen_buffer_arg},
      "");
  ir_reduce_one_entry_idx->add<Ret>(reduce_rc);
}
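// Generate 'reduce_one_entry_idx' for the baseline hash layout: skip empty 'that'
// entries, look up (or insert) the key in the 'this' buffer through
// 'get_group_value_reduction_rt' and reduce into the returned targets pointer.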
void ResultSetReductionJIT::reduceOneEntryBaselineIdx(
    const ReductionCode& reduction_code) const {
  auto ir_reduce_one_entry_idx = reduction_code.ir_reduce_one_entry_idx.get();
  CHECK(query_mem_desc_.getQueryDescriptionType() ==
        QueryDescriptionType::GroupByBaselineHash);
  CHECK(!query_mem_desc_.hasKeylessHash());
  CHECK(!query_mem_desc_.didOutputColumnar());
  const auto this_buff = ir_reduce_one_entry_idx->arg(0);
  const auto that_buff = ir_reduce_one_entry_idx->arg(1);
  const auto that_entry_idx = ir_reduce_one_entry_idx->arg(2);
  const auto that_entry_count = ir_reduce_one_entry_idx->arg(3);
  const auto this_qmd_handle = ir_reduce_one_entry_idx->arg(4);
  const auto that_qmd_handle = ir_reduce_one_entry_idx->arg(5);
  const auto serialized_varlen_buffer_arg = ir_reduce_one_entry_idx->arg(6);
  const auto row_bytes = ir_reduce_one_entry_idx->addConstant<ConstantInt>(
      get_row_bytes(query_mem_desc_), Type::Int64);
  const auto that_entry_idx_64 = ir_reduce_one_entry_idx->add<Cast>(
      Cast::CastOp::SExt, that_entry_idx, Type::Int64, "that_entry_idx_64");
  const auto that_row_off_in_bytes =
      ir_reduce_one_entry_idx->add<BinaryOperator>(BinaryOperator::BinaryOp::Mul,
                                                   that_entry_idx_64,
                                                   row_bytes,
                                                   "that_row_off_in_bytes");
  const auto that_row_ptr = ir_reduce_one_entry_idx->add<GetElementPtr>(
      that_buff, that_row_off_in_bytes, "that_row_ptr");
  const auto that_is_empty =
      ir_reduce_one_entry_idx->add<Call>(reduction_code.ir_is_empty.get(),
                                         std::vector<const Value*>{that_row_ptr},
                                         "that_is_empty");
  ir_reduce_one_entry_idx->add<ReturnEarly>(
      that_is_empty,
      ir_reduce_one_entry_idx->addConstant<ConstantInt>(0, Type::Int32),
      "");
  const auto key_count = query_mem_desc_.getGroupbyColCount();
  const auto one_element =
      ir_reduce_one_entry_idx->addConstant<ConstantInt>(1, Type::Int32);
  const auto this_targets_ptr_i64_ptr = ir_reduce_one_entry_idx->add<Alloca>(
      Type::Int64Ptr, one_element, "this_targets_ptr_out");
  const auto this_is_empty_ptr =
      ir_reduce_one_entry_idx->add<Alloca>(Type::Int8, one_element, "this_is_empty_out");
  ir_reduce_one_entry_idx->add<ExternalCall>(
      "get_group_value_reduction_rt",
      Type::Void,
      std::vector<const Value*>{
          this_buff,
          that_row_ptr,
          ir_reduce_one_entry_idx->addConstant<ConstantInt>(key_count, Type::Int32),
          this_qmd_handle,
          that_buff,
          that_entry_idx,
          that_entry_count,
          row_bytes,
          this_targets_ptr_i64_ptr,
          this_is_empty_ptr},
      "");
  const auto this_targets_ptr_i64 = ir_reduce_one_entry_idx->add<Load>(
      this_targets_ptr_i64_ptr, "this_targets_ptr_i64");
  auto this_is_empty =
      ir_reduce_one_entry_idx->add<Load>(this_is_empty_ptr, "this_is_empty");
  this_is_empty = ir_reduce_one_entry_idx->add<Cast>(
      Cast::CastOp::Trunc, this_is_empty, Type::Int1, "this_is_empty_bool");
  ir_reduce_one_entry_idx->add<ReturnEarly>(
      this_is_empty,
      ir_reduce_one_entry_idx->addConstant<ConstantInt>(0, Type::Int32),
      "");
  const auto key_qw_count = get_slot_off_quad(query_mem_desc_);
  const auto this_targets_ptr = ir_reduce_one_entry_idx->add<Cast>(
      Cast::CastOp::BitCast, this_targets_ptr_i64, Type::Int8Ptr, "this_targets_ptr");
  const auto key_byte_count = key_qw_count * sizeof(int64_t);
  const auto key_byte_count_lv =
      ir_reduce_one_entry_idx->addConstant<ConstantInt>(key_byte_count, Type::Int32);
  const auto that_targets_ptr = ir_reduce_one_entry_idx->add<GetElementPtr>(
      that_row_ptr, key_byte_count_lv, "that_targets_ptr");
  const auto reduce_rc = ir_reduce_one_entry_idx->add<Call>(
      reduction_code.ir_reduce_one_entry.get(),
      std::vector<const Value*>{this_targets_ptr,
                                that_targets_ptr,
                                this_qmd_handle,
                                that_qmd_handle,
                                serialized_varlen_buffer_arg},
      "");
  ir_reduce_one_entry_idx->add<Ret>(reduce_rc);
}

namespace {
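// Emit the body of the reduction loop: check the dynamic watchdog, reduce one entry and
// propagate any non-zero error code back to the caller.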
void generate_loop_body(For* for_loop,
                        Function* ir_reduce_loop,
                        Function* ir_reduce_one_entry_idx,
                        Value* this_buff,
                        Value* that_buff,
                        Value* start_index,
                        Value* that_entry_count,
                        Value* this_qmd_handle,
                        Value* that_qmd_handle,
                        Value* serialized_varlen_buffer) {
  const auto that_entry_idx = for_loop->add<BinaryOperator>(
      BinaryOperator::BinaryOp::Add, for_loop->iter(), start_index, "that_entry_idx");
  const auto watchdog_sample_seed =
      for_loop->add<Cast>(Cast::CastOp::SExt, that_entry_idx, Type::Int64, "");
  const auto watchdog_triggered =
      for_loop->add<ExternalCall>("check_watchdog_rt",
                                  Type::Int8,
                                  std::vector<const Value*>{watchdog_sample_seed},
                                  "");
  const auto watchdog_triggered_bool =
      for_loop->add<ICmp>(ICmp::Predicate::NE,
                          watchdog_triggered,
                          ir_reduce_loop->addConstant<ConstantInt>(0, Type::Int8),
                          "");
  for_loop->add<ReturnEarly>(
      watchdog_triggered_bool,
      ir_reduce_loop->addConstant<ConstantInt>(WATCHDOG_ERROR, Type::Int32),
      "");
  const auto reduce_rc =
      for_loop->add<Call>(ir_reduce_one_entry_idx,
                          std::vector<const Value*>{this_buff,
                                                    that_buff,
                                                    that_entry_idx,
                                                    that_entry_count,
                                                    this_qmd_handle,
                                                    that_qmd_handle,
                                                    serialized_varlen_buffer},
                          "");

  auto reduce_rc_bool =
      for_loop->add<ICmp>(ICmp::Predicate::NE,
                          reduce_rc,
                          ir_reduce_loop->addConstant<ConstantInt>(0, Type::Int32),
                          "");
  for_loop->add<ReturnEarly>(reduce_rc_bool, reduce_rc, "");
}

}  // namespace
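// Generate the 'reduce_loop' entry point, which iterates over the ['start_index',
// 'end_index') range of 'that' buffer entries and reduces each one into 'this' buffer.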
void ResultSetReductionJIT::reduceLoop(const ReductionCode& reduction_code) const {
  auto ir_reduce_loop = reduction_code.ir_reduce_loop.get();
  const auto this_buff_arg = ir_reduce_loop->arg(0);
  const auto that_buff_arg = ir_reduce_loop->arg(1);
  const auto start_index_arg = ir_reduce_loop->arg(2);
  const auto end_index_arg = ir_reduce_loop->arg(3);
  const auto that_entry_count_arg = ir_reduce_loop->arg(4);
  const auto this_qmd_handle_arg = ir_reduce_loop->arg(5);
  const auto that_qmd_handle_arg = ir_reduce_loop->arg(6);
  const auto serialized_varlen_buffer_arg = ir_reduce_loop->arg(7);
  For* for_loop =
      static_cast<For*>(ir_reduce_loop->add<For>(start_index_arg, end_index_arg, ""));
  generate_loop_body(for_loop,
                     ir_reduce_loop,
                     reduction_code.ir_reduce_one_entry_idx.get(),
                     this_buff_arg,
                     that_buff_arg,
                     start_index_arg,
                     that_entry_count_arg,
                     this_qmd_handle_arg,
                     that_qmd_handle_arg,
                     serialized_varlen_buffer_arg);
  ir_reduce_loop->add<Ret>(ir_reduce_loop->addConstant<ConstantInt>(0, Type::Int32));
}
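// Emit the reduction of a single target slot: dispatch to the aggregate-specific helper
// for real aggregates, use the checked write for SINGLE_VALUE and a plain projection
// write (plus varlen sample handling) otherwise.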
void ResultSetReductionJIT::reduceOneSlot(Value* this_ptr1,
                                          Value* this_ptr2,
                                          Value* that_ptr1,
                                          Value* that_ptr2,
                                          const TargetInfo& target_info,
                                          const size_t target_logical_idx,
                                          const size_t target_slot_idx,
                                          const size_t init_agg_val_idx,
                                          const size_t first_slot_idx_for_target,
                                          Function* ir_reduce_one_entry) const {
  if (query_mem_desc_.targetGroupbyIndicesSize() > 0) {
    if (query_mem_desc_.getTargetGroupbyIndex(target_logical_idx) >= 0) {
      return;
    }
  }
  const bool float_argument_input = takes_float_argument(target_info);
  const auto chosen_bytes =
      get_width_for_slot(target_slot_idx, float_argument_input, query_mem_desc_);
  CHECK_LT(init_agg_val_idx, target_init_vals_.size());
  auto init_val = target_init_vals_[init_agg_val_idx];
  if (target_info.is_agg &&
      (target_info.agg_kind != kSINGLE_VALUE && target_info.agg_kind != kSAMPLE)) {
    reduceOneAggregateSlot(this_ptr1,
                           this_ptr2,
                           that_ptr1,
                           that_ptr2,
                           target_info,
                           target_logical_idx,
                           target_slot_idx,
                           init_val,
                           chosen_bytes,
                           ir_reduce_one_entry);
  } else if (target_info.agg_kind == kSINGLE_VALUE) {
    const auto checked_rc = emit_checked_write_projection(
        this_ptr1, that_ptr1, init_val, chosen_bytes, ir_reduce_one_entry);

    auto checked_rc_bool = ir_reduce_one_entry->add<ICmp>(
        ICmp::Predicate::NE,
        checked_rc,
        ir_reduce_one_entry->addConstant<ConstantInt>(0, Type::Int32),
        "");

    ir_reduce_one_entry->add<ReturnEarly>(checked_rc_bool, checked_rc, "");

  } else {
    emit_write_projection(
        this_ptr1, that_ptr1, init_val, chosen_bytes, ir_reduce_one_entry);
    if (target_info.agg_kind == kSAMPLE && target_info.sql_type.is_varlen()) {
      CHECK(this_ptr2 && that_ptr2);
      size_t length_to_elems{0};
      if (target_info.sql_type.is_geometry()) {
        // TODO: Assumes hard-coded sizes for geometry targets
        length_to_elems = target_slot_idx == first_slot_idx_for_target ? 1 : 4;
      } else {
        const auto& elem_ti = target_info.sql_type.get_elem_type();
        length_to_elems = target_info.sql_type.is_string() ? 1 : elem_ti.get_size();
      }
      const auto serialized_varlen_buffer_arg = ir_reduce_one_entry->arg(4);
      ir_reduce_one_entry->add<ExternalCall>(
          "serialized_varlen_buffer_sample",
          Type::Void,
          std::vector<const Value*>{
              serialized_varlen_buffer_arg,
              this_ptr1,
              this_ptr2,
              that_ptr1,
              that_ptr2,
              ir_reduce_one_entry->addConstant<ConstantInt>(init_val, Type::Int64),
              ir_reduce_one_entry->addConstant<ConstantInt>(length_to_elems,
                                                            Type::Int64)},
          "");
    }
  }
}
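// Emit the reduction of one aggregate slot, selecting the runtime aggregate function
// that matches the aggregate kind (COUNT and APPROX_COUNT_DISTINCT, AVG, SUM, MIN, MAX).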
void ResultSetReductionJIT::reduceOneAggregateSlot(Value* this_ptr1,
                                                   Value* this_ptr2,
                                                   Value* that_ptr1,
                                                   Value* that_ptr2,
                                                   const TargetInfo& target_info,
                                                   const size_t target_logical_idx,
                                                   const size_t target_slot_idx,
                                                   const int64_t init_val,
                                                   const int8_t chosen_bytes,
                                                   Function* ir_reduce_one_entry) const {
  switch (target_info.agg_kind) {
    case kCOUNT:
    case kAPPROX_COUNT_DISTINCT: {
      if (is_distinct_target(target_info)) {
        CHECK_EQ(static_cast<size_t>(chosen_bytes), sizeof(int64_t));
        reduceOneCountDistinctSlot(
            this_ptr1, that_ptr1, target_logical_idx, ir_reduce_one_entry);
        break;
      }
      CHECK_EQ(int64_t(0), init_val);
      emit_aggregate_one_count(this_ptr1, that_ptr1, chosen_bytes, ir_reduce_one_entry);
      break;
    }
    case kAVG: {
      // Ignore float argument compaction for count component for fear of its overflow
      emit_aggregate_one_count(this_ptr2,
                               that_ptr2,
                               query_mem_desc_.getPaddedSlotWidthBytes(target_slot_idx),
                               ir_reduce_one_entry);
    }
    // fall thru
    case kSUM: {
      emit_aggregate_one_nullable_value("sum",
                                        this_ptr1,
                                        that_ptr1,
                                        init_val,
                                        chosen_bytes,
                                        target_info,
                                        ir_reduce_one_entry);
      break;
    }
    case kMIN: {
      emit_aggregate_one_nullable_value("min",
                                        this_ptr1,
                                        that_ptr1,
                                        init_val,
                                        chosen_bytes,
                                        target_info,
                                        ir_reduce_one_entry);
      break;
    }
    case kMAX: {
      emit_aggregate_one_nullable_value("max",
                                        this_ptr1,
                                        that_ptr1,
                                        init_val,
                                        chosen_bytes,
                                        target_info,
                                        ir_reduce_one_entry);
      break;
    }
    default:
      LOG(FATAL) << "Invalid aggregate type";
  }
}
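// Emit the external call that merges a COUNT DISTINCT (or APPROX_COUNT_DISTINCT) set
// from the 'that' slot into the corresponding 'this' slot.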
void ResultSetReductionJIT::reduceOneCountDistinctSlot(
    Value* this_ptr1,
    Value* that_ptr1,
    const size_t target_logical_idx,
    Function* ir_reduce_one_entry) const {
  CHECK_LT(target_logical_idx, query_mem_desc_.getCountDistinctDescriptorsSize());
  const auto old_set_handle = emit_load_i64(this_ptr1, ir_reduce_one_entry);
  const auto new_set_handle = emit_load_i64(that_ptr1, ir_reduce_one_entry);
  const auto this_qmd_arg = ir_reduce_one_entry->arg(2);
  const auto that_qmd_arg = ir_reduce_one_entry->arg(3);
  ir_reduce_one_entry->add<ExternalCall>(
      "count_distinct_set_union_jit_rt",
      Type::Void,
      std::vector<const Value*>{
          new_set_handle,
          old_set_handle,
          that_qmd_arg,
          this_qmd_arg,
          ir_reduce_one_entry->addConstant<ConstantInt>(target_logical_idx, Type::Int64)},
      "");
}
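// Compile the translated LLVM module to native CPU code, cache the compiled reduction
// loop under the reduction cache key and return the finalized 'ReductionCode'.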
ReductionCode ResultSetReductionJIT::finalizeReductionCode(
    ReductionCode reduction_code,
    const llvm::Function* ir_is_empty,
    const llvm::Function* ir_reduce_one_entry,
    const llvm::Function* ir_reduce_one_entry_idx,
    const CodeCacheKey& key) const {
  CompilationOptions co{
      ExecutorDeviceType::CPU, false, ExecutorOptLevel::ReductionJIT, false};

  LOG(IR) << "Reduction Loop:\n"
          << serialize_llvm_object(reduction_code.llvm_reduce_loop);
  LOG(IR) << "Reduction Is Empty Func:\n" << serialize_llvm_object(ir_is_empty);
  LOG(IR) << "Reduction One Entry Func:\n" << serialize_llvm_object(ir_reduce_one_entry);
  LOG(IR) << "Reduction One Entry Idx Func:\n"
          << serialize_llvm_object(ir_reduce_one_entry_idx);

  reduction_code.module.release();
  auto ee = CodeGenerator::generateNativeCPUCode(
      reduction_code.llvm_reduce_loop, {reduction_code.llvm_reduce_loop}, co);
  reduction_code.func_ptr = reinterpret_cast<ReductionCode::FuncPtr>(
      ee->getPointerToFunction(reduction_code.llvm_reduce_loop));

  auto cpu_compilation_context = std::make_shared<CpuCompilationContext>(std::move(ee));
  cpu_compilation_context->setFunctionPointer(reduction_code.llvm_reduce_loop);
  reduction_code.compilation_context = cpu_compilation_context;
  Executor::addCodeToCache(key,
                           reduction_code.compilation_context,
                           reduction_code.llvm_reduce_loop->getParent(),
                           s_code_cache);
  return reduction_code;
}
namespace {

std::string target_info_key(const TargetInfo& target_info) {
  return std::to_string(target_info.is_agg) + "\n" +
         std::to_string(target_info.agg_kind) + "\n" +
         target_info.sql_type.get_type_name() + "\n" +
         std::to_string(target_info.sql_type.get_notnull()) + "\n" +
         target_info.agg_arg_type.get_type_name() + "\n" +
         std::to_string(target_info.agg_arg_type.get_notnull()) + "\n" +
         std::to_string(target_info.skip_null_val) + "\n" +
         std::to_string(target_info.is_distinct);
}

}  // namespace
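// Build the code cache key from the query memory descriptor, the target initialization
// values and the target descriptions.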
std::string ResultSetReductionJIT::cacheKey() const {
  std::vector<std::string> target_init_vals_strings;
  std::transform(target_init_vals_.begin(),
                 target_init_vals_.end(),
                 std::back_inserter(target_init_vals_strings),
                 [](const int64_t v) { return std::to_string(v); });
  const auto target_init_vals_key =
      boost::algorithm::join(target_init_vals_strings, ", ");
  std::vector<std::string> targets_strings;
  std::transform(
      targets_.begin(),
      targets_.end(),
      std::back_inserter(targets_strings),
      [](const TargetInfo& target_info) { return target_info_key(target_info); });
  const auto targets_key = boost::algorithm::join(targets_strings, ", ");
  return query_mem_desc_.reductionKey() + "\n" + target_init_vals_key + "\n" +
         targets_key;
}
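// Variant of the reduction codegen used by the GPU reduction helper: it only handles the
// perfect hash layout and returns the translated module without compiling or caching it.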
ReductionCode GpuReductionHelperJIT::codegen() const {
  const auto hash_type = query_mem_desc_.getQueryDescriptionType();
  auto reduction_code = setup_functions_ir(hash_type);
  CHECK(hash_type == QueryDescriptionType::GroupByPerfectHash);
  isEmpty(reduction_code);
  reduceOneEntryNoCollisions(reduction_code);
  reduceOneEntryNoCollisionsIdx(reduction_code);
  reduceLoop(reduction_code);
  reduction_code.cgen_state.reset(new CgenState({}, false));
  auto cgen_state = reduction_code.cgen_state.get();
  std::unique_ptr<llvm::Module> module(runtime_module_shallow_copy(cgen_state));

  cgen_state->module_ = module.get();
  auto ir_is_empty = create_llvm_function(reduction_code.ir_is_empty.get(), cgen_state);
  auto ir_reduce_one_entry =
      create_llvm_function(reduction_code.ir_reduce_one_entry.get(), cgen_state);
  auto ir_reduce_one_entry_idx =
      create_llvm_function(reduction_code.ir_reduce_one_entry_idx.get(), cgen_state);
  auto ir_reduce_loop =
      create_llvm_function(reduction_code.ir_reduce_loop.get(), cgen_state);
  std::unordered_map<const Function*, llvm::Function*> f;
  f.emplace(reduction_code.ir_is_empty.get(), ir_is_empty);
  f.emplace(reduction_code.ir_reduce_one_entry.get(), ir_reduce_one_entry);
  f.emplace(reduction_code.ir_reduce_one_entry_idx.get(), ir_reduce_one_entry_idx);
  f.emplace(reduction_code.ir_reduce_loop.get(), ir_reduce_loop);
  translate_function(reduction_code.ir_is_empty.get(), ir_is_empty, reduction_code, f);
  translate_function(
      reduction_code.ir_reduce_one_entry.get(), ir_reduce_one_entry, reduction_code, f);
  translate_function(reduction_code.ir_reduce_one_entry_idx.get(),
                     ir_reduce_one_entry_idx,
                     reduction_code,
                     f);
  translate_function(
      reduction_code.ir_reduce_loop.get(), ir_reduce_loop, reduction_code, f);
  reduction_code.llvm_reduce_loop = ir_reduce_loop;
  reduction_code.module = std::move(module);
  return reduction_code;
}