OmniSciDB  1dac507f6e
 All Classes Namespaces Files Functions Variables Typedefs Enumerations Enumerator Friends Macros Pages
ResultSetReductionJIT.cpp
Go to the documentation of this file.
1 /*
2  * Copyright 2019 OmniSci, Inc.
3  *
4  * Licensed under the Apache License, Version 2.0 (the "License");
5  * you may not use this file except in compliance with the License.
6  * You may obtain a copy of the License at
7  *
8  * http://www.apache.org/licenses/LICENSE-2.0
9  *
10  * Unless required by applicable law or agreed to in writing, software
11  * distributed under the License is distributed on an "AS IS" BASIS,
12  * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13  * See the License for the specific language governing permissions and
14  * limitations under the License.
15  */
16 
17 #include "ResultSetReductionJIT.h"
20 
21 #include "CodeGenerator.h"
22 #include "DynamicWatchdog.h"
23 #include "Execute.h"
24 #include "IRCodegenUtils.h"
26 
27 #include "Shared/likely.h"
28 #include "Shared/mapdpath.h"
29 
30 #include <llvm/Bitcode/BitcodeReader.h>
31 #include <llvm/IR/Function.h>
32 #include <llvm/IR/IRBuilder.h>
33 #include <llvm/IR/Verifier.h>
34 #include <llvm/Support/SourceMgr.h>
35 #include <llvm/Support/raw_os_ostream.h>
36 
37 extern std::unique_ptr<llvm::Module> g_rt_module;
38 
40 
42 
43 namespace {
44 
// File-local tuning constants for the generated reduction code.

// Error code to be returned when the watchdog timer triggers during the reduction.
const int32_t WATCHDOG_ERROR{-1};
// Use the interpreter, not the JIT, for a number of entries lower than the threshold.
// (Below this size, native code generation costs more than it saves.)
const size_t INTERP_THRESHOLD{25};
49 
50 // Load the value stored at 'ptr' interpreted as 'ptr_type'.
51 Value* emit_load(Value* ptr, Type ptr_type, Function* function) {
52  return function->add<Load>(
53  function->add<Cast>(Cast::CastOp::BitCast, ptr, ptr_type, ""),
54  ptr->label() + "_loaded");
55 }
56 
57 // Load the value stored at 'ptr' as a 32-bit signed integer.
58 Value* emit_load_i32(Value* ptr, Function* function) {
59  return emit_load(ptr, Type::Int32Ptr, function);
60 }
61 
62 // Load the value stored at 'ptr' as a 64-bit signed integer.
63 Value* emit_load_i64(Value* ptr, Function* function) {
64  return emit_load(ptr, Type::Int64Ptr, function);
65 }
66 
67 // Read a 32- or 64-bit integer stored at 'ptr' and sign extend to 64-bit.
68 Value* emit_read_int_from_buff(Value* ptr, const int8_t compact_sz, Function* function) {
69  switch (compact_sz) {
70  case 8: {
71  return emit_load_i64(ptr, function);
72  }
73  case 4: {
74  const auto loaded_val = emit_load_i32(ptr, function);
75  return function->add<Cast>(Cast::CastOp::SExt, loaded_val, Type::Int64, "");
76  }
77  default: {
78  LOG(FATAL) << "Invalid byte width: " << compact_sz;
79  return nullptr;
80  }
81  }
82 }
83 
84 // Emit a runtime call to accumulate into the 'val_ptr' byte address the 'other_ptr'
85 // value when the type is specified as not null.
86 void emit_aggregate_one_value(const std::string& agg_kind,
87  Value* val_ptr,
88  Value* other_ptr,
89  const size_t chosen_bytes,
90  const TargetInfo& agg_info,
91  Function* ir_reduce_one_entry) {
92  const auto sql_type = get_compact_type(agg_info);
93  const auto dest_name = agg_kind + "_dest";
94  if (sql_type.is_fp()) {
95  if (chosen_bytes == sizeof(float)) {
96  const auto agg = ir_reduce_one_entry->add<Cast>(
97  Cast::CastOp::BitCast, val_ptr, Type::Int32Ptr, dest_name);
98  const auto val = emit_load(other_ptr, Type::FloatPtr, ir_reduce_one_entry);
99  ir_reduce_one_entry->add<Call>(
100  "agg_" + agg_kind + "_float", std::vector<const Value*>{agg, val}, "");
101  } else {
102  CHECK_EQ(chosen_bytes, sizeof(double));
103  const auto agg = ir_reduce_one_entry->add<Cast>(
104  Cast::CastOp::BitCast, val_ptr, Type::Int64Ptr, dest_name);
105  const auto val = emit_load(other_ptr, Type::DoublePtr, ir_reduce_one_entry);
106  ir_reduce_one_entry->add<Call>(
107  "agg_" + agg_kind + "_double", std::vector<const Value*>{agg, val}, "");
108  }
109  } else {
110  if (chosen_bytes == sizeof(int32_t)) {
111  const auto agg = ir_reduce_one_entry->add<Cast>(
112  Cast::CastOp::BitCast, val_ptr, Type::Int32Ptr, dest_name);
113  const auto val = emit_load(other_ptr, Type::Int32Ptr, ir_reduce_one_entry);
114  ir_reduce_one_entry->add<Call>(
115  "agg_" + agg_kind + "_int32", std::vector<const Value*>{agg, val}, "");
116  } else {
117  CHECK_EQ(chosen_bytes, sizeof(int64_t));
118  const auto agg = ir_reduce_one_entry->add<Cast>(
119  Cast::CastOp::BitCast, val_ptr, Type::Int64Ptr, dest_name);
120  const auto val = emit_load(other_ptr, Type::Int64Ptr, ir_reduce_one_entry);
121  ir_reduce_one_entry->add<Call>(
122  "agg_" + agg_kind, std::vector<const Value*>{agg, val}, "");
123  }
124  }
125 }
126 
127 // Same as above, but support nullable types as well.
128 void emit_aggregate_one_nullable_value(const std::string& agg_kind,
129  Value* val_ptr,
130  Value* other_ptr,
131  const int64_t init_val,
132  const size_t chosen_bytes,
133  const TargetInfo& agg_info,
134  Function* ir_reduce_one_entry) {
135  const auto dest_name = agg_kind + "_dest";
136  if (agg_info.skip_null_val) {
137  const auto sql_type = get_compact_type(agg_info);
138  if (sql_type.is_fp()) {
139  if (chosen_bytes == sizeof(float)) {
140  const auto agg = ir_reduce_one_entry->add<Cast>(
141  Cast::CastOp::BitCast, val_ptr, Type::Int32Ptr, dest_name);
142  const auto val = emit_load(other_ptr, Type::FloatPtr, ir_reduce_one_entry);
143  const auto init_val_lv = ir_reduce_one_entry->addConstant<ConstantFP>(
144  *reinterpret_cast<const float*>(may_alias_ptr(&init_val)), Type::Float);
145  ir_reduce_one_entry->add<Call>("agg_" + agg_kind + "_float_skip_val",
146  std::vector<const Value*>{agg, val, init_val_lv},
147  "");
148  } else {
149  CHECK_EQ(chosen_bytes, sizeof(double));
150  const auto agg = ir_reduce_one_entry->add<Cast>(
151  Cast::CastOp::BitCast, val_ptr, Type::Int64Ptr, dest_name);
152  const auto val = emit_load(other_ptr, Type::DoublePtr, ir_reduce_one_entry);
153  const auto init_val_lv = ir_reduce_one_entry->addConstant<ConstantFP>(
154  *reinterpret_cast<const double*>(may_alias_ptr(&init_val)), Type::Double);
155  ir_reduce_one_entry->add<Call>("agg_" + agg_kind + "_double_skip_val",
156  std::vector<const Value*>{agg, val, init_val_lv},
157  "");
158  }
159  } else {
160  if (chosen_bytes == sizeof(int32_t)) {
161  const auto agg = ir_reduce_one_entry->add<Cast>(
162  Cast::CastOp::BitCast, val_ptr, Type::Int32Ptr, dest_name);
163  const auto val = emit_load(other_ptr, Type::Int32Ptr, ir_reduce_one_entry);
164  const auto init_val_lv =
165  ir_reduce_one_entry->addConstant<ConstantInt>(init_val, Type::Int32);
166  ir_reduce_one_entry->add<Call>("agg_" + agg_kind + "_int32_skip_val",
167  std::vector<const Value*>{agg, val, init_val_lv},
168  "");
169  } else {
170  CHECK_EQ(chosen_bytes, sizeof(int64_t));
171  const auto agg = ir_reduce_one_entry->add<Cast>(
172  Cast::CastOp::BitCast, val_ptr, Type::Int64Ptr, dest_name);
173  const auto val = emit_load(other_ptr, Type::Int64Ptr, ir_reduce_one_entry);
174  const auto init_val_lv =
175  ir_reduce_one_entry->addConstant<ConstantInt>(init_val, Type::Int64);
176  ir_reduce_one_entry->add<Call>("agg_" + agg_kind + "_skip_val",
177  std::vector<const Value*>{agg, val, init_val_lv},
178  "");
179  }
180  }
181  } else {
183  agg_kind, val_ptr, other_ptr, chosen_bytes, agg_info, ir_reduce_one_entry);
184  }
185 }
186 
187 // Emit code to accumulate the 'other_ptr' count into the 'val_ptr' destination.
189  Value* other_ptr,
190  const size_t chosen_bytes,
191  Function* ir_reduce_one_entry) {
192  const auto dest_name = "count_dest";
193  if (chosen_bytes == sizeof(int32_t)) {
194  const auto agg = ir_reduce_one_entry->add<Cast>(
195  Cast::CastOp::BitCast, val_ptr, Type::Int32Ptr, dest_name);
196  const auto val = emit_load(other_ptr, Type::Int32Ptr, ir_reduce_one_entry);
197  ir_reduce_one_entry->add<Call>(
198  "agg_sum_int32", std::vector<const Value*>{agg, val}, "");
199  } else {
200  CHECK_EQ(chosen_bytes, sizeof(int64_t));
201  const auto agg = ir_reduce_one_entry->add<Cast>(
202  Cast::CastOp::BitCast, val_ptr, Type::Int64Ptr, dest_name);
203  const auto val = emit_load(other_ptr, Type::Int64Ptr, ir_reduce_one_entry);
204  ir_reduce_one_entry->add<Call>("agg_sum", std::vector<const Value*>{agg, val}, "");
205  }
206 }
207 
208 // Emit code to load the value stored at the 'other_pi8' as an integer of the given width
209 // 'chosen_bytes' and write it to the 'slot_pi8' destination only if necessary (the
210 // existing value at destination is the initialization value).
212  Value* other_pi8,
213  const int64_t init_val,
214  const size_t chosen_bytes,
215  Function* ir_reduce_one_entry) {
216  const auto func_name = "write_projection_int" + std::to_string(chosen_bytes * 8);
217  if (chosen_bytes == sizeof(int32_t)) {
218  const auto proj_val = emit_load_i32(other_pi8, ir_reduce_one_entry);
219  ir_reduce_one_entry->add<Call>(
220  func_name,
221  std::vector<const Value*>{
222  slot_pi8,
223  proj_val,
224  ir_reduce_one_entry->addConstant<ConstantInt>(init_val, Type::Int64)},
225  "");
226  } else {
227  CHECK_EQ(chosen_bytes, sizeof(int64_t));
228  const auto proj_val = emit_load_i64(other_pi8, ir_reduce_one_entry);
229  ir_reduce_one_entry->add<Call>(
230  func_name,
231  std::vector<const Value*>{
232  slot_pi8,
233  proj_val,
234  ir_reduce_one_entry->addConstant<ConstantInt>(init_val, Type::Int64)},
235  "");
236  }
237 }
238 
239 std::unique_ptr<Function> create_function(
240  const std::string name,
241  const std::vector<Function::NamedArg>& arg_types,
242  const Type ret_type,
243  const bool always_inline) {
244  return std::make_unique<Function>(name, arg_types, ret_type, always_inline);
245 }
246 
247 // Create the declaration for the 'is_empty_entry' function. Use private linkage since
248 // it's a helper only called from the generated code and mark it as always inline.
249 std::unique_ptr<Function> setup_is_empty_entry(ReductionCode* reduction_code) {
250  return create_function(
251  "is_empty_entry", {{"row_ptr", Type::Int8Ptr}}, Type::Int1, /*always_inline=*/true);
252 }
253 
254 // Create the declaration for the 'reduce_one_entry' helper.
255 std::unique_ptr<Function> setup_reduce_one_entry(ReductionCode* reduction_code,
256  const QueryDescriptionType hash_type) {
257  std::string this_ptr_name;
258  std::string that_ptr_name;
259  switch (hash_type) {
261  this_ptr_name = "this_targets_ptr";
262  that_ptr_name = "that_targets_ptr";
263  break;
264  }
267  this_ptr_name = "this_row_ptr";
268  that_ptr_name = "that_row_ptr";
269  break;
270  }
271  default: {
272  LOG(FATAL) << "Unexpected query description type";
273  }
274  }
275  return create_function("reduce_one_entry",
276  {{this_ptr_name, Type::Int8Ptr},
277  {that_ptr_name, Type::Int8Ptr},
278  {"this_qmd", Type::VoidPtr},
279  {"that_qmd", Type::VoidPtr},
280  {"serialized_varlen_buffer_arg", Type::VoidPtr}},
281  Type::Void,
282  /*always_inline=*/true);
283 }
284 
285 // Create the declaration for the 'reduce_one_entry_idx' helper.
286 std::unique_ptr<Function> setup_reduce_one_entry_idx(ReductionCode* reduction_code) {
287  return create_function("reduce_one_entry_idx",
288  {{"this_buff", Type::Int8Ptr},
289  {"that_buff", Type::Int8Ptr},
290  {"that_entry_idx", Type::Int32},
291  {"that_entry_count", Type::Int32},
292  {"this_qmd_handle", Type::VoidPtr},
293  {"that_qmd_handle", Type::VoidPtr},
294  {"serialized_varlen_buffer", Type::VoidPtr}},
295  Type::Void,
296  /*always_inline=*/true);
297 }
298 
299 // Create the declaration for the 'reduce_loop' entry point. Use external linkage, this is
300 // the public API of the generated code directly used from result set reduction.
301 std::unique_ptr<Function> setup_reduce_loop(ReductionCode* reduction_code) {
302  return create_function("reduce_loop",
303  {{"this_buff", Type::Int8Ptr},
304  {"that_buff", Type::Int8Ptr},
305  {"start_index", Type::Int32},
306  {"end_index", Type::Int32},
307  {"that_entry_count", Type::Int32},
308  {"this_qmd_handle", Type::VoidPtr},
309  {"that_qmd_handle", Type::VoidPtr},
310  {"serialized_varlen_buffer", Type::VoidPtr}},
311  Type::Int32,
312  /*always_inline=*/false);
313 }
314 
315 llvm::Function* create_llvm_function(const Function* function,
316  const CgenState* cgen_state) {
317  auto& ctx = cgen_state->context_;
318  std::vector<llvm::Type*> parameter_types;
319  const auto& arg_types = function->arg_types();
320  for (const auto& named_arg : arg_types) {
321  CHECK(named_arg.type != Type::Void);
322  parameter_types.push_back(llvm_type(named_arg.type, ctx));
323  }
324  const auto func_type = llvm::FunctionType::get(
325  llvm_type(function->ret_type(), ctx), parameter_types, false);
326  const auto linkage = function->always_inline() ? llvm::Function::PrivateLinkage
327  : llvm::Function::ExternalLinkage;
328  auto func =
329  llvm::Function::Create(func_type, linkage, function->name(), cgen_state->module_);
330  const auto arg_it = func->arg_begin();
331  for (size_t i = 0; i < arg_types.size(); ++i) {
332  const auto arg = &*(arg_it + i);
333  arg->setName(arg_types[i].name);
334  }
335  if (function->always_inline()) {
337  }
338  return func;
339 }
340 
341 // Setup the reduction function and helpers declarations, create a module and a code
342 // generation state object.
344  ReductionCode reduction_code{};
345  reduction_code.ir_is_empty = setup_is_empty_entry(&reduction_code);
346  reduction_code.ir_reduce_one_entry = setup_reduce_one_entry(&reduction_code, hash_type);
347  reduction_code.ir_reduce_one_entry_idx = setup_reduce_one_entry_idx(&reduction_code);
348  reduction_code.ir_reduce_loop = setup_reduce_loop(&reduction_code);
349  return reduction_code;
350 }
351 
353  return hash_type == QueryDescriptionType::GroupByBaselineHash ||
356 }
357 
358 // Variable length sample fast path (no serialized variable length buffer).
359 void varlen_buffer_sample(int8_t* this_ptr1,
360  int8_t* this_ptr2,
361  const int8_t* that_ptr1,
362  const int8_t* that_ptr2,
363  const int64_t init_val) {
364  const auto rhs_proj_col = *reinterpret_cast<const int64_t*>(that_ptr1);
365  if (rhs_proj_col != init_val) {
366  *reinterpret_cast<int64_t*>(this_ptr1) = rhs_proj_col;
367  }
368  CHECK(this_ptr2 && that_ptr2);
369  *reinterpret_cast<int64_t*>(this_ptr2) = *reinterpret_cast<const int64_t*>(that_ptr2);
370 }
371 
372 } // namespace
373 
375  const void* serialized_varlen_buffer_handle,
376  int8_t* this_ptr1,
377  int8_t* this_ptr2,
378  const int8_t* that_ptr1,
379  const int8_t* that_ptr2,
380  const int64_t init_val,
381  const int64_t length_to_elems) {
382  if (!serialized_varlen_buffer_handle) {
383  varlen_buffer_sample(this_ptr1, this_ptr2, that_ptr1, that_ptr2, init_val);
384  return;
385  }
386  const auto& serialized_varlen_buffer =
387  *reinterpret_cast<const std::vector<std::string>*>(serialized_varlen_buffer_handle);
388  if (!serialized_varlen_buffer.empty()) {
389  const auto rhs_proj_col = *reinterpret_cast<const int64_t*>(that_ptr1);
390  CHECK_LT(static_cast<size_t>(rhs_proj_col), serialized_varlen_buffer.size());
391  const auto& varlen_bytes_str = serialized_varlen_buffer[rhs_proj_col];
392  const auto str_ptr = reinterpret_cast<const int8_t*>(varlen_bytes_str.c_str());
393  *reinterpret_cast<int64_t*>(this_ptr1) = reinterpret_cast<const int64_t>(str_ptr);
394  *reinterpret_cast<int64_t*>(this_ptr2) =
395  static_cast<int64_t>(varlen_bytes_str.size() / length_to_elems);
396  } else {
397  varlen_buffer_sample(this_ptr1, this_ptr2, that_ptr1, that_ptr2, init_val);
398  }
399 }
400 
401 // Wrappers to be called from the generated code, sharing implementation with the rest of
402 // the system.
403 
404 extern "C" void count_distinct_set_union_jit_rt(const int64_t new_set_handle,
405  const int64_t old_set_handle,
406  const void* that_qmd_handle,
407  const void* this_qmd_handle,
408  const int64_t target_logical_idx) {
409  const auto that_qmd = reinterpret_cast<const QueryMemoryDescriptor*>(that_qmd_handle);
410  const auto this_qmd = reinterpret_cast<const QueryMemoryDescriptor*>(this_qmd_handle);
411  const auto& new_count_distinct_desc =
412  that_qmd->getCountDistinctDescriptor(target_logical_idx);
413  const auto& old_count_distinct_desc =
414  this_qmd->getCountDistinctDescriptor(target_logical_idx);
415  CHECK(old_count_distinct_desc.impl_type_ != CountDistinctImplType::Invalid);
416  CHECK(old_count_distinct_desc.impl_type_ == new_count_distinct_desc.impl_type_);
418  new_set_handle, old_set_handle, new_count_distinct_desc, old_count_distinct_desc);
419 }
420 
422  const int8_t* key,
423  const uint32_t key_count,
424  const void* this_qmd_handle,
425  const int8_t* that_buff,
426  const uint32_t that_entry_idx,
427  const uint32_t that_entry_count,
428  const uint32_t row_size_bytes,
429  int64_t** buff_out,
430  uint8_t* empty) {
431  const auto& this_qmd = *reinterpret_cast<const QueryMemoryDescriptor*>(this_qmd_handle);
432  const auto gvi = get_group_value_reduction(reinterpret_cast<int64_t*>(groups_buffer),
433  this_qmd.getEntryCount(),
434  reinterpret_cast<const int64_t*>(key),
435  key_count,
436  this_qmd.getEffectiveKeyWidth(),
437  this_qmd,
438  reinterpret_cast<const int64_t*>(that_buff),
439  that_entry_idx,
440  that_entry_count,
441  row_size_bytes >> 3);
442  *buff_out = gvi.first;
443  *empty = gvi.second;
444 }
445 
446 extern "C" uint8_t check_watchdog_rt(const size_t sample_seed) {
447  if (UNLIKELY(g_enable_dynamic_watchdog && (sample_seed & 0x3F) == 0 &&
448  dynamic_watchdog())) {
449  return true;
450  }
451  return false;
452 }
453 
455  const std::vector<TargetInfo>& targets,
456  const std::vector<int64_t>& target_init_vals)
457  : query_mem_desc_(query_mem_desc)
458  , targets_(targets)
459  , target_init_vals_(target_init_vals) {}
460 
461 // The code generated for a reduction between two result set buffers is structured in
462 // several functions and their IR is stored in the 'ReductionCode' structure. At a high
463 // level, the pseudocode is:
464 //
465 // func is_empty_func(row_ptr):
466 // ...
467 //
468 // func reduce_func_baseline(this_ptr, that_ptr):
469 // if is_empty_func(that_ptr):
470 // return
471 // for each target in the row:
472 // reduce target from that_ptr into this_ptr
473 //
474 // func reduce_func_perfect_hash(this_ptr, that_ptr):
475 // if is_empty_func(that_ptr):
476 // return
477 // for each target in the row:
478 // reduce target from that_ptr into this_ptr
479 //
480 // func reduce_func_idx(this_buff, that_buff, that_entry_index):
481 // that_ptr = that_result_set[that_entry_index]
482 // # Retrieval of 'this_ptr' is different between perfect hash and baseline.
483 // this_ptr = this_result_set[that_entry_index]
484 // or
485 // get_row(key(that_row_ptr), this_result_set_buffer)
486 // reduce_func_[baseline|perfect_hash](this_ptr, that_ptr)
487 //
488 // func reduce_loop(this_buff, that_buff, start_entry_index, end_entry_index):
489 // for that_entry_index in [start_entry_index, end_entry_index):
490 // reduce_func_idx(this_buff, that_buff, that_entry_index)
491 
// NOTE(review): multiple lines of this definition — including its signature
// (presumably `ReductionCode ResultSetReductionJIT::codegen() const {`) — were
// lost in the extracted listing; each gap is flagged below. Confirm against the
// original source before editing.
  const auto hash_type = query_mem_desc_.getQueryDescriptionType();
  // NOTE(review): missing early-out condition here (an `if (...) {` guarding
  // unsupported layouts — presumably columnar output / non-group queries).
    return {};
  }
  // Declare the IR functions, then generate each one.
  auto reduction_code = setup_functions_ir(hash_type);
  isEmpty(reduction_code);
  // NOTE(review): missing `switch (...) {` header and first `case` label here
  // (this branch generates the perfect-hash variants).
      reduceOneEntryNoCollisions(reduction_code);
      reduceOneEntryNoCollisionsIdx(reduction_code);
      break;
    }
  // NOTE(review): missing `case` label here (this branch generates the
  // baseline-hash variants).
      reduceOneEntryBaseline(reduction_code);
      reduceOneEntryBaselineIdx(reduction_code);
      break;
    }
    default: {
      LOG(FATAL) << "Unexpected query description type";
    }
  }
  reduceLoop(reduction_code);
  // For small result sets, avoid native code generation and use the interpreter instead.
  // NOTE(review): missing threshold condition here (presumably comparing the
  // entry count against INTERP_THRESHOLD — TODO confirm).
    return reduction_code;
  }
  // Native path: serialize against the shared code cache and reuse a previously
  // compiled reduce_loop when the cache key matches.
  std::lock_guard<std::mutex> reduction_guard(ReductionCode::s_reduction_mutex);
  CodeCacheKey key{cacheKey()};
  const auto val_ptr = s_code_cache.get(key);
  if (val_ptr) {
    // Cache hit: return the cached function pointer together with the freshly
    // built IR (the LLVM artifacts are not needed).
    return {reinterpret_cast<ReductionCode::FuncPtr>(std::get<0>(val_ptr->first.front())),
            nullptr,
            nullptr,
            nullptr,
            std::move(reduction_code.ir_is_empty),
            std::move(reduction_code.ir_reduce_one_entry),
            std::move(reduction_code.ir_reduce_one_entry_idx),
            std::move(reduction_code.ir_reduce_loop)};
  }
  // Cache miss: clone the runtime module and lower each IR function to LLVM.
  reduction_code.cgen_state.reset(new CgenState({}, false));
  auto cgen_state = reduction_code.cgen_state.get();
  std::unique_ptr<llvm::Module> module(runtime_module_shallow_copy(cgen_state));
  cgen_state->module_ = module.get();
  auto ir_is_empty = create_llvm_function(reduction_code.ir_is_empty.get(), cgen_state);
  auto ir_reduce_one_entry =
      create_llvm_function(reduction_code.ir_reduce_one_entry.get(), cgen_state);
  auto ir_reduce_one_entry_idx =
      create_llvm_function(reduction_code.ir_reduce_one_entry_idx.get(), cgen_state);
  auto ir_reduce_loop =
      create_llvm_function(reduction_code.ir_reduce_loop.get(), cgen_state);
  // Map each IR function to its LLVM counterpart so calls can be resolved.
  std::unordered_map<const Function*, llvm::Function*> f;
  f.emplace(reduction_code.ir_is_empty.get(), ir_is_empty);
  f.emplace(reduction_code.ir_reduce_one_entry.get(), ir_reduce_one_entry);
  f.emplace(reduction_code.ir_reduce_one_entry_idx.get(), ir_reduce_one_entry_idx);
  f.emplace(reduction_code.ir_reduce_loop.get(), ir_reduce_loop);
  translate_function(reduction_code.ir_is_empty.get(), ir_is_empty, reduction_code, f);
  // NOTE(review): missing `translate_function(` call header here.
      reduction_code.ir_reduce_one_entry.get(), ir_reduce_one_entry, reduction_code, f);
  translate_function(reduction_code.ir_reduce_one_entry_idx.get(),
                     ir_reduce_one_entry_idx,
                     reduction_code,
                     f);
  // NOTE(review): missing `translate_function(` call header here.
      reduction_code.ir_reduce_loop.get(), ir_reduce_loop, reduction_code, f);
  reduction_code.llvm_reduce_loop = ir_reduce_loop;
  reduction_code.module = std::move(module);
  return finalizeReductionCode(std::move(reduction_code),
                               ir_is_empty,
                               ir_reduce_one_entry,
                               ir_reduce_one_entry_idx,
                               key);
}
567 
// NOTE(review): the signature of this definition (presumably
// `void ResultSetReductionJIT::clearCache() {`) and the statements that actually
// clear the stub/code caches were lost in the extracted listing — confirm against
// the original source.
  // Clear stub cache to avoid crash caused by non-deterministic static destructor order
  // of LLVM context and the cache.
  g_rt_module = nullptr;  // also release the global runtime module
}
575 
// Generate the body of 'is_empty_entry': compare the entry's key (or, for keyless
// layouts, a designated target slot) against the empty sentinel and return the result.
// NOTE(review): several interior lines of this definition were lost in the
// extracted listing; each gap is flagged below — confirm against the original source.
void ResultSetReductionJIT::isEmpty(const ReductionCode& reduction_code) const {
  auto ir_is_empty = reduction_code.ir_is_empty.get();
  // NOTE(review): missing precondition CHECKs here.
  Value* key{nullptr};
  Value* empty_key_val{nullptr};
  const auto keys_ptr = ir_is_empty->arg(0);
  // NOTE(review): missing branch header here — presumably the keyless-hash case
  // (`if (query_mem_desc_.hasKeylessHash() ...`), which reads a target slot
  // instead of the group key. TODO confirm.
  CHECK_LT(static_cast<size_t>(query_mem_desc_.getTargetIdxForKey()),
           target_init_vals_.size());
  const int64_t target_slot_off =
  // NOTE(review): missing initializer line for 'target_slot_off' (byte offset of
  // the designated key target slot) here.
  const auto slot_ptr = ir_is_empty->add<GetElementPtr>(
      keys_ptr,
      ir_is_empty->addConstant<ConstantInt>(target_slot_off, Type::Int32),
      "is_empty_slot_ptr");
  const auto compact_sz =
  // NOTE(review): missing initializer line for 'compact_sz' (padded slot width)
  // here.
  key = emit_read_int_from_buff(slot_ptr, compact_sz, ir_is_empty);
  empty_key_val = ir_is_empty->addConstant<ConstantInt>(
  // NOTE(review): missing constant arguments here (presumably the target's
  // init value and its type).
  } else {
  // NOTE(review): missing `switch (...)` header here — the cases below key off
  // the effective key width.
    case 4: {
  // NOTE(review): missing precondition line(s) here.
      key = emit_load_i32(keys_ptr, ir_is_empty);
      empty_key_val = ir_is_empty->addConstant<ConstantInt>(EMPTY_KEY_32, Type::Int32);
      break;
    }
    case 8: {
      key = emit_load_i64(keys_ptr, ir_is_empty);
      empty_key_val = ir_is_empty->addConstant<ConstantInt>(EMPTY_KEY_64, Type::Int64);
      break;
    }
    default:
      LOG(FATAL) << "Invalid key width";
  }
  }
  // The entry is empty iff the loaded key equals the empty sentinel.
  const auto ret =
      ir_is_empty->add<ICmp>(ICmp::Predicate::EQ, key, empty_key_val, "is_key_empty");
  ir_is_empty->add<Ret>(ret);
}
622 
624  const ReductionCode& reduction_code) const {
625  auto ir_reduce_one_entry = reduction_code.ir_reduce_one_entry.get();
626  const auto this_row_ptr = ir_reduce_one_entry->arg(0);
627  const auto that_row_ptr = ir_reduce_one_entry->arg(1);
628  const auto that_is_empty =
629  ir_reduce_one_entry->add<Call>(reduction_code.ir_is_empty.get(),
630  std::vector<const Value*>{that_row_ptr},
631  "that_is_empty");
632  ir_reduce_one_entry->add<ReturnEarly>(that_is_empty, 0, "");
633 
634  const auto key_bytes = get_key_bytes_rowwise(query_mem_desc_);
635  if (key_bytes) { // copy the key from right hand side
636  ir_reduce_one_entry->add<MemCpy>(
637  this_row_ptr,
638  that_row_ptr,
639  ir_reduce_one_entry->addConstant<ConstantInt>(key_bytes, Type::Int32));
640  }
641 
642  const auto key_bytes_with_padding = align_to_int64(key_bytes);
643  const auto key_bytes_lv =
644  ir_reduce_one_entry->addConstant<ConstantInt>(key_bytes_with_padding, Type::Int32);
645  const auto this_targets_start_ptr = ir_reduce_one_entry->add<GetElementPtr>(
646  this_row_ptr, key_bytes_lv, "this_targets_start");
647  const auto that_targets_start_ptr = ir_reduce_one_entry->add<GetElementPtr>(
648  that_row_ptr, key_bytes_lv, "that_targets_start");
649 
651  ir_reduce_one_entry, this_targets_start_ptr, that_targets_start_ptr);
652 }
653 
655  Function* ir_reduce_one_entry,
656  Value* this_targets_start_ptr,
657  Value* that_targets_start_ptr) const {
658  const auto& col_slot_context = query_mem_desc_.getColSlotContext();
659  Value* this_targets_ptr = this_targets_start_ptr;
660  Value* that_targets_ptr = that_targets_start_ptr;
661  size_t init_agg_val_idx = 0;
662  for (size_t target_logical_idx = 0; target_logical_idx < targets_.size();
663  ++target_logical_idx) {
664  const auto& target_info = targets_[target_logical_idx];
665  const auto& slots_for_col = col_slot_context.getSlotsForCol(target_logical_idx);
666  Value* this_ptr2{nullptr};
667  Value* that_ptr2{nullptr};
668 
669  bool two_slot_target{false};
670  if (target_info.is_agg &&
671  (target_info.agg_kind == kAVG ||
672  (target_info.agg_kind == kSAMPLE && target_info.sql_type.is_varlen()))) {
673  // Note that this assumes if one of the slot pairs in a given target is an array,
674  // all slot pairs are arrays. Currently this is true for all geo targets, but we
675  // should better codify and store this information in the future
676  two_slot_target = true;
677  }
678 
679  for (size_t target_slot_idx = slots_for_col.front();
680  target_slot_idx < slots_for_col.back() + 1;
681  target_slot_idx += 2) {
682  const auto slot_off_val = query_mem_desc_.getPaddedSlotWidthBytes(target_slot_idx);
683  const auto slot_off =
684  ir_reduce_one_entry->addConstant<ConstantInt>(slot_off_val, Type::Int32);
685  if (UNLIKELY(two_slot_target)) {
686  const auto desc = "target_" + std::to_string(target_logical_idx) + "_second_slot";
687  this_ptr2 = ir_reduce_one_entry->add<GetElementPtr>(
688  this_targets_ptr, slot_off, "this_" + desc);
689  that_ptr2 = ir_reduce_one_entry->add<GetElementPtr>(
690  that_targets_ptr, slot_off, "that_" + desc);
691  }
692  reduceOneSlot(this_targets_ptr,
693  this_ptr2,
694  that_targets_ptr,
695  that_ptr2,
696  target_info,
697  target_logical_idx,
698  target_slot_idx,
699  init_agg_val_idx,
700  slots_for_col.front(),
701  ir_reduce_one_entry);
702  auto increment_agg_val_idx_maybe =
703  [&init_agg_val_idx, &target_logical_idx, this](const int slot_count) {
705  query_mem_desc_.getTargetGroupbyIndex(target_logical_idx) < 0) {
706  init_agg_val_idx += slot_count;
707  }
708  };
709  if (target_logical_idx + 1 == targets_.size() &&
710  target_slot_idx + 1 >= slots_for_col.back()) {
711  break;
712  }
713  const auto next_desc =
714  "target_" + std::to_string(target_logical_idx + 1) + "_first_slot";
715  if (UNLIKELY(two_slot_target)) {
716  increment_agg_val_idx_maybe(2);
717  const auto two_slot_off = ir_reduce_one_entry->addConstant<ConstantInt>(
718  slot_off_val + query_mem_desc_.getPaddedSlotWidthBytes(target_slot_idx + 1),
719  Type::Int32);
720  this_targets_ptr = ir_reduce_one_entry->add<GetElementPtr>(
721  this_targets_ptr, two_slot_off, "this_" + next_desc);
722  that_targets_ptr = ir_reduce_one_entry->add<GetElementPtr>(
723  that_targets_ptr, two_slot_off, "that_" + next_desc);
724  } else {
725  increment_agg_val_idx_maybe(1);
726  this_targets_ptr = ir_reduce_one_entry->add<GetElementPtr>(
727  this_targets_ptr, slot_off, "this_" + next_desc);
728  that_targets_ptr = ir_reduce_one_entry->add<GetElementPtr>(
729  that_targets_ptr, slot_off, "that_" + next_desc);
730  }
731  }
732  }
733  ir_reduce_one_entry->add<Ret>();
734 }
735 
737  const ReductionCode& reduction_code) const {
738  auto ir_reduce_one_entry = reduction_code.ir_reduce_one_entry.get();
739  const auto this_targets_ptr_arg = ir_reduce_one_entry->arg(0);
740  const auto that_targets_ptr_arg = ir_reduce_one_entry->arg(1);
741  Value* this_ptr1 = this_targets_ptr_arg;
742  Value* that_ptr1 = that_targets_ptr_arg;
743  size_t j = 0;
744  size_t init_agg_val_idx = 0;
745  for (size_t target_logical_idx = 0; target_logical_idx < targets_.size();
746  ++target_logical_idx) {
747  const auto& target_info = targets_[target_logical_idx];
748  Value* this_ptr2{nullptr};
749  Value* that_ptr2{nullptr};
750  if (target_info.is_agg &&
751  (target_info.agg_kind == kAVG ||
752  (target_info.agg_kind == kSAMPLE && target_info.sql_type.is_varlen()))) {
753  const auto desc = "target_" + std::to_string(target_logical_idx) + "_second_slot";
754  const auto second_slot_rel_off =
755  ir_reduce_one_entry->addConstant<ConstantInt>(sizeof(int64_t), Type::Int32);
756  this_ptr2 = ir_reduce_one_entry->add<GetElementPtr>(
757  this_ptr1, second_slot_rel_off, "this_" + desc);
758  that_ptr2 = ir_reduce_one_entry->add<GetElementPtr>(
759  that_ptr1, second_slot_rel_off, "that_" + desc);
760  }
761  reduceOneSlot(this_ptr1,
762  this_ptr2,
763  that_ptr1,
764  that_ptr2,
765  target_info,
766  target_logical_idx,
767  j,
768  init_agg_val_idx,
769  j,
770  ir_reduce_one_entry);
771  if (target_logical_idx + 1 == targets_.size()) {
772  break;
773  }
775  init_agg_val_idx = advance_slot(init_agg_val_idx, target_info, false);
776  } else {
777  if (query_mem_desc_.getTargetGroupbyIndex(target_logical_idx) < 0) {
778  init_agg_val_idx = advance_slot(init_agg_val_idx, target_info, false);
779  }
780  }
781  j = advance_slot(j, target_info, false);
782  const auto next_desc =
783  "target_" + std::to_string(target_logical_idx + 1) + "_first_slot";
784  auto next_slot_rel_off = ir_reduce_one_entry->addConstant<ConstantInt>(
785  init_agg_val_idx * sizeof(int64_t), Type::Int32);
786  this_ptr1 = ir_reduce_one_entry->add<GetElementPtr>(
787  this_targets_ptr_arg, next_slot_rel_off, next_desc);
788  that_ptr1 = ir_reduce_one_entry->add<GetElementPtr>(
789  that_targets_ptr_arg, next_slot_rel_off, next_desc);
790  }
791  ir_reduce_one_entry->add<Ret>();
792 }
793 
// NOTE(review): the signature head of this definition (presumably `void
// ResultSetReductionJIT::reduceOneEntryNoCollisionsIdx(`) plus several interior
// lines were lost in the extracted listing; gaps are flagged below — confirm
// against the original source. The function generates 'reduce_one_entry_idx' for
// the perfect hash layout: index both buffers by entry and delegate to
// 'reduce_one_entry'.
    const ReductionCode& reduction_code) const {
  auto ir_reduce_one_entry_idx = reduction_code.ir_reduce_one_entry_idx.get();
  // NOTE(review): missing precondition CHECK lines here (likely asserting the
  // perfect-hash layout — TODO confirm).
  const auto this_buff = ir_reduce_one_entry_idx->arg(0);
  const auto that_buff = ir_reduce_one_entry_idx->arg(1);
  const auto entry_idx = ir_reduce_one_entry_idx->arg(2);
  const auto this_qmd_handle = ir_reduce_one_entry_idx->arg(4);
  const auto that_qmd_handle = ir_reduce_one_entry_idx->arg(5);
  const auto serialized_varlen_buffer_arg = ir_reduce_one_entry_idx->arg(6);
  const auto row_bytes = ir_reduce_one_entry_idx->addConstant<ConstantInt>(
  // NOTE(review): missing constant arguments here (presumably the row size in
  // bytes and its type).
  // Both buffers are indexed by the same entry position in this layout.
  const auto row_off_in_bytes = ir_reduce_one_entry_idx->add<BinaryOperator>(
      BinaryOperator::BinaryOp::Mul, entry_idx, row_bytes, "row_off_in_bytes");
  const auto this_row_ptr = ir_reduce_one_entry_idx->add<GetElementPtr>(
      this_buff, row_off_in_bytes, "this_row_ptr");
  const auto that_row_ptr = ir_reduce_one_entry_idx->add<GetElementPtr>(
      that_buff, row_off_in_bytes, "that_row_ptr");
  ir_reduce_one_entry_idx->add<Call>(
      reduction_code.ir_reduce_one_entry.get(),
      std::vector<const Value*>{this_row_ptr,
                                that_row_ptr,
                                this_qmd_handle,
                                that_qmd_handle,
                                serialized_varlen_buffer_arg},
      "");
  ir_reduce_one_entry_idx->add<Ret>();
}
825 
// Emits the indexed per-entry reduction for the baseline-hash layout: the
// matching group for a 'that' row must be looked up (or created) in the 'this'
// buffer at runtime before the two rows can be reduced.
// NOTE(review): original lines 826 (signature start), 829-832, 841 (row-size
// constant argument), and 879-880 (presumably the definition of key_qw_count)
// are missing from this extract — confirm against the full source.
827  const ReductionCode& reduction_code) const {
828  auto ir_reduce_one_entry_idx = reduction_code.ir_reduce_one_entry_idx.get();
// Formal arguments of the generated function, in declaration order.
833  const auto this_buff = ir_reduce_one_entry_idx->arg(0);
834  const auto that_buff = ir_reduce_one_entry_idx->arg(1);
835  const auto that_entry_idx = ir_reduce_one_entry_idx->arg(2);
836  const auto that_entry_count = ir_reduce_one_entry_idx->arg(3);
837  const auto this_qmd_handle = ir_reduce_one_entry_idx->arg(4);
838  const auto that_qmd_handle = ir_reduce_one_entry_idx->arg(5);
839  const auto serialized_varlen_buffer_arg = ir_reduce_one_entry_idx->arg(6);
840  const auto row_bytes = ir_reduce_one_entry_idx->addConstant<ConstantInt>(
// Locate the source row inside the 'that' buffer.
842  const auto that_row_off_in_bytes = ir_reduce_one_entry_idx->add<BinaryOperator>(
843  BinaryOperator::BinaryOp::Mul, that_entry_idx, row_bytes, "that_row_off_in_bytes");
844  const auto that_row_ptr = ir_reduce_one_entry_idx->add<GetElementPtr>(
845  that_buff, that_row_off_in_bytes, "that_row_ptr");
// Empty source rows contribute nothing: return 0 (success) immediately.
846  const auto that_is_empty =
847  ir_reduce_one_entry_idx->add<Call>(reduction_code.ir_is_empty.get(),
848  std::vector<const Value*>{that_row_ptr},
849  "that_is_empty");
850  ir_reduce_one_entry_idx->add<ReturnEarly>(that_is_empty, 0, "");
851  const auto key_count = query_mem_desc_.getGroupbyColCount();
// Stack slots for the two out-parameters of get_group_value_reduction_rt:
// the resolved targets pointer and the "was empty" flag.
852  const auto one_element =
853  ir_reduce_one_entry_idx->addConstant<ConstantInt>(1, Type::Int32);
854  const auto this_targets_ptr_i64_ptr = ir_reduce_one_entry_idx->add<Alloca>(
855  Type::Int64Ptr, one_element, "this_targets_ptr_out");
856  const auto this_is_empty_ptr =
857  ir_reduce_one_entry_idx->add<Alloca>(Type::Int8, one_element, "this_is_empty_out");
// Runtime lookup/insertion of the matching group in the 'this' buffer.
858  ir_reduce_one_entry_idx->add<ExternalCall>(
859  "get_group_value_reduction_rt",
860  Type::Void,
861  std::vector<const Value*>{
862  this_buff,
863  that_row_ptr,
864  ir_reduce_one_entry_idx->addConstant<ConstantInt>(key_count, Type::Int32),
865  this_qmd_handle,
866  that_buff,
867  that_entry_idx,
868  that_entry_count,
869  row_bytes,
870  this_targets_ptr_i64_ptr,
871  this_is_empty_ptr},
872  "");
873  const auto this_targets_ptr_i64 = ir_reduce_one_entry_idx->add<Load>(
874  this_targets_ptr_i64_ptr, "this_targets_ptr_i64");
875  auto this_is_empty =
876  ir_reduce_one_entry_idx->add<Load>(this_is_empty_ptr, "this_is_empty");
877  this_is_empty = ir_reduce_one_entry_idx->add<Cast>(
878  Cast::CastOp::Trunc, this_is_empty, Type::Int1, "this_is_empty_bool");
// Early return 0 when the destination slot came back flagged empty —
// presumably the runtime already materialized the row; TODO confirm against
// get_group_value_reduction_rt's contract.
879  ir_reduce_one_entry_idx->add<ReturnEarly>(this_is_empty, 0, "");
881  const auto this_targets_ptr = ir_reduce_one_entry_idx->add<Cast>(
882  Cast::CastOp::BitCast, this_targets_ptr_i64, Type::Int8Ptr, "this_targets_ptr");
// Skip past the group key to the first target slot of the 'that' row.
// NOTE(review): key_qw_count's definition is in the missing lines; it appears
// to be the key width in 64-bit quadwords.
883  const auto key_byte_count = key_qw_count * sizeof(int64_t);
884  const auto key_byte_count_lv =
885  ir_reduce_one_entry_idx->addConstant<ConstantInt>(key_byte_count, Type::Int32);
886  const auto that_targets_ptr = ir_reduce_one_entry_idx->add<GetElementPtr>(
887  that_row_ptr, key_byte_count_lv, "that_targets_ptr");
// Reduce the two target areas slot by slot.
888  ir_reduce_one_entry_idx->add<Call>(
889  reduction_code.ir_reduce_one_entry.get(),
890  std::vector<const Value*>{this_targets_ptr,
891  that_targets_ptr,
892  this_qmd_handle,
893  that_qmd_handle,
894  serialized_varlen_buffer_arg},
895  "");
896  ir_reduce_one_entry_idx->add<Ret>();
897 }
898 
899 namespace {
900 
901 void generate_loop_body(For* for_loop,
902  Function* ir_reduce_loop,
903  Function* ir_reduce_one_entry_idx,
904  Value* this_buff,
905  Value* that_buff,
906  Value* start_index,
907  Value* that_entry_count,
908  Value* this_qmd_handle,
909  Value* that_qmd_handle,
910  Value* serialized_varlen_buffer) {
911  const auto that_entry_idx = for_loop->add<BinaryOperator>(
912  BinaryOperator::BinaryOp::Add, for_loop->iter(), start_index, "that_entry_idx");
913  const auto watchdog_sample_seed =
914  for_loop->add<Cast>(Cast::CastOp::SExt, that_entry_idx, Type::Int64, "");
915  const auto watchdog_triggered =
916  for_loop->add<ExternalCall>("check_watchdog_rt",
917  Type::Int8,
918  std::vector<const Value*>{watchdog_sample_seed},
919  "");
920  const auto watchdog_triggered_bool =
921  for_loop->add<ICmp>(ICmp::Predicate::NE,
922  watchdog_triggered,
923  ir_reduce_loop->addConstant<ConstantInt>(0, Type::Int8),
924  "");
925  for_loop->add<ReturnEarly>(watchdog_triggered_bool, WATCHDOG_ERROR, "");
926  for_loop->add<Call>(ir_reduce_one_entry_idx,
927  std::vector<const Value*>{this_buff,
928  that_buff,
929  that_entry_idx,
930  that_entry_count,
931  this_qmd_handle,
932  that_qmd_handle,
933  serialized_varlen_buffer},
934  "");
935 }
936 
937 } // namespace
938 
939 void ResultSetReductionJIT::reduceLoop(const ReductionCode& reduction_code) const {
940  auto ir_reduce_loop = reduction_code.ir_reduce_loop.get();
941  const auto this_buff_arg = ir_reduce_loop->arg(0);
942  const auto that_buff_arg = ir_reduce_loop->arg(1);
943  const auto start_index_arg = ir_reduce_loop->arg(2);
944  const auto end_index_arg = ir_reduce_loop->arg(3);
945  const auto that_entry_count_arg = ir_reduce_loop->arg(4);
946  const auto this_qmd_handle_arg = ir_reduce_loop->arg(5);
947  const auto that_qmd_handle_arg = ir_reduce_loop->arg(6);
948  const auto serialized_varlen_buffer_arg = ir_reduce_loop->arg(7);
949  For* for_loop =
950  static_cast<For*>(ir_reduce_loop->add<For>(start_index_arg, end_index_arg, ""));
951  generate_loop_body(for_loop,
952  ir_reduce_loop,
953  reduction_code.ir_reduce_one_entry_idx.get(),
954  this_buff_arg,
955  that_buff_arg,
956  start_index_arg,
957  that_entry_count_arg,
958  this_qmd_handle_arg,
959  that_qmd_handle_arg,
960  serialized_varlen_buffer_arg);
961  ir_reduce_loop->add<Ret>(ir_reduce_loop->addConstant<ConstantInt>(0, Type::Int32));
962 }
963 
// Reduces a single target slot of one entry: dispatches aggregates to
// reduceOneAggregateSlot, otherwise writes the projected value, with special
// handling for varlen SAMPLE targets via the serialized varlen buffer.
// NOTE(review): original lines 964 (signature start with the function name),
// 974 (guard condition, presumably targetGroupbyIndicesSize() > 0), and 996
// (call name, presumably emit_write_projection) are missing from this
// extract — confirm against the full source.
 965  Value* this_ptr2,
 966  Value* that_ptr1,
 967  Value* that_ptr2,
 968  const TargetInfo& target_info,
 969  const size_t target_logical_idx,
 970  const size_t target_slot_idx,
 971  const size_t init_agg_val_idx,
 972  const size_t first_slot_idx_for_target,
 973  Function* ir_reduce_one_entry) const {
// Targets which are also group-by columns are materialized with the key and
// need no slot reduction.
 975  if (query_mem_desc_.getTargetGroupbyIndex(target_logical_idx) >= 0) {
 976  return;
 977  }
 978  }
 979  const bool float_argument_input = takes_float_argument(target_info);
 980  const auto chosen_bytes =
 981  get_width_for_slot(target_slot_idx, float_argument_input, query_mem_desc_);
 982  CHECK_LT(init_agg_val_idx, target_init_vals_.size());
 983  auto init_val = target_init_vals_[init_agg_val_idx];
// Real aggregates (SAMPLE is handled like a projection below).
 984  if (target_info.is_agg && target_info.agg_kind != kSAMPLE) {
 985  reduceOneAggregateSlot(this_ptr1,
 986  this_ptr2,
 987  that_ptr1,
 988  that_ptr2,
 989  target_info,
 990  target_logical_idx,
 991  target_slot_idx,
 992  init_val,
 993  chosen_bytes,
 994  ir_reduce_one_entry);
 995  } else {
 997  this_ptr1, that_ptr1, init_val, chosen_bytes, ir_reduce_one_entry);
// Varlen SAMPLE also needs its payload copied from the serialized varlen
// buffer; this_ptr2/that_ptr2 carry the companion (length) slot.
 998  if (target_info.agg_kind == kSAMPLE && target_info.sql_type.is_varlen()) {
 999  CHECK(this_ptr2 && that_ptr2);
1000  size_t length_to_elems{0};
1001  if (target_info.sql_type.is_geometry()) {
1002  // TODO: Assumes hard-coded sizes for geometry targets
1003  length_to_elems = target_slot_idx == first_slot_idx_for_target ? 1 : 4;
1004  } else {
// Strings count bytes directly; arrays scale by element size.
1005  const auto& elem_ti = target_info.sql_type.get_elem_type();
1006  length_to_elems = target_info.sql_type.is_string() ? 1 : elem_ti.get_size();
1007  }
1008  const auto serialized_varlen_buffer_arg = ir_reduce_one_entry->arg(4);
1009  ir_reduce_one_entry->add<ExternalCall>(
1010  "serialized_varlen_buffer_sample",
1011  Type::Void,
1012  std::vector<const Value*>{
1013  serialized_varlen_buffer_arg,
1014  this_ptr1,
1015  this_ptr2,
1016  that_ptr1,
1017  that_ptr2,
1018  ir_reduce_one_entry->addConstant<ConstantInt>(init_val, Type::Int64),
1019  ir_reduce_one_entry->addConstant<ConstantInt>(length_to_elems,
1020  Type::Int64)},
1021  "");
1022  }
1023  }
1024 }
1025 
// Emits the reduction of one aggregate slot, dispatching on the aggregate
// kind. COUNT/APPROX_COUNT_DISTINCT either union count-distinct sets or add
// counts; AVG reduces its count component then falls through to SUM for the
// sum component; SUM/MIN/MAX emit the corresponding nullable-value reduction.
// NOTE(review): original lines 1026 (signature start), 1041, 1058, 1068 and
// 1078 (the emit-call names, presumably reduceOneCountDistinctSlot and
// emit_aggregate_one_nullable_value with "sum"/"min"/"max") are missing from
// this extract — confirm against the full source.
1027  Value* this_ptr2,
1028  Value* that_ptr1,
1029  Value* that_ptr2,
1030  const TargetInfo& target_info,
1031  const size_t target_logical_idx,
1032  const size_t target_slot_idx,
1033  const int64_t init_val,
1034  const int8_t chosen_bytes,
1035  Function* ir_reduce_one_entry) const {
1036  switch (target_info.agg_kind) {
1037  case kCOUNT:
1038  case kAPPROX_COUNT_DISTINCT: {
// Distinct counts are represented as set handles and must be unioned.
1039  if (is_distinct_target(target_info)) {
1040  CHECK_EQ(static_cast<size_t>(chosen_bytes), sizeof(int64_t));
1042  this_ptr1, that_ptr1, target_logical_idx, ir_reduce_one_entry);
1043  break;
1044  }
// Plain COUNT slots are always initialized to zero.
1045  CHECK_EQ(int64_t(0), init_val);
1046  emit_aggregate_one_count(this_ptr1, that_ptr1, chosen_bytes, ir_reduce_one_entry);
1047  break;
1048  }
1049  case kAVG: {
1050  // Ignore float argument compaction for count component for fear of its overflow
1051  emit_aggregate_one_count(this_ptr2,
1052  that_ptr2,
1053  query_mem_desc_.getPaddedSlotWidthBytes(target_slot_idx),
1054  ir_reduce_one_entry);
1055  }
// Intentional: AVG's sum component is reduced by the kSUM case below.
1056  // fall thru
1057  case kSUM: {
1059  this_ptr1,
1060  that_ptr1,
1061  init_val,
1062  chosen_bytes,
1063  target_info,
1064  ir_reduce_one_entry);
1065  break;
1066  }
1067  case kMIN: {
1069  this_ptr1,
1070  that_ptr1,
1071  init_val,
1072  chosen_bytes,
1073  target_info,
1074  ir_reduce_one_entry);
1075  break;
1076  }
1077  case kMAX: {
1079  this_ptr1,
1080  that_ptr1,
1081  init_val,
1082  chosen_bytes,
1083  target_info,
1084  ir_reduce_one_entry);
1085  break;
1086  }
1087  default:
1088  LOG(FATAL) << "Invalid aggregate type";
1089  }
1090 }
1091 
// Emits the reduction of one count-distinct slot: loads the 64-bit set
// handles stored in both slots and unions the 'that' set into the 'this' set
// via the count_distinct_set_union_jit_rt runtime helper, which resolves the
// count-distinct descriptors from the two query-memory-descriptor handles.
// NOTE(review): original lines 1092 (signature start with the function name)
// and 1097 are missing from this extract — confirm against the full source.
1093  Value* this_ptr1,
1094  Value* that_ptr1,
1095  const size_t target_logical_idx,
1096  Function* ir_reduce_one_entry) const {
// The slots hold opaque 64-bit handles to the distinct-value sets.
1098  const auto old_set_handle = emit_load_i64(this_ptr1, ir_reduce_one_entry);
1099  const auto new_set_handle = emit_load_i64(that_ptr1, ir_reduce_one_entry);
// args 2/3 of the generated per-entry function are the this/that query memory
// descriptor handles.
1100  const auto this_qmd_arg = ir_reduce_one_entry->arg(2);
1101  const auto that_qmd_arg = ir_reduce_one_entry->arg(3);
1102  ir_reduce_one_entry->add<ExternalCall>(
1103  "count_distinct_set_union_jit_rt",
1104  Type::Void,
1105  std::vector<const Value*>{
1106  new_set_handle,
1107  old_set_handle,
1108  that_qmd_arg,
1109  this_qmd_arg,
1110  ir_reduce_one_entry->addConstant<ConstantInt>(target_logical_idx, Type::Int64)},
1111  "");
1112 }
1113 
// Compiles the generated reduction IR to native CPU code, extracts the entry
// function pointer, and registers the compiled artifact in the code cache so
// subsequent reductions with the same cache key reuse it.
// NOTE(review): original lines 1114 (signature start), 1121 (compilation
// options initializer), 1123 (the generateNativeCPUCode call line), and 1131
// (the addCodeToCache call line with the key) are missing from this extract —
// confirm against the full source.
1115  ReductionCode reduction_code,
1116  const llvm::Function* ir_is_empty,
1117  const llvm::Function* ir_reduce_one_entry,
1118  const llvm::Function* ir_reduce_one_entry_idx,
1119  const CodeCacheKey& key) const {
1120  CompilationOptions co{
// Ownership of the module is released here — presumably it is transferred to
// the execution engine created below; TODO confirm.
1122  reduction_code.module.release();
1124  reduction_code.llvm_reduce_loop, {reduction_code.llvm_reduce_loop}, co);
// Resolve the native entry point of the compiled reduce loop.
1125  reduction_code.func_ptr = reinterpret_cast<ReductionCode::FuncPtr>(
1126  ee->getPointerToFunction(reduction_code.llvm_reduce_loop));
// Cache the (function pointer, execution engine) pair keyed by 'key'.
1127  auto cache_val =
1128  std::make_tuple(reinterpret_cast<void*>(reduction_code.func_ptr), std::move(ee));
1129  std::vector<std::tuple<void*, ExecutionEngineWrapper>> cache_vals;
1130  cache_vals.emplace_back(std::move(cache_val));
1132  std::move(cache_vals),
1133  reduction_code.llvm_reduce_loop->getParent(),
1134  s_code_cache);
1135  return reduction_code;
1136 }
1137 
1138 namespace {
1139 
1140 std::string target_info_key(const TargetInfo& target_info) {
1141  return std::to_string(target_info.is_agg) + "\n" +
1142  std::to_string(target_info.agg_kind) + "\n" +
1143  target_info.sql_type.get_type_name() + "\n" +
1144  std::to_string(target_info.sql_type.get_notnull()) + "\n" +
1145  target_info.agg_arg_type.get_type_name() + "\n" +
1146  std::to_string(target_info.agg_arg_type.get_notnull()) + "\n" +
1147  std::to_string(target_info.skip_null_val) + "\n" +
1148  std::to_string(target_info.is_distinct);
1149 }
1150 
1151 } // namespace
1152 
1153 std::string ResultSetReductionJIT::cacheKey() const {
1154  std::vector<std::string> target_init_vals_strings;
1155  std::transform(target_init_vals_.begin(),
1156  target_init_vals_.end(),
1157  std::back_inserter(target_init_vals_strings),
1158  [](const int64_t v) { return std::to_string(v); });
1159  const auto target_init_vals_key =
1160  boost::algorithm::join(target_init_vals_strings, ", ");
1161  std::vector<std::string> targets_strings;
1162  std::transform(
1163  targets_.begin(),
1164  targets_.end(),
1165  std::back_inserter(targets_strings),
1166  [](const TargetInfo& target_info) { return target_info_key(target_info); });
1167  const auto targets_key = boost::algorithm::join(targets_strings, ", ");
1168  return query_mem_desc_.reductionKey() + "\n" + target_init_vals_key + "\n" +
1169  targets_key;
1170 }
void emit_aggregate_one_nullable_value(const std::string &agg_kind, Value *val_ptr, Value *other_ptr, const int64_t init_val, const size_t chosen_bytes, const TargetInfo &agg_info, Function *ir_reduce_one_entry)
QueryDescriptionType
Definition: Types.h:26
void clear()
Definition: LruCache.hpp:57
#define CHECK_EQ(x, y)
Definition: Logger.h:198
void reduceOneSlot(Value *this_ptr1, Value *this_ptr2, Value *that_ptr1, Value *that_ptr2, const TargetInfo &target_info, const size_t target_logical_idx, const size_t target_slot_idx, const size_t init_agg_val_idx, const size_t first_slot_idx_for_target, Function *ir_reduce_one_entry) const
const int32_t groups_buffer_size return groups_buffer
bool is_aggregate_query(const QueryDescriptionType hash_type)
std::unique_ptr< llvm::Module > module(runtime_module_shallow_copy(cgen_state))
void count_distinct_set_union(const int64_t new_set_handle, const int64_t old_set_handle, const CountDistinctDescriptor &new_count_distinct_desc, const CountDistinctDescriptor &old_count_distinct_desc)
__device__ bool dynamic_watchdog()
#define EMPTY_KEY_64
static void addCodeToCache(const CodeCacheKey &, std::vector< std::tuple< void *, ExecutionEngineWrapper >>, llvm::Module *, CodeCache &)
void count_distinct_set_union_jit_rt(const int64_t new_set_handle, const int64_t old_set_handle, const void *that_qmd_handle, const void *this_qmd_handle, const int64_t target_logical_idx)
const std::string & label() const
std::unique_ptr< llvm::Module > runtime_module_shallow_copy(CgenState *cgen_state)
void reduceOneEntryNoCollisions(const ReductionCode &reduction_code) const
void serialized_varlen_buffer_sample(const void *serialized_varlen_buffer_handle, int8_t *this_ptr1, int8_t *this_ptr2, const int8_t *that_ptr1, const int8_t *that_ptr2, const int64_t init_val, const int64_t length_to_elems)
void varlen_buffer_sample(int8_t *this_ptr1, int8_t *this_ptr2, const int8_t *that_ptr1, const int8_t *that_ptr2, const int64_t init_val)
std::unique_ptr< Function > ir_reduce_loop
Value * emit_read_int_from_buff(Value *ptr, const int8_t compact_sz, Function *function)
void reduceOneEntryBaselineIdx(const ReductionCode &reduction_code) const
SQLTypeInfo sql_type
Definition: TargetInfo.h:42
#define LOG(tag)
Definition: Logger.h:185
void mark_function_always_inline(llvm::Function *func)
void get_group_value_reduction_rt(int8_t *groups_buffer, const int8_t *key, const uint32_t key_count, const void *this_qmd_handle, const int8_t *that_buff, const uint32_t that_entry_idx, const uint32_t that_entry_count, const uint32_t row_size_bytes, int64_t **buff_out, uint8_t *empty)
void reduceLoop(const ReductionCode &reduction_code) const
size_t get_byteoff_of_slot(const size_t slot_idx, const QueryMemoryDescriptor &query_mem_desc)
std::string join(T const &container, std::string const &delim)
llvm::Function * llvm_reduce_loop
void reduceOneEntryNoCollisionsIdx(const ReductionCode &reduction_code) const
#define CHECK_GE(x, y)
Definition: Logger.h:203
std::vector< std::string > CodeCacheKey
Definition: CodeCache.h:61
ReductionCode finalizeReductionCode(ReductionCode reduction_code, const llvm::Function *ir_is_empty, const llvm::Function *ir_reduce_one_entry, const llvm::Function *ir_reduce_one_entry_idx, const CodeCacheKey &key) const
size_t get_slot_off_quad(const QueryMemoryDescriptor &query_mem_desc)
std::string cacheKey() const
size_t getEffectiveKeyWidth() const
bool is_varlen() const
Definition: sqltypes.h:491
std::unique_ptr< Function > ir_reduce_one_entry
bool g_enable_dynamic_watchdog
Definition: Execute.cpp:72
static ExecutionEngineWrapper generateNativeCPUCode(llvm::Function *func, const std::unordered_set< llvm::Function * > &live_funcs, const CompilationOptions &co)
const std::vector< int64_t > target_init_vals_
void reduceOneAggregateSlot(Value *this_ptr1, Value *this_ptr2, Value *that_ptr1, Value *that_ptr2, const TargetInfo &target_info, const size_t target_logical_idx, const size_t target_slot_idx, const int64_t init_val, const int8_t chosen_bytes, Function *ir_reduce_one_entry) const
bool takes_float_argument(const TargetInfo &target_info)
Definition: TargetInfo.h:120
Value * add(Args &&...args)
bool skip_null_val
Definition: TargetInfo.h:44
int8_t get_width_for_slot(const size_t target_slot_idx, const bool float_argument_input, const QueryMemoryDescriptor &query_mem_desc)
const int64_t const uint32_t const uint32_t key_qw_count
std::unique_ptr< Function > setup_reduce_one_entry_idx(ReductionCode *reduction_code)
int32_t(*)(int8_t *this_buff, const int8_t *that_buff, const int32_t start_entry_index, const int32_t end_entry_index, const int32_t that_entry_count, const void *this_qmd, const void *that_qmd, const void *serialized_varlen_buffer) FuncPtr
const QueryMemoryDescriptor query_mem_desc_
std::unique_ptr< Function > ir_is_empty
std::string to_string(char const *&&v)
SQLTypeInfo agg_arg_type
Definition: TargetInfo.h:43
void translate_function(const Function *function, llvm::Function *llvm_function, const ReductionCode &reduction_code, const std::unordered_map< const Function *, llvm::Function * > &f)
void emit_aggregate_one_value(const std::string &agg_kind, Value *val_ptr, Value *other_ptr, const size_t chosen_bytes, const TargetInfo &agg_info, Function *ir_reduce_one_entry)
Value * emit_load_i32(Value *ptr, Function *function)
std::string get_type_name() const
Definition: sqltypes.h:429
Definition: sqldefs.h:71
const SQLTypeInfo get_compact_type(const TargetInfo &target)
false auto cgen_state
llvm::Module * module_
Definition: CgenState.h:264
ResultSetReductionJIT(const QueryMemoryDescriptor &query_mem_desc, const std::vector< TargetInfo > &targets, const std::vector< int64_t > &target_init_vals)
llvm::LLVMContext & context_
Definition: CgenState.h:267
bool is_agg
Definition: TargetInfo.h:40
size_t advance_slot(const size_t j, const TargetInfo &target_info, const bool separate_varlen_storage)
CHECK(cgen_state)
void reduceOneCountDistinctSlot(Value *this_ptr1, Value *that_ptr1, const size_t target_logical_idx, Function *ir_reduce_one_entry) const
size_t getGroupbyColCount() const
GroupValueInfo get_group_value_reduction(int64_t *groups_buffer, const uint32_t groups_buffer_entry_count, const int64_t *key, const uint32_t key_count, const size_t key_width, const QueryMemoryDescriptor &query_mem_desc, const int64_t *that_buff_i64, const size_t that_entry_idx, const size_t that_entry_count, const uint32_t row_size_quad)
size_t targetGroupbyIndicesSize() const
void generate_loop_body(For *for_loop, Function *ir_reduce_loop, Function *ir_reduce_one_entry_idx, Value *this_buff, Value *that_buff, Value *start_index, Value *that_entry_count, Value *this_qmd_handle, Value *that_qmd_handle, Value *serialized_varlen_buffer)
void emit_write_projection(Value *slot_pi8, Value *other_pi8, const int64_t init_val, const size_t chosen_bytes, Function *ir_reduce_one_entry)
std::unique_ptr< llvm::Module > g_rt_module
llvm::Function * create_llvm_function(const Function *function, const CgenState *cgen_state)
HOST DEVICE bool get_notnull() const
Definition: sqltypes.h:333
uint8_t check_watchdog_rt(const size_t sample_seed)
Definition: sqldefs.h:71
static std::mutex s_reduction_mutex
bool is_distinct_target(const TargetInfo &target_info)
Definition: TargetInfo.h:116
std::string target_info_key(const TargetInfo &target_info)
std::unique_ptr< Function > ir_reduce_one_entry_idx
const int8_t getPaddedSlotWidthBytes(const size_t slot_idx) const
ReductionCode setup_functions_ir(const QueryDescriptionType hash_type)
std::unique_ptr< Function > setup_is_empty_entry(ReductionCode *reduction_code)
SQLAgg agg_kind
Definition: TargetInfo.h:41
bool is_geometry() const
Definition: sqltypes.h:489
size_t getCountDistinctDescriptorsSize() const
void reduceOneEntryTargetsNoCollisions(Function *ir_reduce_one_entry, Value *this_targets_start_ptr, Value *that_targets_start_ptr) const
ssize_t getTargetGroupbyIndex(const size_t target_idx) const
QueryDescriptionType getQueryDescriptionType() const
#define UNLIKELY(x)
Definition: likely.h:20
llvm::Type * llvm_type(const Type type, llvm::LLVMContext &ctx)
ReductionCode codegen() const
const CountDistinctDescriptor & getCountDistinctDescriptor(const size_t idx) const
#define CHECK_LT(x, y)
Definition: Logger.h:200
size_t get_row_bytes(const QueryMemoryDescriptor &query_mem_desc)
Definition: sqldefs.h:71
std::unique_ptr< Function > create_function(const std::string name, const std::vector< Function::NamedArg > &arg_types, const Type ret_type, const bool always_inline)
const Value * iter() const
std::unique_ptr< Function > setup_reduce_one_entry(ReductionCode *reduction_code, const QueryDescriptionType hash_type)
void isEmpty(const ReductionCode &reduction_code) const
Value * emit_load_i64(Value *ptr, Function *function)
bool is_string() const
Definition: sqltypes.h:477
const ColSlotContext & getColSlotContext() const
#define EMPTY_KEY_32
std::unique_ptr< llvm::Module > module
void reduceOneEntryBaseline(const ReductionCode &reduction_code) const
SQLTypeInfoCore get_elem_type() const
Definition: sqltypes.h:659
Value * emit_load(Value *ptr, Type ptr_type, Function *function)
value_t * get(const key_t &key)
Definition: LruCache.hpp:39
bool is_distinct
Definition: TargetInfo.h:45
void emit_aggregate_one_count(Value *val_ptr, Value *other_ptr, const size_t chosen_bytes, Function *ir_reduce_one_entry)
Definition: sqldefs.h:71
Definition: sqldefs.h:71
std::unique_ptr< Function > setup_reduce_loop(ReductionCode *reduction_code)
size_t get_key_bytes_rowwise(const QueryMemoryDescriptor &query_mem_desc)
FORCE_INLINE HOST DEVICE T align_to_int64(T addr)
std::string reductionKey() const
const Executor * getExecutor() const
int32_t getTargetIdxForKey() const
const std::vector< TargetInfo > targets_