OmniSciDB  f632821e96
ResultSetReductionJIT.cpp
1 /*
2  * Copyright 2019 OmniSci, Inc.
3  *
4  * Licensed under the Apache License, Version 2.0 (the "License");
5  * you may not use this file except in compliance with the License.
6  * You may obtain a copy of the License at
7  *
8  * http://www.apache.org/licenses/LICENSE-2.0
9  *
10  * Unless required by applicable law or agreed to in writing, software
11  * distributed under the License is distributed on an "AS IS" BASIS,
12  * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13  * See the License for the specific language governing permissions and
14  * limitations under the License.
15  */
16 
17 #include "ResultSetReductionJIT.h"
20 
21 #include "CodeGenerator.h"
22 #include "DynamicWatchdog.h"
23 #include "Execute.h"
24 #include "IRCodegenUtils.h"
26 
27 #include "Shared/likely.h"
28 
29 #include <llvm/Bitcode/BitcodeReader.h>
30 #include <llvm/IR/Function.h>
31 #include <llvm/IR/IRBuilder.h>
32 #include <llvm/IR/Verifier.h>
33 #include <llvm/Support/SourceMgr.h>
34 #include <llvm/Support/raw_os_ostream.h>
35 
36 extern std::unique_ptr<llvm::Module> g_rt_module;
37 
39 
41 
42 namespace {
43 
44 // Error code to be returned when the watchdog timer triggers during the reduction.
45 const int32_t WATCHDOG_ERROR{-1};
46 // Use the interpreter rather than the JIT when the number of entries is below this threshold.
47 const size_t INTERP_THRESHOLD{25};
48 
49 // Load the value stored at 'ptr' interpreted as 'ptr_type'.
50 Value* emit_load(Value* ptr, Type ptr_type, Function* function) {
51  return function->add<Load>(
52  function->add<Cast>(Cast::CastOp::BitCast, ptr, ptr_type, ""),
53  ptr->label() + "_loaded");
54 }
55 
56 // Load the value stored at 'ptr' as a 32-bit signed integer.
57 Value* emit_load_i32(Value* ptr, Function* function) {
58  return emit_load(ptr, Type::Int32Ptr, function);
59 }
60 
61 // Load the value stored at 'ptr' as a 64-bit signed integer.
62 Value* emit_load_i64(Value* ptr, Function* function) {
63  return emit_load(ptr, Type::Int64Ptr, function);
64 }
65 
66 // Read a 32- or 64-bit integer stored at 'ptr' and sign extend to 64-bit.
67 Value* emit_read_int_from_buff(Value* ptr, const int8_t compact_sz, Function* function) {
68  switch (compact_sz) {
69  case 8: {
70  return emit_load_i64(ptr, function);
71  }
72  case 4: {
73  const auto loaded_val = emit_load_i32(ptr, function);
74  return function->add<Cast>(Cast::CastOp::SExt, loaded_val, Type::Int64, "");
75  }
76  default: {
77  LOG(FATAL) << "Invalid byte width: " << compact_sz;
78  return nullptr;
79  }
80  }
81 }
82 
83 // Emit a runtime call that accumulates the value at 'other_ptr' into the destination
84 // byte address 'val_ptr', for types declared as not null.
85 void emit_aggregate_one_value(const std::string& agg_kind,
86  Value* val_ptr,
87  Value* other_ptr,
88  const size_t chosen_bytes,
89  const TargetInfo& agg_info,
90  Function* ir_reduce_one_entry) {
91  const auto sql_type = get_compact_type(agg_info);
92  const auto dest_name = agg_kind + "_dest";
93  if (sql_type.is_fp()) {
94  if (chosen_bytes == sizeof(float)) {
95  const auto agg = ir_reduce_one_entry->add<Cast>(
96  Cast::CastOp::BitCast, val_ptr, Type::Int32Ptr, dest_name);
97  const auto val = emit_load(other_ptr, Type::FloatPtr, ir_reduce_one_entry);
98  ir_reduce_one_entry->add<Call>(
99  "agg_" + agg_kind + "_float", std::vector<const Value*>{agg, val}, "");
100  } else {
101  CHECK_EQ(chosen_bytes, sizeof(double));
102  const auto agg = ir_reduce_one_entry->add<Cast>(
103  Cast::CastOp::BitCast, val_ptr, Type::Int64Ptr, dest_name);
104  const auto val = emit_load(other_ptr, Type::DoublePtr, ir_reduce_one_entry);
105  ir_reduce_one_entry->add<Call>(
106  "agg_" + agg_kind + "_double", std::vector<const Value*>{agg, val}, "");
107  }
108  } else {
109  if (chosen_bytes == sizeof(int32_t)) {
110  const auto agg = ir_reduce_one_entry->add<Cast>(
111  Cast::CastOp::BitCast, val_ptr, Type::Int32Ptr, dest_name);
112  const auto val = emit_load(other_ptr, Type::Int32Ptr, ir_reduce_one_entry);
113  ir_reduce_one_entry->add<Call>(
114  "agg_" + agg_kind + "_int32", std::vector<const Value*>{agg, val}, "");
115  } else {
116  CHECK_EQ(chosen_bytes, sizeof(int64_t));
117  const auto agg = ir_reduce_one_entry->add<Cast>(
118  Cast::CastOp::BitCast, val_ptr, Type::Int64Ptr, dest_name);
119  const auto val = emit_load(other_ptr, Type::Int64Ptr, ir_reduce_one_entry);
120  ir_reduce_one_entry->add<Call>(
121  "agg_" + agg_kind, std::vector<const Value*>{agg, val}, "");
122  }
123  }
124 }
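// Illustrative only: with agg_kind = "max", a double slot resolves to the runtime
// function "agg_max_double", a float slot to "agg_max_float", and a 32-bit integer
// slot to "agg_max_int32"; 64-bit integers use plain "agg_max". The nullable variant
// below emits the same names with a "_skip_val" suffix.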
125 
126 // Same as above, but also supports nullable types.
127 void emit_aggregate_one_nullable_value(const std::string& agg_kind,
128  Value* val_ptr,
129  Value* other_ptr,
130  const int64_t init_val,
131  const size_t chosen_bytes,
132  const TargetInfo& agg_info,
133  Function* ir_reduce_one_entry) {
134  const auto dest_name = agg_kind + "_dest";
135  if (agg_info.skip_null_val) {
136  const auto sql_type = get_compact_type(agg_info);
137  if (sql_type.is_fp()) {
138  if (chosen_bytes == sizeof(float)) {
139  const auto agg = ir_reduce_one_entry->add<Cast>(
140  Cast::CastOp::BitCast, val_ptr, Type::Int32Ptr, dest_name);
141  const auto val = emit_load(other_ptr, Type::FloatPtr, ir_reduce_one_entry);
142  const auto init_val_lv = ir_reduce_one_entry->addConstant<ConstantFP>(
143  *reinterpret_cast<const float*>(may_alias_ptr(&init_val)), Type::Float);
144  ir_reduce_one_entry->add<Call>("agg_" + agg_kind + "_float_skip_val",
145  std::vector<const Value*>{agg, val, init_val_lv},
146  "");
147  } else {
148  CHECK_EQ(chosen_bytes, sizeof(double));
149  const auto agg = ir_reduce_one_entry->add<Cast>(
150  Cast::CastOp::BitCast, val_ptr, Type::Int64Ptr, dest_name);
151  const auto val = emit_load(other_ptr, Type::DoublePtr, ir_reduce_one_entry);
152  const auto init_val_lv = ir_reduce_one_entry->addConstant<ConstantFP>(
153  *reinterpret_cast<const double*>(may_alias_ptr(&init_val)), Type::Double);
154  ir_reduce_one_entry->add<Call>("agg_" + agg_kind + "_double_skip_val",
155  std::vector<const Value*>{agg, val, init_val_lv},
156  "");
157  }
158  } else {
159  if (chosen_bytes == sizeof(int32_t)) {
160  const auto agg = ir_reduce_one_entry->add<Cast>(
161  Cast::CastOp::BitCast, val_ptr, Type::Int32Ptr, dest_name);
162  const auto val = emit_load(other_ptr, Type::Int32Ptr, ir_reduce_one_entry);
163  const auto init_val_lv =
164  ir_reduce_one_entry->addConstant<ConstantInt>(init_val, Type::Int32);
165  ir_reduce_one_entry->add<Call>("agg_" + agg_kind + "_int32_skip_val",
166  std::vector<const Value*>{agg, val, init_val_lv},
167  "");
168  } else {
169  CHECK_EQ(chosen_bytes, sizeof(int64_t));
170  const auto agg = ir_reduce_one_entry->add<Cast>(
171  Cast::CastOp::BitCast, val_ptr, Type::Int64Ptr, dest_name);
172  const auto val = emit_load(other_ptr, Type::Int64Ptr, ir_reduce_one_entry);
173  const auto init_val_lv =
174  ir_reduce_one_entry->addConstant<ConstantInt>(init_val, Type::Int64);
175  ir_reduce_one_entry->add<Call>("agg_" + agg_kind + "_skip_val",
176  std::vector<const Value*>{agg, val, init_val_lv},
177  "");
178  }
179  }
180  } else {
 181  emit_aggregate_one_value(
 182  agg_kind, val_ptr, other_ptr, chosen_bytes, agg_info, ir_reduce_one_entry);
183  }
184 }
185 
186 // Emit code to accumulate the 'other_ptr' count into the 'val_ptr' destination.
 187 void emit_aggregate_one_count(Value* val_ptr,
 188  Value* other_ptr,
189  const size_t chosen_bytes,
190  Function* ir_reduce_one_entry) {
191  const auto dest_name = "count_dest";
192  if (chosen_bytes == sizeof(int32_t)) {
193  const auto agg = ir_reduce_one_entry->add<Cast>(
194  Cast::CastOp::BitCast, val_ptr, Type::Int32Ptr, dest_name);
195  const auto val = emit_load(other_ptr, Type::Int32Ptr, ir_reduce_one_entry);
196  ir_reduce_one_entry->add<Call>(
197  "agg_sum_int32", std::vector<const Value*>{agg, val}, "");
198  } else {
199  CHECK_EQ(chosen_bytes, sizeof(int64_t));
200  const auto agg = ir_reduce_one_entry->add<Cast>(
201  Cast::CastOp::BitCast, val_ptr, Type::Int64Ptr, dest_name);
202  const auto val = emit_load(other_ptr, Type::Int64Ptr, ir_reduce_one_entry);
203  ir_reduce_one_entry->add<Call>("agg_sum", std::vector<const Value*>{agg, val}, "");
204  }
205 }
206 
207 // Emit code to load the value stored at 'other_pi8' as an integer of the given width
208 // 'chosen_bytes' and write it to the 'slot_pi8' destination, but only when necessary
209 // (i.e. when the existing value at the destination is still the initialization value).
210 void emit_write_projection(Value* slot_pi8,
 211  Value* other_pi8,
212  const int64_t init_val,
213  const size_t chosen_bytes,
214  Function* ir_reduce_one_entry) {
215  const auto func_name = "write_projection_int" + std::to_string(chosen_bytes * 8);
216  if (chosen_bytes == sizeof(int32_t)) {
217  const auto proj_val = emit_load_i32(other_pi8, ir_reduce_one_entry);
218  ir_reduce_one_entry->add<Call>(
219  func_name,
220  std::vector<const Value*>{
221  slot_pi8,
222  proj_val,
223  ir_reduce_one_entry->addConstant<ConstantInt>(init_val, Type::Int64)},
224  "");
225  } else {
226  CHECK_EQ(chosen_bytes, sizeof(int64_t));
227  const auto proj_val = emit_load_i64(other_pi8, ir_reduce_one_entry);
228  ir_reduce_one_entry->add<Call>(
229  func_name,
230  std::vector<const Value*>{
231  slot_pi8,
232  proj_val,
233  ir_reduce_one_entry->addConstant<ConstantInt>(init_val, Type::Int64)},
234  "");
235  }
236 }
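// A sketch of the intended semantics, assuming the runtime helpers follow the same
// pattern as varlen_buffer_sample() further below: the value loaded from 'other_pi8'
// is stored into the slot only when it differs from 'init_val', so an untouched
// (still-initialized) right-hand side never overwrites a real projected value.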
237 
238 // Same as above, but uses the 'checked_single_agg_id' runtime functions, which also
239 // report a conflict through their integer return value when the destination already
240 // holds a different, non-initial value.
241 const Value* emit_checked_write_projection(Value* slot_pi8,
 242  Value* other_pi8,
243  const int64_t init_val,
244  const size_t chosen_bytes,
245  Function* ir_reduce_one_entry) {
246  if (chosen_bytes == sizeof(int32_t)) {
247  const auto func_name = "checked_single_agg_id_int32";
248  const auto proj_val = emit_load_i32(other_pi8, ir_reduce_one_entry);
249  const auto slot_pi32 = ir_reduce_one_entry->add<Cast>(
250  Cast::CastOp::BitCast, slot_pi8, Type::Int32Ptr, "");
251  return ir_reduce_one_entry->add<Call>(
252  func_name,
253  Type::Int32,
254  std::vector<const Value*>{
255  slot_pi32,
256  proj_val,
257  ir_reduce_one_entry->addConstant<ConstantInt>(init_val, Type::Int32)},
258  "");
259  } else {
260  const auto func_name = "checked_single_agg_id";
261  CHECK_EQ(chosen_bytes, sizeof(int64_t));
262  const auto proj_val = emit_load_i64(other_pi8, ir_reduce_one_entry);
263  const auto slot_pi64 = ir_reduce_one_entry->add<Cast>(
264  Cast::CastOp::BitCast, slot_pi8, Type::Int64Ptr, "");
265 
266  return ir_reduce_one_entry->add<Call>(
267  func_name,
268  Type::Int32,
269  std::vector<const Value*>{
270  slot_pi64,
271  proj_val,
272  ir_reduce_one_entry->addConstant<ConstantInt>(init_val, Type::Int64)},
273  "");
274  }
275 }
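// The status returned here is not interpreted locally: reduceOneSlot() below compares it
// against zero and emits an early return, so a failed checked write aborts the whole
// reduction with the runtime function's error code.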
276 
277 std::unique_ptr<Function> create_function(
278  const std::string name,
279  const std::vector<Function::NamedArg>& arg_types,
280  const Type ret_type,
281  const bool always_inline) {
282  return std::make_unique<Function>(name, arg_types, ret_type, always_inline);
283 }
284 
285 // Create the declaration for the 'is_empty_entry' function. Use private linkage since
286 // it's a helper called only from the generated code, and mark it always inline.
287 std::unique_ptr<Function> setup_is_empty_entry(ReductionCode* reduction_code) {
288  return create_function(
289  "is_empty_entry", {{"row_ptr", Type::Int8Ptr}}, Type::Int1, /*always_inline=*/true);
290 }
291 
292 // Create the declaration for the 'reduce_one_entry' helper.
293 std::unique_ptr<Function> setup_reduce_one_entry(ReductionCode* reduction_code,
294  const QueryDescriptionType hash_type) {
295  std::string this_ptr_name;
296  std::string that_ptr_name;
297  switch (hash_type) {
 298  case QueryDescriptionType::GroupByBaselineHash: {
 299  this_ptr_name = "this_targets_ptr";
300  that_ptr_name = "that_targets_ptr";
301  break;
302  }
 303  case QueryDescriptionType::GroupByPerfectHash:
 304  case QueryDescriptionType::NonGroupedAggregate: {
 305  this_ptr_name = "this_row_ptr";
306  that_ptr_name = "that_row_ptr";
307  break;
308  }
309  default: {
310  LOG(FATAL) << "Unexpected query description type";
311  }
312  }
313  return create_function("reduce_one_entry",
314  {{this_ptr_name, Type::Int8Ptr},
315  {that_ptr_name, Type::Int8Ptr},
316  {"this_qmd", Type::VoidPtr},
317  {"that_qmd", Type::VoidPtr},
318  {"serialized_varlen_buffer_arg", Type::VoidPtr}},
319  Type::Int32,
320  /*always_inline=*/true);
321 }
322 
323 // Create the declaration for the 'reduce_one_entry_idx' helper.
324 std::unique_ptr<Function> setup_reduce_one_entry_idx(ReductionCode* reduction_code) {
325  return create_function("reduce_one_entry_idx",
326  {{"this_buff", Type::Int8Ptr},
327  {"that_buff", Type::Int8Ptr},
328  {"that_entry_idx", Type::Int32},
329  {"that_entry_count", Type::Int32},
330  {"this_qmd_handle", Type::VoidPtr},
331  {"that_qmd_handle", Type::VoidPtr},
332  {"serialized_varlen_buffer", Type::VoidPtr}},
333  Type::Int32,
334  /*always_inline=*/true);
335 }
336 
337 // Create the declaration for the 'reduce_loop' entry point. Use external linkage; this is
338 // the public API of the generated code, used directly from result set reduction.
339 std::unique_ptr<Function> setup_reduce_loop(ReductionCode* reduction_code) {
340  return create_function("reduce_loop",
341  {{"this_buff", Type::Int8Ptr},
342  {"that_buff", Type::Int8Ptr},
343  {"start_index", Type::Int32},
344  {"end_index", Type::Int32},
345  {"that_entry_count", Type::Int32},
346  {"this_qmd_handle", Type::VoidPtr},
347  {"that_qmd_handle", Type::VoidPtr},
348  {"serialized_varlen_buffer", Type::VoidPtr}},
349  Type::Int32,
350  /*always_inline=*/false);
351 }
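// For reference, the compiled 'reduce_loop' is invoked through ReductionCode::FuncPtr,
// declared alongside ReductionCode as:
//   int32_t (*)(int8_t* this_buff, const int8_t* that_buff,
//               const int32_t start_entry_index, const int32_t end_entry_index,
//               const int32_t that_entry_count, const void* this_qmd,
//               const void* that_qmd, const void* serialized_varlen_buffer);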
352 
353 llvm::Function* create_llvm_function(const Function* function, CgenState* cgen_state) {
354  AUTOMATIC_IR_METADATA(cgen_state);
355  auto& ctx = cgen_state->context_;
356  std::vector<llvm::Type*> parameter_types;
357  const auto& arg_types = function->arg_types();
358  for (const auto& named_arg : arg_types) {
359  CHECK(named_arg.type != Type::Void);
360  parameter_types.push_back(llvm_type(named_arg.type, ctx));
361  }
362  const auto func_type = llvm::FunctionType::get(
363  llvm_type(function->ret_type(), ctx), parameter_types, false);
364  const auto linkage = function->always_inline() ? llvm::Function::PrivateLinkage
365  : llvm::Function::ExternalLinkage;
366  auto func =
367  llvm::Function::Create(func_type, linkage, function->name(), cgen_state->module_);
368  const auto arg_it = func->arg_begin();
369  for (size_t i = 0; i < arg_types.size(); ++i) {
370  const auto arg = &*(arg_it + i);
371  arg->setName(arg_types[i].name);
372  }
373  if (function->always_inline()) {
 374  mark_function_always_inline(func);
 375  }
376  return func;
377 }
378 
379 // Set up the declarations for the reduction function and its helpers; the module and
380 // the code generation state object are created later, at compilation time.
381 ReductionCode setup_functions_ir(const QueryDescriptionType hash_type) {
 382  ReductionCode reduction_code{};
383  reduction_code.ir_is_empty = setup_is_empty_entry(&reduction_code);
384  reduction_code.ir_reduce_one_entry = setup_reduce_one_entry(&reduction_code, hash_type);
385  reduction_code.ir_reduce_one_entry_idx = setup_reduce_one_entry_idx(&reduction_code);
386  reduction_code.ir_reduce_loop = setup_reduce_loop(&reduction_code);
387  return reduction_code;
388 }
389 
390 bool is_aggregate_query(const QueryDescriptionType hash_type) {
 391  return hash_type == QueryDescriptionType::GroupByBaselineHash ||
 392  hash_type == QueryDescriptionType::GroupByPerfectHash ||
 393  hash_type == QueryDescriptionType::NonGroupedAggregate;
 394 }
395 
396 // Variable length sample fast path (no serialized variable length buffer).
397 void varlen_buffer_sample(int8_t* this_ptr1,
398  int8_t* this_ptr2,
399  const int8_t* that_ptr1,
400  const int8_t* that_ptr2,
401  const int64_t init_val) {
402  const auto rhs_proj_col = *reinterpret_cast<const int64_t*>(that_ptr1);
403  if (rhs_proj_col != init_val) {
404  *reinterpret_cast<int64_t*>(this_ptr1) = rhs_proj_col;
405  }
406  CHECK(this_ptr2 && that_ptr2);
407  *reinterpret_cast<int64_t*>(this_ptr2) = *reinterpret_cast<const int64_t*>(that_ptr2);
408 }
409 
410 } // namespace
411 
412 extern "C" void serialized_varlen_buffer_sample(
 413  const void* serialized_varlen_buffer_handle,
414  int8_t* this_ptr1,
415  int8_t* this_ptr2,
416  const int8_t* that_ptr1,
417  const int8_t* that_ptr2,
418  const int64_t init_val,
419  const int64_t length_to_elems) {
420  if (!serialized_varlen_buffer_handle) {
421  varlen_buffer_sample(this_ptr1, this_ptr2, that_ptr1, that_ptr2, init_val);
422  return;
423  }
424  const auto& serialized_varlen_buffer =
425  *reinterpret_cast<const std::vector<std::string>*>(serialized_varlen_buffer_handle);
426  if (!serialized_varlen_buffer.empty()) {
427  const auto rhs_proj_col = *reinterpret_cast<const int64_t*>(that_ptr1);
428  CHECK_LT(static_cast<size_t>(rhs_proj_col), serialized_varlen_buffer.size());
429  const auto& varlen_bytes_str = serialized_varlen_buffer[rhs_proj_col];
430  const auto str_ptr = reinterpret_cast<const int8_t*>(varlen_bytes_str.c_str());
431  *reinterpret_cast<int64_t*>(this_ptr1) = reinterpret_cast<const int64_t>(str_ptr);
432  *reinterpret_cast<int64_t*>(this_ptr2) =
433  static_cast<int64_t>(varlen_bytes_str.size() / length_to_elems);
434  } else {
435  varlen_buffer_sample(this_ptr1, this_ptr2, that_ptr1, that_ptr2, init_val);
436  }
437 }
438 
439 // Wrappers to be called from the generated code, sharing implementation with the rest of
440 // the system.
441 
442 extern "C" void count_distinct_set_union_jit_rt(const int64_t new_set_handle,
443  const int64_t old_set_handle,
444  const void* that_qmd_handle,
445  const void* this_qmd_handle,
446  const int64_t target_logical_idx) {
447  const auto that_qmd = reinterpret_cast<const QueryMemoryDescriptor*>(that_qmd_handle);
448  const auto this_qmd = reinterpret_cast<const QueryMemoryDescriptor*>(this_qmd_handle);
449  const auto& new_count_distinct_desc =
450  that_qmd->getCountDistinctDescriptor(target_logical_idx);
451  const auto& old_count_distinct_desc =
452  this_qmd->getCountDistinctDescriptor(target_logical_idx);
453  CHECK(old_count_distinct_desc.impl_type_ != CountDistinctImplType::Invalid);
454  CHECK(old_count_distinct_desc.impl_type_ == new_count_distinct_desc.impl_type_);
 455  count_distinct_set_union(
 456  new_set_handle, old_set_handle, new_count_distinct_desc, old_count_distinct_desc);
457 }
458 
459 extern "C" void get_group_value_reduction_rt(int8_t* groups_buffer,
460  const int8_t* key,
461  const uint32_t key_count,
462  const void* this_qmd_handle,
463  const int8_t* that_buff,
464  const uint32_t that_entry_idx,
465  const uint32_t that_entry_count,
466  const uint32_t row_size_bytes,
467  int64_t** buff_out,
468  uint8_t* empty) {
469  const auto& this_qmd = *reinterpret_cast<const QueryMemoryDescriptor*>(this_qmd_handle);
470  const auto gvi = get_group_value_reduction(reinterpret_cast<int64_t*>(groups_buffer),
471  this_qmd.getEntryCount(),
472  reinterpret_cast<const int64_t*>(key),
473  key_count,
474  this_qmd.getEffectiveKeyWidth(),
475  this_qmd,
476  reinterpret_cast<const int64_t*>(that_buff),
477  that_entry_idx,
478  that_entry_count,
479  row_size_bytes >> 3);
480  *buff_out = gvi.first;
481  *empty = gvi.second;
482 }
483 
484 extern "C" uint8_t check_watchdog_rt(const size_t sample_seed) {
485  if (UNLIKELY(g_enable_dynamic_watchdog && (sample_seed & 0x3F) == 0 &&
486  dynamic_watchdog())) {
487  return true;
488  }
489  return false;
490 }
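// In other words, the dynamic watchdog is only actually polled on entries whose low six
// bits are zero, i.e. roughly once every 64 reduced entries, keeping the per-entry
// overhead of the check negligible.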
491 
492 ResultSetReductionJIT::ResultSetReductionJIT(const QueryMemoryDescriptor& query_mem_desc,
 493  const std::vector<TargetInfo>& targets,
494  const std::vector<int64_t>& target_init_vals)
495  : query_mem_desc_(query_mem_desc)
496  , targets_(targets)
497  , target_init_vals_(target_init_vals) {}
498 
499 // The code generated for a reduction between two result set buffers is structured in
500 // several functions and their IR is stored in the 'ReductionCode' structure. At a high
501 // level, the pseudocode is:
502 //
503 // func is_empty_func(row_ptr):
504 //   ...
505 //
506 // func reduce_func_baseline(this_ptr, that_ptr):
507 //   if is_empty_func(that_ptr):
508 //     return
509 //   for each target in the row:
510 //     reduce target from that_ptr into this_ptr
511 //
512 // func reduce_func_perfect_hash(this_ptr, that_ptr):
513 //   if is_empty_func(that_ptr):
514 //     return
515 //   for each target in the row:
516 //     reduce target from that_ptr into this_ptr
517 //
518 // func reduce_func_idx(this_buff, that_buff, that_entry_index):
519 //   that_ptr = that_result_set[that_entry_index]
520 //   # Retrieval of 'this_ptr' is different between perfect hash and baseline.
521 //   this_ptr = this_result_set[that_entry_index]
522 //              or
523 //              get_row(key(that_row_ptr), this_result_set buffer)
524 //   reduce_func_[baseline|perfect_hash](this_ptr, that_ptr)
525 //
526 // func reduce_loop(this_buff, that_buff, start_entry_index, end_entry_index):
527 //   for that_entry_index in [start_entry_index, end_entry_index):
528 //     reduce_func_idx(this_buff, that_buff, that_entry_index)
529 
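// A minimal sketch of how the compiled entry point could be driven by the reduction
// step (illustrative; the actual call sites live elsewhere in the result set reduction
// code and pass the query memory descriptors and the serialized varlen buffer as opaque
// handles):
//
//   const auto rc = reduction_code.func_ptr(this_buff, that_buff,
//                                           /*start_entry_index=*/0,
//                                           /*end_entry_index=*/entry_count,
//                                           that_entry_count,
//                                           &this_query_mem_desc, &that_query_mem_desc,
//                                           &serialized_varlen_buffer);
//   if (rc == WATCHDOG_ERROR) { /* the dynamic watchdog interrupted the reduction */ }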
530 ReductionCode ResultSetReductionJIT::codegen() const {
 531  const auto hash_type = query_mem_desc_.getQueryDescriptionType();
 532  if (query_mem_desc_.didOutputColumnar() || !is_aggregate_query(hash_type)) {
 533  return {};
534  }
535  auto reduction_code = setup_functions_ir(hash_type);
536  isEmpty(reduction_code);
 537  switch (hash_type) {
 538  case QueryDescriptionType::GroupByPerfectHash:
 539  case QueryDescriptionType::NonGroupedAggregate: {
 540  reduceOneEntryNoCollisions(reduction_code);
541  reduceOneEntryNoCollisionsIdx(reduction_code);
542  break;
543  }
 544  case QueryDescriptionType::GroupByBaselineHash: {
 545  reduceOneEntryBaseline(reduction_code);
546  reduceOneEntryBaselineIdx(reduction_code);
547  break;
548  }
549  default: {
550  LOG(FATAL) << "Unexpected query description type";
551  }
552  }
553  reduceLoop(reduction_code);
554  // For small result sets, avoid native code generation and use the interpreter instead.
 555  if (query_mem_desc_.getEntryCount() < INTERP_THRESHOLD) {
 557  return reduction_code;
558  }
559  std::lock_guard<std::mutex> reduction_guard(ReductionCode::s_reduction_mutex);
560  CodeCacheKey key{cacheKey()};
561  const auto compilation_context = s_code_cache.get(key);
562  if (compilation_context) {
563  auto cpu_context =
564  std::dynamic_pointer_cast<CpuCompilationContext>(compilation_context->first);
565  CHECK(cpu_context);
566  return {reinterpret_cast<ReductionCode::FuncPtr>(cpu_context->func()),
567  nullptr,
568  nullptr,
569  nullptr,
570  std::move(reduction_code.ir_is_empty),
571  std::move(reduction_code.ir_reduce_one_entry),
572  std::move(reduction_code.ir_reduce_one_entry_idx),
573  std::move(reduction_code.ir_reduce_loop)};
574  }
575  reduction_code.cgen_state.reset(new CgenState({}, false));
576  auto cgen_state = reduction_code.cgen_state.get();
577  std::unique_ptr<llvm::Module> module = runtime_module_shallow_copy(cgen_state);
578  cgen_state->module_ = module.get();
579  AUTOMATIC_IR_METADATA(cgen_state);
580  auto ir_is_empty = create_llvm_function(reduction_code.ir_is_empty.get(), cgen_state);
581  auto ir_reduce_one_entry =
582  create_llvm_function(reduction_code.ir_reduce_one_entry.get(), cgen_state);
583  auto ir_reduce_one_entry_idx =
584  create_llvm_function(reduction_code.ir_reduce_one_entry_idx.get(), cgen_state);
585  auto ir_reduce_loop =
586  create_llvm_function(reduction_code.ir_reduce_loop.get(), cgen_state);
587  std::unordered_map<const Function*, llvm::Function*> f;
588  f.emplace(reduction_code.ir_is_empty.get(), ir_is_empty);
589  f.emplace(reduction_code.ir_reduce_one_entry.get(), ir_reduce_one_entry);
590  f.emplace(reduction_code.ir_reduce_one_entry_idx.get(), ir_reduce_one_entry_idx);
591  f.emplace(reduction_code.ir_reduce_loop.get(), ir_reduce_loop);
592  translate_function(reduction_code.ir_is_empty.get(), ir_is_empty, reduction_code, f);
 593  translate_function(
 594  reduction_code.ir_reduce_one_entry.get(), ir_reduce_one_entry, reduction_code, f);
595  translate_function(reduction_code.ir_reduce_one_entry_idx.get(),
596  ir_reduce_one_entry_idx,
597  reduction_code,
598  f);
 599  translate_function(
 600  reduction_code.ir_reduce_loop.get(), ir_reduce_loop, reduction_code, f);
601  reduction_code.llvm_reduce_loop = ir_reduce_loop;
602  reduction_code.module = std::move(module);
 603  AUTOMATIC_IR_METADATA_DONE();
 604  return finalizeReductionCode(std::move(reduction_code),
605  ir_is_empty,
606  ir_reduce_one_entry,
607  ir_reduce_one_entry_idx,
608  key);
609 }
610 
611 void ResultSetReductionJIT::clearCache() {
 612  // Clear the stub cache to avoid a crash caused by the non-deterministic static
 613  // destructor order of the LLVM context and the cache.
 614  StubGenerator::clearCache();
 615  s_code_cache.clear();
 616  g_rt_module = nullptr;
617 }
618 
619 void ResultSetReductionJIT::isEmpty(const ReductionCode& reduction_code) const {
620  auto ir_is_empty = reduction_code.ir_is_empty.get();
623  Value* key{nullptr};
624  Value* empty_key_val{nullptr};
625  const auto keys_ptr = ir_is_empty->arg(0);
630  CHECK_LT(static_cast<size_t>(query_mem_desc_.getTargetIdxForKey()),
631  target_init_vals_.size());
632  const int64_t target_slot_off =
 633  get_byteoff_of_slot(query_mem_desc_.getTargetIdxForKey(), query_mem_desc_);
 634  const auto slot_ptr = ir_is_empty->add<GetElementPtr>(
635  keys_ptr,
636  ir_is_empty->addConstant<ConstantInt>(target_slot_off, Type::Int32),
637  "is_empty_slot_ptr");
638  const auto compact_sz =
 639  query_mem_desc_.getPaddedSlotWidthBytes(query_mem_desc_.getTargetIdxForKey());
 640  key = emit_read_int_from_buff(slot_ptr, compact_sz, ir_is_empty);
641  empty_key_val = ir_is_empty->addConstant<ConstantInt>(
 642  target_init_vals_[query_mem_desc_.getTargetIdxForKey()], Type::Int64);
 643  } else {
 644  switch (query_mem_desc_.getEffectiveKeyWidth()) {
 645  case 4: {
648  key = emit_load_i32(keys_ptr, ir_is_empty);
649  empty_key_val = ir_is_empty->addConstant<ConstantInt>(EMPTY_KEY_32, Type::Int32);
650  break;
651  }
652  case 8: {
653  key = emit_load_i64(keys_ptr, ir_is_empty);
654  empty_key_val = ir_is_empty->addConstant<ConstantInt>(EMPTY_KEY_64, Type::Int64);
655  break;
656  }
657  default:
658  LOG(FATAL) << "Invalid key width";
659  }
660  }
661  const auto ret =
662  ir_is_empty->add<ICmp>(ICmp::Predicate::EQ, key, empty_key_val, "is_key_empty");
663  ir_is_empty->add<Ret>(ret);
664 }
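// Summarizing the two branches above: when a target slot doubles as the emptiness marker
// (getTargetIdxForKey()), the entry is empty if that slot still holds its initialization
// value; otherwise the first key is compared against the EMPTY_KEY_32 / EMPTY_KEY_64
// sentinels, selected by the effective key width.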
665 
666 void ResultSetReductionJIT::reduceOneEntryNoCollisions(
 667  const ReductionCode& reduction_code) const {
668  auto ir_reduce_one_entry = reduction_code.ir_reduce_one_entry.get();
669  const auto this_row_ptr = ir_reduce_one_entry->arg(0);
670  const auto that_row_ptr = ir_reduce_one_entry->arg(1);
671  const auto that_is_empty =
672  ir_reduce_one_entry->add<Call>(reduction_code.ir_is_empty.get(),
673  std::vector<const Value*>{that_row_ptr},
674  "that_is_empty");
675  ir_reduce_one_entry->add<ReturnEarly>(
676  that_is_empty, ir_reduce_one_entry->addConstant<ConstantInt>(0, Type::Int32), "");
677 
678  const auto key_bytes = get_key_bytes_rowwise(query_mem_desc_);
679  if (key_bytes) { // copy the key from right hand side
680  ir_reduce_one_entry->add<MemCpy>(
681  this_row_ptr,
682  that_row_ptr,
683  ir_reduce_one_entry->addConstant<ConstantInt>(key_bytes, Type::Int32));
684  }
685 
686  const auto key_bytes_with_padding = align_to_int64(key_bytes);
687  const auto key_bytes_lv =
688  ir_reduce_one_entry->addConstant<ConstantInt>(key_bytes_with_padding, Type::Int32);
689  const auto this_targets_start_ptr = ir_reduce_one_entry->add<GetElementPtr>(
690  this_row_ptr, key_bytes_lv, "this_targets_start");
691  const auto that_targets_start_ptr = ir_reduce_one_entry->add<GetElementPtr>(
692  that_row_ptr, key_bytes_lv, "that_targets_start");
693 
 694  reduceOneEntryTargetsNoCollisions(
 695  ir_reduce_one_entry, this_targets_start_ptr, that_targets_start_ptr);
696 }
697 
698 void ResultSetReductionJIT::reduceOneEntryTargetsNoCollisions(
 699  Function* ir_reduce_one_entry,
700  Value* this_targets_start_ptr,
701  Value* that_targets_start_ptr) const {
702  const auto& col_slot_context = query_mem_desc_.getColSlotContext();
703  Value* this_targets_ptr = this_targets_start_ptr;
704  Value* that_targets_ptr = that_targets_start_ptr;
705  size_t init_agg_val_idx = 0;
706  for (size_t target_logical_idx = 0; target_logical_idx < targets_.size();
707  ++target_logical_idx) {
708  const auto& target_info = targets_[target_logical_idx];
709  const auto& slots_for_col = col_slot_context.getSlotsForCol(target_logical_idx);
710  Value* this_ptr2{nullptr};
711  Value* that_ptr2{nullptr};
712 
713  bool two_slot_target{false};
714  if (target_info.is_agg &&
715  (target_info.agg_kind == kAVG ||
716  (target_info.agg_kind == kSAMPLE && target_info.sql_type.is_varlen()))) {
 717  // Note that this assumes that if one of the slot pairs in a given target is an array,
 718  // all slot pairs are arrays. Currently this is true for all geo targets, but we
 719  // should codify and store this information better in the future.
720  two_slot_target = true;
721  }
722 
723  for (size_t target_slot_idx = slots_for_col.front();
724  target_slot_idx < slots_for_col.back() + 1;
725  target_slot_idx += 2) {
726  const auto slot_off_val = query_mem_desc_.getPaddedSlotWidthBytes(target_slot_idx);
727  const auto slot_off =
728  ir_reduce_one_entry->addConstant<ConstantInt>(slot_off_val, Type::Int32);
729  if (UNLIKELY(two_slot_target)) {
730  const auto desc = "target_" + std::to_string(target_logical_idx) + "_second_slot";
731  this_ptr2 = ir_reduce_one_entry->add<GetElementPtr>(
732  this_targets_ptr, slot_off, "this_" + desc);
733  that_ptr2 = ir_reduce_one_entry->add<GetElementPtr>(
734  that_targets_ptr, slot_off, "that_" + desc);
735  }
736  reduceOneSlot(this_targets_ptr,
737  this_ptr2,
738  that_targets_ptr,
739  that_ptr2,
740  target_info,
741  target_logical_idx,
742  target_slot_idx,
743  init_agg_val_idx,
744  slots_for_col.front(),
745  ir_reduce_one_entry);
746  auto increment_agg_val_idx_maybe =
747  [&init_agg_val_idx, &target_logical_idx, this](const int slot_count) {
 748  if (query_mem_desc_.targetGroupbyIndicesSize() == 0 ||
 749  query_mem_desc_.getTargetGroupbyIndex(target_logical_idx) < 0) {
750  init_agg_val_idx += slot_count;
751  }
752  };
753  if (target_logical_idx + 1 == targets_.size() &&
754  target_slot_idx + 1 >= slots_for_col.back()) {
755  break;
756  }
757  const auto next_desc =
758  "target_" + std::to_string(target_logical_idx + 1) + "_first_slot";
759  if (UNLIKELY(two_slot_target)) {
760  increment_agg_val_idx_maybe(2);
761  const auto two_slot_off = ir_reduce_one_entry->addConstant<ConstantInt>(
762  slot_off_val + query_mem_desc_.getPaddedSlotWidthBytes(target_slot_idx + 1),
763  Type::Int32);
764  this_targets_ptr = ir_reduce_one_entry->add<GetElementPtr>(
765  this_targets_ptr, two_slot_off, "this_" + next_desc);
766  that_targets_ptr = ir_reduce_one_entry->add<GetElementPtr>(
767  that_targets_ptr, two_slot_off, "that_" + next_desc);
768  } else {
769  increment_agg_val_idx_maybe(1);
770  this_targets_ptr = ir_reduce_one_entry->add<GetElementPtr>(
771  this_targets_ptr, slot_off, "this_" + next_desc);
772  that_targets_ptr = ir_reduce_one_entry->add<GetElementPtr>(
773  that_targets_ptr, slot_off, "that_" + next_desc);
774  }
775  }
776  }
777  ir_reduce_one_entry->add<Ret>(
778  ir_reduce_one_entry->addConstant<ConstantInt>(0, Type::Int32));
779 }
780 
781 void ResultSetReductionJIT::reduceOneEntryBaseline(
 782  const ReductionCode& reduction_code) const {
783  auto ir_reduce_one_entry = reduction_code.ir_reduce_one_entry.get();
784  const auto this_targets_ptr_arg = ir_reduce_one_entry->arg(0);
785  const auto that_targets_ptr_arg = ir_reduce_one_entry->arg(1);
786  Value* this_ptr1 = this_targets_ptr_arg;
787  Value* that_ptr1 = that_targets_ptr_arg;
788  size_t j = 0;
789  size_t init_agg_val_idx = 0;
790  for (size_t target_logical_idx = 0; target_logical_idx < targets_.size();
791  ++target_logical_idx) {
792  const auto& target_info = targets_[target_logical_idx];
793  Value* this_ptr2{nullptr};
794  Value* that_ptr2{nullptr};
795  if (target_info.is_agg &&
796  (target_info.agg_kind == kAVG ||
797  (target_info.agg_kind == kSAMPLE && target_info.sql_type.is_varlen()))) {
798  const auto desc = "target_" + std::to_string(target_logical_idx) + "_second_slot";
799  const auto second_slot_rel_off =
800  ir_reduce_one_entry->addConstant<ConstantInt>(sizeof(int64_t), Type::Int32);
801  this_ptr2 = ir_reduce_one_entry->add<GetElementPtr>(
802  this_ptr1, second_slot_rel_off, "this_" + desc);
803  that_ptr2 = ir_reduce_one_entry->add<GetElementPtr>(
804  that_ptr1, second_slot_rel_off, "that_" + desc);
805  }
806  reduceOneSlot(this_ptr1,
807  this_ptr2,
808  that_ptr1,
809  that_ptr2,
810  target_info,
811  target_logical_idx,
812  j,
813  init_agg_val_idx,
814  j,
815  ir_reduce_one_entry);
816  if (target_logical_idx + 1 == targets_.size()) {
817  break;
818  }
 819  if (query_mem_desc_.targetGroupbyIndicesSize() == 0) {
 820  init_agg_val_idx = advance_slot(init_agg_val_idx, target_info, false);
821  } else {
822  if (query_mem_desc_.getTargetGroupbyIndex(target_logical_idx) < 0) {
823  init_agg_val_idx = advance_slot(init_agg_val_idx, target_info, false);
824  }
825  }
826  j = advance_slot(j, target_info, false);
827  const auto next_desc =
828  "target_" + std::to_string(target_logical_idx + 1) + "_first_slot";
829  auto next_slot_rel_off = ir_reduce_one_entry->addConstant<ConstantInt>(
830  init_agg_val_idx * sizeof(int64_t), Type::Int32);
831  this_ptr1 = ir_reduce_one_entry->add<GetElementPtr>(
832  this_targets_ptr_arg, next_slot_rel_off, next_desc);
833  that_ptr1 = ir_reduce_one_entry->add<GetElementPtr>(
834  that_targets_ptr_arg, next_slot_rel_off, next_desc);
835  }
836  ir_reduce_one_entry->add<Ret>(
837  ir_reduce_one_entry->addConstant<ConstantInt>(0, Type::Int32));
838 }
839 
840 void ResultSetReductionJIT::reduceOneEntryNoCollisionsIdx(
 841  const ReductionCode& reduction_code) const {
842  auto ir_reduce_one_entry_idx = reduction_code.ir_reduce_one_entry_idx.get();
847  const auto this_buff = ir_reduce_one_entry_idx->arg(0);
848  const auto that_buff = ir_reduce_one_entry_idx->arg(1);
849  const auto entry_idx = ir_reduce_one_entry_idx->arg(2);
850  const auto this_qmd_handle = ir_reduce_one_entry_idx->arg(4);
851  const auto that_qmd_handle = ir_reduce_one_entry_idx->arg(5);
852  const auto serialized_varlen_buffer_arg = ir_reduce_one_entry_idx->arg(6);
853  const auto row_bytes = ir_reduce_one_entry_idx->addConstant<ConstantInt>(
 854  get_row_bytes(query_mem_desc_), Type::Int64);
 855  const auto entry_idx_64 = ir_reduce_one_entry_idx->add<Cast>(
856  Cast::CastOp::SExt, entry_idx, Type::Int64, "entry_idx_64");
857  const auto row_off_in_bytes = ir_reduce_one_entry_idx->add<BinaryOperator>(
858  BinaryOperator::BinaryOp::Mul, entry_idx_64, row_bytes, "row_off_in_bytes");
859  const auto this_row_ptr = ir_reduce_one_entry_idx->add<GetElementPtr>(
860  this_buff, row_off_in_bytes, "this_row_ptr");
861  const auto that_row_ptr = ir_reduce_one_entry_idx->add<GetElementPtr>(
862  that_buff, row_off_in_bytes, "that_row_ptr");
863  const auto reduce_rc = ir_reduce_one_entry_idx->add<Call>(
864  reduction_code.ir_reduce_one_entry.get(),
865  std::vector<const Value*>{this_row_ptr,
866  that_row_ptr,
867  this_qmd_handle,
868  that_qmd_handle,
869  serialized_varlen_buffer_arg},
870  "");
871  ir_reduce_one_entry_idx->add<Ret>(reduce_rc);
872 }
873 
874 void ResultSetReductionJIT::reduceOneEntryBaselineIdx(
 875  const ReductionCode& reduction_code) const {
876  auto ir_reduce_one_entry_idx = reduction_code.ir_reduce_one_entry_idx.get();
881  const auto this_buff = ir_reduce_one_entry_idx->arg(0);
882  const auto that_buff = ir_reduce_one_entry_idx->arg(1);
883  const auto that_entry_idx = ir_reduce_one_entry_idx->arg(2);
884  const auto that_entry_count = ir_reduce_one_entry_idx->arg(3);
885  const auto this_qmd_handle = ir_reduce_one_entry_idx->arg(4);
886  const auto that_qmd_handle = ir_reduce_one_entry_idx->arg(5);
887  const auto serialized_varlen_buffer_arg = ir_reduce_one_entry_idx->arg(6);
888  const auto row_bytes = ir_reduce_one_entry_idx->addConstant<ConstantInt>(
 889  get_row_bytes(query_mem_desc_), Type::Int64);
 890  const auto that_entry_idx_64 = ir_reduce_one_entry_idx->add<Cast>(
891  Cast::CastOp::SExt, that_entry_idx, Type::Int64, "that_entry_idx_64");
892  const auto that_row_off_in_bytes =
893  ir_reduce_one_entry_idx->add<BinaryOperator>(BinaryOperator::BinaryOp::Mul,
894  that_entry_idx_64,
895  row_bytes,
896  "that_row_off_in_bytes");
897  const auto that_row_ptr = ir_reduce_one_entry_idx->add<GetElementPtr>(
898  that_buff, that_row_off_in_bytes, "that_row_ptr");
899  const auto that_is_empty =
900  ir_reduce_one_entry_idx->add<Call>(reduction_code.ir_is_empty.get(),
901  std::vector<const Value*>{that_row_ptr},
902  "that_is_empty");
903  ir_reduce_one_entry_idx->add<ReturnEarly>(
904  that_is_empty,
905  ir_reduce_one_entry_idx->addConstant<ConstantInt>(0, Type::Int32),
906  "");
907  const auto key_count = query_mem_desc_.getGroupbyColCount();
908  const auto one_element =
909  ir_reduce_one_entry_idx->addConstant<ConstantInt>(1, Type::Int32);
910  const auto this_targets_ptr_i64_ptr = ir_reduce_one_entry_idx->add<Alloca>(
911  Type::Int64Ptr, one_element, "this_targets_ptr_out");
912  const auto this_is_empty_ptr =
913  ir_reduce_one_entry_idx->add<Alloca>(Type::Int8, one_element, "this_is_empty_out");
914  ir_reduce_one_entry_idx->add<ExternalCall>(
915  "get_group_value_reduction_rt",
916  Type::Void,
917  std::vector<const Value*>{
918  this_buff,
919  that_row_ptr,
920  ir_reduce_one_entry_idx->addConstant<ConstantInt>(key_count, Type::Int32),
921  this_qmd_handle,
922  that_buff,
923  that_entry_idx,
924  that_entry_count,
925  row_bytes,
926  this_targets_ptr_i64_ptr,
927  this_is_empty_ptr},
928  "");
929  const auto this_targets_ptr_i64 = ir_reduce_one_entry_idx->add<Load>(
930  this_targets_ptr_i64_ptr, "this_targets_ptr_i64");
931  auto this_is_empty =
932  ir_reduce_one_entry_idx->add<Load>(this_is_empty_ptr, "this_is_empty");
933  this_is_empty = ir_reduce_one_entry_idx->add<Cast>(
934  Cast::CastOp::Trunc, this_is_empty, Type::Int1, "this_is_empty_bool");
935  ir_reduce_one_entry_idx->add<ReturnEarly>(
936  this_is_empty,
937  ir_reduce_one_entry_idx->addConstant<ConstantInt>(0, Type::Int32),
938  "");
 939  const auto key_qw_count = get_slot_off_quad(query_mem_desc_);
 940  const auto this_targets_ptr = ir_reduce_one_entry_idx->add<Cast>(
941  Cast::CastOp::BitCast, this_targets_ptr_i64, Type::Int8Ptr, "this_targets_ptr");
942  const auto key_byte_count = key_qw_count * sizeof(int64_t);
943  const auto key_byte_count_lv =
944  ir_reduce_one_entry_idx->addConstant<ConstantInt>(key_byte_count, Type::Int32);
945  const auto that_targets_ptr = ir_reduce_one_entry_idx->add<GetElementPtr>(
946  that_row_ptr, key_byte_count_lv, "that_targets_ptr");
947  const auto reduce_rc = ir_reduce_one_entry_idx->add<Call>(
948  reduction_code.ir_reduce_one_entry.get(),
949  std::vector<const Value*>{this_targets_ptr,
950  that_targets_ptr,
951  this_qmd_handle,
952  that_qmd_handle,
953  serialized_varlen_buffer_arg},
954  "");
955  ir_reduce_one_entry_idx->add<Ret>(reduce_rc);
956 }
957 
958 namespace {
959 
960 void generate_loop_body(For* for_loop,
961  Function* ir_reduce_loop,
962  Function* ir_reduce_one_entry_idx,
963  Value* this_buff,
964  Value* that_buff,
965  Value* start_index,
966  Value* that_entry_count,
967  Value* this_qmd_handle,
968  Value* that_qmd_handle,
969  Value* serialized_varlen_buffer) {
970  const auto that_entry_idx = for_loop->add<BinaryOperator>(
971  BinaryOperator::BinaryOp::Add, for_loop->iter(), start_index, "that_entry_idx");
972  const auto watchdog_sample_seed =
973  for_loop->add<Cast>(Cast::CastOp::SExt, that_entry_idx, Type::Int64, "");
974  const auto watchdog_triggered =
975  for_loop->add<ExternalCall>("check_watchdog_rt",
976  Type::Int8,
977  std::vector<const Value*>{watchdog_sample_seed},
978  "");
979  const auto watchdog_triggered_bool =
980  for_loop->add<ICmp>(ICmp::Predicate::NE,
981  watchdog_triggered,
982  ir_reduce_loop->addConstant<ConstantInt>(0, Type::Int8),
983  "");
984  for_loop->add<ReturnEarly>(
985  watchdog_triggered_bool,
986  ir_reduce_loop->addConstant<ConstantInt>(WATCHDOG_ERROR, Type::Int32),
987  "");
988  const auto reduce_rc =
989  for_loop->add<Call>(ir_reduce_one_entry_idx,
990  std::vector<const Value*>{this_buff,
991  that_buff,
992  that_entry_idx,
993  that_entry_count,
994  this_qmd_handle,
995  that_qmd_handle,
996  serialized_varlen_buffer},
997  "");
998 
999  auto reduce_rc_bool =
1000  for_loop->add<ICmp>(ICmp::Predicate::NE,
1001  reduce_rc,
1002  ir_reduce_loop->addConstant<ConstantInt>(0, Type::Int32),
1003  "");
1004  for_loop->add<ReturnEarly>(reduce_rc_bool, reduce_rc, "");
1005 }
1006 
1007 } // namespace
1008 
1009 void ResultSetReductionJIT::reduceLoop(const ReductionCode& reduction_code) const {
1010  auto ir_reduce_loop = reduction_code.ir_reduce_loop.get();
1011  const auto this_buff_arg = ir_reduce_loop->arg(0);
1012  const auto that_buff_arg = ir_reduce_loop->arg(1);
1013  const auto start_index_arg = ir_reduce_loop->arg(2);
1014  const auto end_index_arg = ir_reduce_loop->arg(3);
1015  const auto that_entry_count_arg = ir_reduce_loop->arg(4);
1016  const auto this_qmd_handle_arg = ir_reduce_loop->arg(5);
1017  const auto that_qmd_handle_arg = ir_reduce_loop->arg(6);
1018  const auto serialized_varlen_buffer_arg = ir_reduce_loop->arg(7);
1019  For* for_loop =
1020  static_cast<For*>(ir_reduce_loop->add<For>(start_index_arg, end_index_arg, ""));
1021  generate_loop_body(for_loop,
1022  ir_reduce_loop,
1023  reduction_code.ir_reduce_one_entry_idx.get(),
1024  this_buff_arg,
1025  that_buff_arg,
1026  start_index_arg,
1027  that_entry_count_arg,
1028  this_qmd_handle_arg,
1029  that_qmd_handle_arg,
1030  serialized_varlen_buffer_arg);
1031  ir_reduce_loop->add<Ret>(ir_reduce_loop->addConstant<ConstantInt>(0, Type::Int32));
1032 }
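// Summarizing the generated loop (sketch): each iteration first polls the watchdog through
// check_watchdog_rt and returns WATCHDOG_ERROR if it fired, then calls 'reduce_one_entry_idx'
// for the current entry and propagates any non-zero return code; completing the range
// yields 0.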
1033 
1034 void ResultSetReductionJIT::reduceOneSlot(Value* this_ptr1,
 1035  Value* this_ptr2,
1036  Value* that_ptr1,
1037  Value* that_ptr2,
1038  const TargetInfo& target_info,
1039  const size_t target_logical_idx,
1040  const size_t target_slot_idx,
1041  const size_t init_agg_val_idx,
1042  const size_t first_slot_idx_for_target,
1043  Function* ir_reduce_one_entry) const {
 1044  if (query_mem_desc_.targetGroupbyIndicesSize() > 0) {
 1045  if (query_mem_desc_.getTargetGroupbyIndex(target_logical_idx) >= 0) {
1046  return;
1047  }
1048  }
1049  const bool float_argument_input = takes_float_argument(target_info);
1050  const auto chosen_bytes =
1051  get_width_for_slot(target_slot_idx, float_argument_input, query_mem_desc_);
1052  CHECK_LT(init_agg_val_idx, target_init_vals_.size());
1053  auto init_val = target_init_vals_[init_agg_val_idx];
1054  if (target_info.is_agg &&
1055  (target_info.agg_kind != kSINGLE_VALUE && target_info.agg_kind != kSAMPLE)) {
1056  reduceOneAggregateSlot(this_ptr1,
1057  this_ptr2,
1058  that_ptr1,
1059  that_ptr2,
1060  target_info,
1061  target_logical_idx,
1062  target_slot_idx,
1063  init_val,
1064  chosen_bytes,
1065  ir_reduce_one_entry);
1066  } else if (target_info.agg_kind == kSINGLE_VALUE) {
1067  const auto checked_rc = emit_checked_write_projection(
1068  this_ptr1, that_ptr1, init_val, chosen_bytes, ir_reduce_one_entry);
1069 
1070  auto checked_rc_bool = ir_reduce_one_entry->add<ICmp>(
 1071  ICmp::Predicate::NE,
 1072  checked_rc,
1073  ir_reduce_one_entry->addConstant<ConstantInt>(0, Type::Int32),
1074  "");
1075 
1076  ir_reduce_one_entry->add<ReturnEarly>(checked_rc_bool, checked_rc, "");
1077 
1078  } else {
 1079  emit_write_projection(
 1080  this_ptr1, that_ptr1, init_val, chosen_bytes, ir_reduce_one_entry);
1081  if (target_info.agg_kind == kSAMPLE && target_info.sql_type.is_varlen()) {
1082  CHECK(this_ptr2 && that_ptr2);
1083  size_t length_to_elems{0};
1084  if (target_info.sql_type.is_geometry()) {
1085  // TODO: Assumes hard-coded sizes for geometry targets
1086  length_to_elems = target_slot_idx == first_slot_idx_for_target ? 1 : 4;
1087  } else {
1088  const auto& elem_ti = target_info.sql_type.get_elem_type();
1089  length_to_elems = target_info.sql_type.is_string() ? 1 : elem_ti.get_size();
1090  }
1091  const auto serialized_varlen_buffer_arg = ir_reduce_one_entry->arg(4);
1092  ir_reduce_one_entry->add<ExternalCall>(
1093  "serialized_varlen_buffer_sample",
1094  Type::Void,
1095  std::vector<const Value*>{
1096  serialized_varlen_buffer_arg,
1097  this_ptr1,
1098  this_ptr2,
1099  that_ptr1,
1100  that_ptr2,
1101  ir_reduce_one_entry->addConstant<ConstantInt>(init_val, Type::Int64),
1102  ir_reduce_one_entry->addConstant<ConstantInt>(length_to_elems,
1103  Type::Int64)},
1104  "");
1105  }
1106  }
1107 }
1108 
1109 void ResultSetReductionJIT::reduceOneAggregateSlot(Value* this_ptr1,
 1110  Value* this_ptr2,
1111  Value* that_ptr1,
1112  Value* that_ptr2,
1113  const TargetInfo& target_info,
1114  const size_t target_logical_idx,
1115  const size_t target_slot_idx,
1116  const int64_t init_val,
1117  const int8_t chosen_bytes,
1118  Function* ir_reduce_one_entry) const {
1119  switch (target_info.agg_kind) {
1120  case kCOUNT:
1121  case kAPPROX_COUNT_DISTINCT: {
1122  if (is_distinct_target(target_info)) {
1123  CHECK_EQ(static_cast<size_t>(chosen_bytes), sizeof(int64_t));
 1124  reduceOneCountDistinctSlot(
 1125  this_ptr1, that_ptr1, target_logical_idx, ir_reduce_one_entry);
1126  break;
1127  }
1128  CHECK_EQ(int64_t(0), init_val);
1129  emit_aggregate_one_count(this_ptr1, that_ptr1, chosen_bytes, ir_reduce_one_entry);
1130  break;
1131  }
1132  case kAVG: {
 1133  // Ignore float argument compaction for the count component, for fear of overflow
1134  emit_aggregate_one_count(this_ptr2,
1135  that_ptr2,
1136  query_mem_desc_.getPaddedSlotWidthBytes(target_slot_idx),
1137  ir_reduce_one_entry);
1138  }
1139  // fall thru
1140  case kSUM: {
 1141  emit_aggregate_one_nullable_value("sum",
 1142  this_ptr1,
1143  that_ptr1,
1144  init_val,
1145  chosen_bytes,
1146  target_info,
1147  ir_reduce_one_entry);
1148  break;
1149  }
1150  case kMIN: {
 1151  emit_aggregate_one_nullable_value("min",
 1152  this_ptr1,
1153  that_ptr1,
1154  init_val,
1155  chosen_bytes,
1156  target_info,
1157  ir_reduce_one_entry);
1158  break;
1159  }
1160  case kMAX: {
 1161  emit_aggregate_one_nullable_value("max",
 1162  this_ptr1,
1163  that_ptr1,
1164  init_val,
1165  chosen_bytes,
1166  target_info,
1167  ir_reduce_one_entry);
1168  break;
1169  }
1170  default:
1171  LOG(FATAL) << "Invalid aggregate type";
1172  }
1173 }
1174 
1175 void ResultSetReductionJIT::reduceOneCountDistinctSlot(
 1176  Value* this_ptr1,
1177  Value* that_ptr1,
1178  const size_t target_logical_idx,
1179  Function* ir_reduce_one_entry) const {
 1180  CHECK_LT(target_logical_idx, query_mem_desc_.getCountDistinctDescriptorsSize());
 1181  const auto old_set_handle = emit_load_i64(this_ptr1, ir_reduce_one_entry);
1182  const auto new_set_handle = emit_load_i64(that_ptr1, ir_reduce_one_entry);
1183  const auto this_qmd_arg = ir_reduce_one_entry->arg(2);
1184  const auto that_qmd_arg = ir_reduce_one_entry->arg(3);
1185  ir_reduce_one_entry->add<ExternalCall>(
1186  "count_distinct_set_union_jit_rt",
1187  Type::Void,
1188  std::vector<const Value*>{
1189  new_set_handle,
1190  old_set_handle,
1191  that_qmd_arg,
1192  this_qmd_arg,
1193  ir_reduce_one_entry->addConstant<ConstantInt>(target_logical_idx, Type::Int64)},
1194  "");
1195 }
1196 
1197 ReductionCode ResultSetReductionJIT::finalizeReductionCode(
 1198  ReductionCode reduction_code,
1199  const llvm::Function* ir_is_empty,
1200  const llvm::Function* ir_reduce_one_entry,
1201  const llvm::Function* ir_reduce_one_entry_idx,
1202  const CodeCacheKey& key) const {
1203  CompilationOptions co{
 1204  ExecutorDeviceType::CPU, false, ExecutorOptLevel::ReductionJIT, false};
 1205 
1206 #ifdef NDEBUG
1207  LOG(IR) << "Reduction Loop:\n"
1208  << serialize_llvm_object(reduction_code.llvm_reduce_loop);
1209  LOG(IR) << "Reduction Is Empty Func:\n" << serialize_llvm_object(ir_is_empty);
1210  LOG(IR) << "Reduction One Entry Func:\n" << serialize_llvm_object(ir_reduce_one_entry);
1211  LOG(IR) << "Reduction One Entry Idx Func:\n"
1212  << serialize_llvm_object(ir_reduce_one_entry_idx);
1213 #else
1214  LOG(IR) << serialize_llvm_object(reduction_code.cgen_state->module_);
1215 #endif
1216 
1217  reduction_code.module.release();
 1218  auto ee = CodeGenerator::generateNativeCPUCode(
 1219  reduction_code.llvm_reduce_loop, {reduction_code.llvm_reduce_loop}, co);
1220  reduction_code.func_ptr = reinterpret_cast<ReductionCode::FuncPtr>(
1221  ee->getPointerToFunction(reduction_code.llvm_reduce_loop));
1222 
1223  auto cpu_compilation_context = std::make_shared<CpuCompilationContext>(std::move(ee));
1224  cpu_compilation_context->setFunctionPointer(reduction_code.llvm_reduce_loop);
1225  reduction_code.compilation_context = cpu_compilation_context;
 1226  addCodeToCache(key,
 1227  reduction_code.compilation_context,
1228  reduction_code.llvm_reduce_loop->getParent(),
1229  s_code_cache);
1230  return reduction_code;
1231 }
1232 
1233 namespace {
1234 
1235 std::string target_info_key(const TargetInfo& target_info) {
1236  return std::to_string(target_info.is_agg) + "\n" +
1237  std::to_string(target_info.agg_kind) + "\n" +
1238  target_info.sql_type.get_type_name() + "\n" +
1239  std::to_string(target_info.sql_type.get_notnull()) + "\n" +
1240  target_info.agg_arg_type.get_type_name() + "\n" +
1241  std::to_string(target_info.agg_arg_type.get_notnull()) + "\n" +
1242  std::to_string(target_info.skip_null_val) + "\n" +
1243  std::to_string(target_info.is_distinct);
1244 }
1245 
1246 } // namespace
1247 
1248 std::string ResultSetReductionJIT::cacheKey() const {
1249  std::vector<std::string> target_init_vals_strings;
1250  std::transform(target_init_vals_.begin(),
1251  target_init_vals_.end(),
1252  std::back_inserter(target_init_vals_strings),
1253  [](const int64_t v) { return std::to_string(v); });
1254  const auto target_init_vals_key =
1255  boost::algorithm::join(target_init_vals_strings, ", ");
1256  std::vector<std::string> targets_strings;
1257  std::transform(
1258  targets_.begin(),
1259  targets_.end(),
1260  std::back_inserter(targets_strings),
1261  [](const TargetInfo& target_info) { return target_info_key(target_info); });
1262  const auto targets_key = boost::algorithm::join(targets_strings, ", ");
1263  return query_mem_desc_.reductionKey() + "\n" + target_init_vals_key + "\n" +
1264  targets_key;
1265 }
1266 
1267 ReductionCode GpuReductionHelperJIT::codegen() const {
 1268  const auto hash_type = query_mem_desc_.getQueryDescriptionType();
1269  auto reduction_code = setup_functions_ir(hash_type);
 1270  CHECK(hash_type == QueryDescriptionType::GroupByPerfectHash);
 1271  isEmpty(reduction_code);
1272  reduceOneEntryNoCollisions(reduction_code);
1273  reduceOneEntryNoCollisionsIdx(reduction_code);
1274  reduceLoop(reduction_code);
1275  reduction_code.cgen_state.reset(new CgenState({}, false));
1276  auto cgen_state = reduction_code.cgen_state.get();
1277  std::unique_ptr<llvm::Module> module(runtime_module_shallow_copy(cgen_state));
1278 
1279  cgen_state->module_ = module.get();
1280  AUTOMATIC_IR_METADATA(cgen_state);
1281  auto ir_is_empty = create_llvm_function(reduction_code.ir_is_empty.get(), cgen_state);
1282  auto ir_reduce_one_entry =
1283  create_llvm_function(reduction_code.ir_reduce_one_entry.get(), cgen_state);
1284  auto ir_reduce_one_entry_idx =
1285  create_llvm_function(reduction_code.ir_reduce_one_entry_idx.get(), cgen_state);
1286  auto ir_reduce_loop =
1287  create_llvm_function(reduction_code.ir_reduce_loop.get(), cgen_state);
1288  std::unordered_map<const Function*, llvm::Function*> f;
1289  f.emplace(reduction_code.ir_is_empty.get(), ir_is_empty);
1290  f.emplace(reduction_code.ir_reduce_one_entry.get(), ir_reduce_one_entry);
1291  f.emplace(reduction_code.ir_reduce_one_entry_idx.get(), ir_reduce_one_entry_idx);
1292  f.emplace(reduction_code.ir_reduce_loop.get(), ir_reduce_loop);
1293  translate_function(reduction_code.ir_is_empty.get(), ir_is_empty, reduction_code, f);
 1294  translate_function(
 1295  reduction_code.ir_reduce_one_entry.get(), ir_reduce_one_entry, reduction_code, f);
1296  translate_function(reduction_code.ir_reduce_one_entry_idx.get(),
1297  ir_reduce_one_entry_idx,
1298  reduction_code,
1299  f);
 1300  translate_function(
 1301  reduction_code.ir_reduce_loop.get(), ir_reduce_loop, reduction_code, f);
1302  reduction_code.llvm_reduce_loop = ir_reduce_loop;
1303  reduction_code.module = std::move(module);
1304  return reduction_code;
1305 }