#include "../CudaMgr/CudaMgr.h"
#include "../Shared/checked_alloc.h"
#include "../Shared/funcannotations.h"
#include "../Utils/ChunkIter.h"
// ...
#include <llvm/Transforms/Utils/BasicBlockUtils.h>
// ...
#include <string_view>
// ...
int32_t get_agg_count(const std::vector<Analyzer::Expr*>& target_exprs) {
  // ...
  for (auto target_expr : target_exprs) {
    // ...
    if (!agg_expr || agg_expr->get_aggtype() == kSAMPLE) {
      // ...
      } else if (ti.is_geometry()) {
        agg_count += ti.get_physical_coord_cols() * 2;
      }
      // ...
    }
    if (agg_expr && agg_expr->get_aggtype() == kAVG) {
      // ...
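// Slot accounting above: kSAMPLE and projected buffer targets take extra
// output slots (a geometry value needs two per physical coordinate column),
// and kAVG needs two slots because it is accumulated as a running sum plus a
// running count.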
// ...
  if (!cd || !cd->isVirtualCol) {
// ...
  for (const auto& target_expr : ra_exe_unit.target_exprs) {
// ...
                                                 const int64_t max_entry_count) {
// ...
ColRangeInfo get_expr_range_info(const RelAlgExecutionUnit& ra_exe_unit,
                                 const std::vector<InputTableInfo>& query_infos,
                                 const Analyzer::Expr* expr,
                                 Executor* executor) {
  // ...
      expr, query_infos, executor, boost::make_optional(ra_exe_unit.simple_quals));
  switch (expr_range.getType()) {
    // ...
      if (expr_range.getIntMin() > expr_range.getIntMax()) {
        // ...
              expr_range.getIntMin(),
              expr_range.getIntMax(),
              expr_range.getBucket(),
              expr_range.hasNulls()};
    // ...
      if (expr_range.getFpMin() > expr_range.getFpMax()) {
        // ...
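// get_expr_range_info consults the expression range oracle, narrowed by the
// unit's simple_quals. An inverted range (min > max, and likewise for the fp
// case) means the range is unknown or empty, so the branches above fall back
// to a ColRangeInfo that cannot use a perfect-hash layout.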
// ...
  const int64_t baseline_threshold =
      // ...
  bool has_nulls{false};
  // ...
      cardinality *= crt_col_cardinality;
      if (col_range_info.has_nulls) {
        // ...
    if (!cardinality || cardinality > baseline_threshold) {
      // ...
              int64_t(cardinality),
      // ...
    return col_range_info;
  // ...
  static const int64_t MAX_BUFFER_SIZE = 1 << 30;
  const int64_t col_count =
      // ...
  int64_t max_entry_count = MAX_BUFFER_SIZE / (col_count * sizeof(int64_t));
  // ...
  max_entry_count = std::min(max_entry_count, baseline_threshold);
  // ...
  if (groupby_expr_ti.is_string() && !col_range_info.bucket) {
    // ...
  const bool has_filters =
      // ...
                          col_range_info.has_nulls};
    // ...
    return col_range_info;
    // ...
                          col_range_info.has_nulls};
  // ...
      !col_range_info.bucket) {
    // ...
                          col_range_info.has_nulls};
  // ...
  return col_range_info;
}
// ...
  if (col_range_info.bucket) {
    crt_col_cardinality /= col_range_info.bucket;
  }
  return static_cast<int64_t>(crt_col_cardinality +
                              (1 + (col_range_info.has_nulls ? 1 : 0)));
}
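// Illustrative sketch (not part of the original file): the bucketed
// cardinality computed above as a standalone helper. The first +1 turns the
// max-min difference into an inclusive count, and has_nulls reserves one more
// slot for the null key. E.g. min=10, max=50, bucket=10, has_nulls=true gives
// (50 - 10) / 10 + 2 = 6 entries.
static int64_t bucketed_cardinality_sketch(const int64_t min,
                                           const int64_t max,
                                           const int64_t bucket,
                                           const bool has_nulls) {
  int64_t cardinality = max - min;
  if (bucket) {
    cardinality /= bucket;  // a bucket collapses `bucket` adjacent values into one group
  }
  return cardinality + 1 + (has_nulls ? 1 : 0);
}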
// ...
  if (col_range_info.min <= col_range_info.max) {
    size_t size = col_range_info.max - col_range_info.min;
    if (col_range_info.bucket) {
      size /= col_range_info.bucket;
    }
    CHECK_LT(size, std::numeric_limits<int64_t>::max());
    return static_cast<int64_t>(size + 1);
  // ...
#define LL_CONTEXT executor_->cgen_state_->context_
#define LL_BUILDER executor_->cgen_state_->ir_builder_
#define LL_BOOL(v) executor_->cgen_state_->llBool(v)
#define LL_INT(v) executor_->cgen_state_->llInt(v)
#define LL_FP(v) executor_->cgen_state_->llFp(v)
#define ROW_FUNC executor_->cgen_state_->row_func_
#define CUR_FUNC executor_->cgen_state_->current_func_
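// The LL_* shorthands all route through the executor's CgenState: the LLVM
// context, the active IRBuilder, bool/int/fp constant helpers, and the row
// and current functions being generated. They keep the codegen methods below
// terse.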
// ...
    const std::vector<InputTableInfo>& query_infos,
    std::shared_ptr<RowSetMemoryOwner> row_set_mem_owner,
    const std::optional<int64_t>& group_cardinality_estimation)
    : executor_(executor)
    , ra_exe_unit_(ra_exe_unit)
    , query_infos_(query_infos)
    , row_set_mem_owner_(row_set_mem_owner)
    , device_type_(device_type)
    , group_cardinality_estimation_(group_cardinality_estimation) {
  // ...
    const auto& groupby_ti = groupby_expr->get_type_info();
    if (groupby_ti.is_bytes()) {
      throw std::runtime_error(
          "Cannot group by string columns which are not dictionary encoded.");
    }
    if (groupby_ti.is_buffer()) {
      throw std::runtime_error("Group by buffer not supported");
    }
    if (groupby_ti.is_geometry()) {
      throw std::runtime_error("Group by geometry not supported");
    }
// ...
int64_t GroupByAndAggregate::getShardedTopBucket(const ColRangeInfo& col_range_info,
                                                 const size_t shard_count) const {
  size_t device_count{0};
  // ...
    device_count = executor_->cudaMgr()->getDeviceCount();
  // ...
  int64_t bucket{col_range_info.bucket};
  // ...
    if (device_count < shard_count) {
      bucket = g_leaf_count ? std::max(device_count, static_cast<size_t>(1))
                            : std::min(device_count, shard_count - device_count);
    } else {
      bucket = shard_count * std::max(g_leaf_count, static_cast<size_t>(1));
    }
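// Hedged reading of the branch above: for sharded top-group queries, the
// column bucket is widened by a device/shard-derived stride (or scaled by
// g_leaf_count on distributed setups) so that a perfect-hash layout keeps
// each device's portion of the key space within its own output buffer.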
// ...
KeylessInfo get_keyless_info(const RelAlgExecutionUnit& ra_exe_unit,
                             const std::vector<InputTableInfo>& query_infos,
                             const bool is_group_by,
                             Executor* executor) {
  bool keyless{true}, found{false};
  int32_t num_agg_expr{0};
  // ...
  for (const auto target_expr : ra_exe_unit.target_exprs) {
    // ...
    if (agg_info.is_agg) {
      // ...
      const auto arg_expr = agg_arg(target_expr);
      // ...
      switch (agg_info.agg_kind) {
        // ...
          if (arg_expr && !arg_expr->get_type_info().get_notnull()) {
            // ...
              expr_range_info.hasNulls()) {
            // ...
          if (arg_expr && !arg_expr->get_type_info().get_notnull()) {
            // ...
              expr_range_info.hasNulls()) {
            // ...
          auto arg_ti = arg_expr->get_type_info();
          // ...
            arg_ti.set_notnull(true);
          // ...
          if (!arg_ti.get_notnull()) {
            // ...
                !expr_range_info.hasNulls()) {
            // ...
            switch (expr_range_info.getType()) {
              // ...
                if (expr_range_info.getFpMax() < 0 || expr_range_info.getFpMin() > 0) {
                // ...
                if (expr_range_info.getIntMax() < 0 || expr_range_info.getIntMin() > 0) {
                // ...
          CHECK(agg_expr && agg_expr->get_arg());
          const auto& arg_ti = agg_expr->get_arg()->get_type_info();
          if (arg_ti.is_string() || arg_ti.is_buffer()) {
            // ...
          auto expr_range_info =
              // ...
              is_group_by || float_argument_input,
              float_argument_input ? sizeof(float) : 8);
          switch (expr_range_info.getType()) {
            // ...
                  *reinterpret_cast<const double*>(may_alias_ptr(&init_max));
              if (expr_range_info.getFpMax() < double_max) {
                // ...
              if (expr_range_info.getIntMax() < init_max) {
                // ...
          CHECK(agg_expr && agg_expr->get_arg());
          const auto& arg_ti = agg_expr->get_arg()->get_type_info();
          if (arg_ti.is_string() || arg_ti.is_buffer()) {
            // ...
          auto expr_range_info =
              // ...
              expr_range_info.hasNulls()) {
            // ...
              is_group_by || float_argument_input,
              float_argument_input ? sizeof(float) : 8);
          switch (expr_range_info.getType()) {
            // ...
                  *reinterpret_cast<const double*>(may_alias_ptr(&init_min));
              if (expr_range_info.getFpMin() > double_min) {
                // ...
              if (expr_range_info.getIntMin() > init_min) {
                // ...
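// Keyless hash in a nutshell: with a single perfect-hashed group-by column,
// the key need not be stored at all -- the first aggregate slot doubles as
// the empty-entry marker, provided the aggregate can never legitimately
// produce its own initial value. The per-kind range checks above (notnull
// arguments, value ranges that avoid zero, min/max compared against the
// initial value) establish exactly that property.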
// ...
CountDistinctDescriptors init_count_distinct_descriptors(
    const RelAlgExecutionUnit& ra_exe_unit,
    const std::vector<InputTableInfo>& query_infos,
    const ExecutorDeviceType device_type,
    Executor* executor) {
  // ...
  for (const auto target_expr : ra_exe_unit.target_exprs) {
    // ...
      CHECK(agg_info.is_agg);
      // ...
      if (arg_ti.is_bytes()) {
        throw std::runtime_error(
            "Strings must be dictionary-encoded for COUNT(DISTINCT).");
      }
      // ...
        throw std::runtime_error("APPROX_COUNT_DISTINCT on arrays not supported yet");
      // ...
        throw std::runtime_error(
            "APPROX_COUNT_DISTINCT on geometry columns not supported");
      // ...
      if (agg_info.is_distinct && arg_ti.is_geometry()) {
        throw std::runtime_error("COUNT DISTINCT on geometry columns not supported");
      }
      // ...
      auto arg_range_info =
          arg_ti.is_fp() ? no_range_info
                         // ...
                               ra_exe_unit, query_infos, agg_expr->get_arg(), executor);
      // ...
      int64_t bitmap_sz_bits{0};
      // ...
        const auto error_rate = agg_expr->get_arg1();
        // ...
          CHECK(error_rate->get_type_info().get_type() == kINT);
          CHECK_GE(error_rate->get_constval().intval, 1);
        // ...
      if (arg_range_info.isEmpty()) {
        count_distinct_descriptors.emplace_back(
            // ...
          !(arg_ti.is_buffer() || arg_ti.is_geometry())) {
        // ...
        if (agg_info.agg_kind == kCOUNT) {
          // ...
            !(arg_ti.is_array() || arg_ti.is_geometry())) {
          // ...
        const auto sub_bitmap_count =
            // ...
        count_distinct_descriptors.emplace_back(
            // ...
  return count_distinct_descriptors;
}
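// Descriptor selection, roughly: a dense integer range gets an exact bitmap
// (one bit per distinct value), APPROX_COUNT_DISTINCT gets an HLL-style
// bitmap sized via hll_size_for_rate from the requested error rate, and the
// remaining cases fall back to a hash set. A sub_bitmap_count greater than
// one replicates the bitmap on GPU to spread atomic updates across copies.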
std::unique_ptr<QueryMemoryDescriptor> GroupByAndAggregate::initQueryMemoryDescriptor(
    const bool allow_multifrag,
    const size_t max_groups_buffer_entry_count,
    const int8_t crt_min_byte_width,
    RenderInfo* render_info,
    const bool output_columnar_hint) {
  const auto shard_count =
      // ...
  bool sort_on_gpu_hint =
      // ...
  bool must_use_baseline_sort = shard_count;
  // ...
      max_groups_buffer_entry_count,
      // ...
      must_use_baseline_sort,
      output_columnar_hint);
  CHECK(query_mem_desc);
  if (query_mem_desc->sortOnGpu() &&
      // ...
       align_to_int64(query_mem_desc->getEntryCount() * sizeof(int32_t))) >
          2 * 1024 * 1024 * 1024LL) {
    must_use_baseline_sort = true;
    sort_on_gpu_hint = false;
    // ...
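// Fallback above: if the buffer a GPU sort would need -- the entry count
// times the row size, plus an int64-aligned int32 index per entry -- exceeds
// 2 GiB, the descriptor is rebuilt with baseline sort instead. For instance,
// ~34M entries at 64 bytes per row already cross the limit.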
std::unique_ptr<QueryMemoryDescriptor> GroupByAndAggregate::initQueryMemoryDescriptorImpl(
    const bool allow_multifrag,
    const size_t max_groups_buffer_entry_count,
    const int8_t crt_min_byte_width,
    const bool sort_on_gpu_hint,
    RenderInfo* render_info,
    const bool must_use_baseline_sort,
    const bool output_columnar_hint) {
  // ...
  const auto shard_count =
      // ...
  const auto col_range_info =
      // ...
                    col_range_info_nosharding.min,
                    col_range_info_nosharding.max,
                    // ...
                    col_range_info_nosharding.has_nulls};
  // ...
  const auto keyless_info =
      // ...
      (col_range_info.max - col_range_info.min) /
              std::max(col_range_info.bucket, int64_t(1)) >
          // ...
          max_groups_buffer_entry_count,
          // ...
          count_distinct_descriptors,
          must_use_baseline_sort,
          output_columnar_hint,
          // ...
    LOG(WARNING) << e.what() << " Disabling Streaming Top N.";
    // ...
          max_groups_buffer_entry_count,
          // ...
          count_distinct_descriptors,
          must_use_baseline_sort,
          output_columnar_hint,
          // ...
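// The argument list appears twice above because QueryMemoryDescriptor::init
// (whose last parameter is a streaming_top_n_hint) is attempted first with
// streaming top-N enabled; if that throws, the warning is logged and the
// same initialization is retried with the hint disabled.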
bool GroupByAndAggregate::gpuCanHandleOrderEntries(
    const std::list<Analyzer::OrderEntry>& order_entries) {
  if (order_entries.size() > 1) {
    // ...
  for (const auto& order_entry : order_entries) {
    // ...
    if (!dynamic_cast<Analyzer::AggExpr*>(target_expr)) {
      // ...
    if (agg_expr->get_is_distinct() || agg_expr->get_aggtype() == kAVG ||
        agg_expr->get_aggtype() == kMIN || agg_expr->get_aggtype() == kMAX ||
        // ...
    if (agg_expr->get_arg()) {
      // ...
      if (arg_ti.is_fp()) {
        // ...
      auto expr_range_info =
          // ...
           expr_range_info.has_nulls) &&
          order_entry.is_desc == order_entry.nulls_first) {
        // ...
    const auto& target_ti = target_expr->get_type_info();
    CHECK(!target_ti.is_buffer());
    if (!target_ti.is_integer()) {
      // ...
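// Summary of the checks above: GPU top-k sort is only attempted for a single
// order entry whose target is a plain integer aggregate -- DISTINCT, AVG,
// MIN and MAX targets, floating-point arguments, and null orderings that
// could surface nulls at the top of the heap are all rejected.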
bool GroupByAndAggregate::codegen(llvm::Value* filter_result,
                                  llvm::BasicBlock* sc_false,
                                  QueryMemoryDescriptor& query_mem_desc,
                                  const CompilationOptions& co,
                                  const GpuSharedMemoryContext& gpu_smem_context) {
  CHECK(filter_result);
  // ...
  bool can_return_error = false;
  llvm::BasicBlock* filter_false{nullptr};
  // ...
      llvm::Value* old_total_matched_val{nullptr};
      // ...
        old_total_matched_val =
            LL_BUILDER.CreateAtomicRMW(llvm::AtomicRMWInst::Add,
                                       // ...
#if LLVM_VERSION_MAJOR > 12
                                       // ...
#endif
                                       llvm::AtomicOrdering::Monotonic);
      // ...
        old_total_matched_val = LL_BUILDER.CreateLoad(
            total_matched_ptr->getType()->getPointerElementType(), total_matched_ptr);
      // ...
      LL_BUILDER.CreateStore(old_total_matched_val, old_total_matched_ptr);
      // ...
      auto agg_out_ptr_w_idx = codegenGroupBy(query_mem_desc, co, filter_cfg);
      // ...
        filter_cfg.setChainToNext();
        // ...
                                          varlen_output_buffer,
                                          // ...
        llvm::Value* nullcheck_cond{nullptr};
        // ...
          nullcheck_cond = LL_BUILDER.CreateICmpSGE(std::get<1>(agg_out_ptr_w_idx),
                                                    // ...
              std::get<0>(agg_out_ptr_w_idx),
              llvm::ConstantPointerNull::get(
                  // ...
            nullcheck_cond, executor_, false, "groupby_nullcheck", &filter_cfg, false);
        // ...
                                          varlen_output_buffer,
                                          // ...
        can_return_error = true;
        // ...
            code_generator.posArg(nullptr),
            // ...
      std::stack<llvm::BasicBlock*> array_loops;
      // ...
      auto arg_it = ROW_FUNC->arg_begin();
      std::vector<llvm::Value*> agg_out_vec;
      // ...
        agg_out_vec.push_back(&*arg_it++);
      // ...
  } else if (sc_false) {
    const auto saved_insert_block = LL_BUILDER.GetInsertBlock();
    // ...
    LL_BUILDER.SetInsertPoint(saved_insert_block);
  }
  // ...
  return can_return_error;
}
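// Control flow above: the filter result opens a DiamondCodegen scope; inside
// it, the total_matched counter is bumped (atomically when threads share the
// output buffer, plain load/store otherwise), the group slot is resolved via
// codegenGroupBy, and a nested "groupby_nullcheck" diamond guards against a
// full buffer by testing the returned pointer or index before the aggregate
// calls are emitted.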
llvm::Value* GroupByAndAggregate::codegenOutputSlot(
    llvm::Value* groups_buffer,
    const QueryMemoryDescriptor& query_mem_desc,
    const CompilationOptions& co,
    DiamondCodegen& diamond_codegen) {
  // ...
          : query_mem_desc.getRowSize() / sizeof(int64_t);
    // ...
    CHECK_GE(only_order_entry.tle_no, int(1));
    const size_t target_idx = only_order_entry.tle_no - 1;
    // ...
    const auto chosen_bytes =
        // ...
    auto order_entry_lv = executor_->cgen_state_->castToTypeIn(
        code_generator.codegen(order_entry_expr, true, co).front(), chosen_bytes * 8);
    // ...
    std::string fname = "get_bin_from_k_heap";
    const auto& oe_ti = order_entry_expr->get_type_info();
    llvm::Value* null_key_lv = nullptr;
    if (oe_ti.is_integer() || oe_ti.is_decimal() || oe_ti.is_time()) {
      const size_t bit_width = order_entry_lv->getType()->getIntegerBitWidth();
      switch (bit_width) {
        // ...
      }
    } else {
      CHECK(oe_ti.is_fp());
      if (order_entry_lv->getType()->isDoubleTy()) {
        // ...
      }
      fname += order_entry_lv->getType()->isDoubleTy() ? "_double" : "_float";
    }
    // ...
    const auto key_slot_idx =
        // ...
         LL_BOOL(only_order_entry.is_desc),
         LL_BOOL(!order_entry_expr->get_type_info().get_notnull()),
         LL_BOOL(only_order_entry.nulls_first),
         // ...
    const auto output_buffer_entry_count_lv =
        LL_BUILDER.CreateLoad(arg->getType()->getPointerElementType(), arg);
    // ...
    const auto group_expr_lv =
        LL_BUILDER.CreateLoad(arg->getType()->getPointerElementType(), arg);
    std::vector<llvm::Value*> args{groups_buffer,
                                   output_buffer_entry_count_lv,
                                   // ...
                                   code_generator.posArg(nullptr)};
    // ...
    const auto columnar_output_offset =
        // ...
    return columnar_output_offset;
    // ...
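// Streaming top-N path above: the runtime probe name is built from the order
// key's physical type -- "get_bin_from_k_heap" plus an integer-width or
// _double/_float suffix -- and receives the heap buffer, the key slot index,
// and the is_desc/nullable/nulls_first flags, conceptually yielding the
// output bin for the current row within the top-k heap.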
// ...
  auto arg_it = ROW_FUNC->arg_begin();
  auto groups_buffer = arg_it++;
  // ...
  std::stack<llvm::BasicBlock*> array_loops;
  // ...
    return std::make_tuple(
        // ...
    return std::make_tuple(
        // ...
          : query_mem_desc.getRowSize() / sizeof(int64_t);
  // ...
  llvm::Value* group_key = nullptr;
  llvm::Value* key_size_lv = nullptr;
  // ...
      col_width_size == sizeof(int32_t)
          // ...
  int32_t subkey_idx = 0;
  // ...
    const auto col_range_info =
        // ...
    const auto translated_null_value = static_cast<int64_t>(
        // ...
        (col_range_info.bucket ? col_range_info.bucket : 1));
    // ...
    const bool col_has_nulls =
        // ...
            : col_range_info.has_nulls)
        // ...
    const auto group_expr_lvs =
        executor_->groupByColumnCodegen(group_expr.get(),
                                        // ...
                                        translated_null_value,
                                        // ...
    const auto group_expr_lv = group_expr_lvs.translated_value;
    // ...
        group_expr_lvs.original_value,
        // ...
        group_key->getType()->getScalarType()->getPointerElementType(),
        // ...
        &*groups_buffer, group_key, key_size_lv, query_mem_desc, row_size_quad);
  // ...
  return std::make_tuple(nullptr, nullptr);
}

llvm::Value* GroupByAndAggregate::codegenVarlenOutputBuffer(
    const QueryMemoryDescriptor& query_mem_desc) {
  // ...
  auto arg_it = ROW_FUNC->arg_begin();
  // ...
  auto varlen_output_buffer = arg_it++;
  CHECK(varlen_output_buffer->getType() == llvm::Type::getInt64PtrTy(LL_CONTEXT));
  return varlen_output_buffer;
}
std::tuple<llvm::Value*, llvm::Value*>
GroupByAndAggregate::codegenSingleColumnPerfectHash(
    const QueryMemoryDescriptor& query_mem_desc,
    const CompilationOptions& co,
    llvm::Value* groups_buffer,
    llvm::Value* group_expr_lv_translated,
    llvm::Value* group_expr_lv_original,
    const int32_t row_size_quad) {
  // ...
      ? "get_columnar_group_bin_offset"
      : "get_group_value_fast"};
  // ...
    get_group_fn_name += "_keyless";
  // ...
    get_group_fn_name += "_semiprivate";
  // ...
  std::vector<llvm::Value*> get_group_fn_args{&*groups_buffer,
                                              &*group_expr_lv_translated};
  if (group_expr_lv_original && get_group_fn_name == "get_group_value_fast" &&
      // ...
    get_group_fn_name += "_with_original_key";
    get_group_fn_args.push_back(group_expr_lv_original);
  }
  // ...
    get_group_fn_args.push_back(LL_INT(row_size_quad));
  // ...
    get_group_fn_args.push_back(LL_INT(row_size_quad));
  // ...
    get_group_fn_args.push_back(warp_idx);
  // ...
  if (get_group_fn_name == "get_columnar_group_bin_offset") {
    return std::make_tuple(&*groups_buffer,
                           emitCall(get_group_fn_name, get_group_fn_args));
  }
  return std::make_tuple(emitCall(get_group_fn_name, get_group_fn_args), nullptr);
}
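// Hedged sketch (illustrative, not the actual runtime): conceptually, the
// get_group_value_fast family resolves a perfect-hash slot by indexing the
// buffer directly with the bucketed key; row_size_quad is the row stride in
// 64-bit quadwords.
static int64_t* get_group_value_fast_sketch(int64_t* groups_buffer,
                                            const int64_t key,
                                            const int64_t min_key,
                                            const int64_t bucket,
                                            const int32_t row_size_quad) {
  int64_t idx = key - min_key;
  if (bucket) {
    idx /= bucket;
  }
  return groups_buffer + idx * row_size_quad;  // start of this group's row
}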
std::tuple<llvm::Value*, llvm::Value*>
GroupByAndAggregate::codegenMultiColumnPerfectHash(
    llvm::Value* groups_buffer,
    llvm::Value* group_key,
    llvm::Value* key_size_lv,
    const QueryMemoryDescriptor& query_mem_desc,
    const int32_t row_size_quad) {
  // ...
      LL_BUILDER.CreateCall(perfect_hash_func, std::vector<llvm::Value*>{group_key});
  // ...
    const std::string set_matching_func_name{
        "set_matching_group_value_perfect_hash_columnar"};
    const std::vector<llvm::Value*> set_matching_func_arg{
        // ...
    emitCall(set_matching_func_name, set_matching_func_arg);
    // ...
    return std::make_tuple(groups_buffer, hash_lv);
  // ...
  return std::make_tuple(
      emitCall("get_matching_group_value_perfect_hash_keyless",
               {groups_buffer, hash_lv, LL_INT(row_size_quad)}),
      // ...
  return std::make_tuple(
      emitCall("get_matching_group_value_perfect_hash",
               {groups_buffer, hash_lv, group_key, key_size_lv, LL_INT(row_size_quad)}),
      // ...
std::tuple<llvm::Value*, llvm::Value*>
GroupByAndAggregate::codegenMultiColumnBaselineHash(
    const CompilationOptions& co,
    llvm::Value* groups_buffer,
    llvm::Value* group_key,
    llvm::Value* key_size_lv,
    const QueryMemoryDescriptor& query_mem_desc,
    const size_t key_width,
    const int32_t row_size_quad) {
  // ...
  if (group_key->getType() != llvm::Type::getInt64PtrTy(LL_CONTEXT)) {
    CHECK(key_width == sizeof(int32_t));
    // ...
  std::vector<llvm::Value*> func_args{
      // ...
      LL_INT(static_cast<int32_t>(key_width))};
  std::string func_name{"get_group_value"};
  // ...
    func_name += "_columnar_slot";
  // ...
    func_args.push_back(LL_INT(row_size_quad));
  // ...
    func_name += "_with_watchdog";
  // ...
    return std::make_tuple(groups_buffer, emitCall(func_name, func_args));
  // ...
  return std::make_tuple(emitCall(func_name, func_args), nullptr);
}
// ...
  auto ft = llvm::FunctionType::get(
      // ...
  auto key_hash_func = llvm::Function::Create(ft,
                                              llvm::Function::ExternalLinkage,
                                              // ...
  executor_->cgen_state_->helper_functions_.push_back(key_hash_func);
  // ...
  auto& key_buff_arg = *key_hash_func->args().begin();
  llvm::Value* key_buff_lv = &key_buff_arg;
  auto bb = llvm::BasicBlock::Create(LL_CONTEXT, "entry", key_hash_func);
  llvm::IRBuilder<> key_hash_func_builder(bb);
  // ...
  std::vector<int64_t> cardinalities;
  // ...
    auto col_range_info =
        // ...
    auto* gep = key_hash_func_builder.CreateGEP(
        key_buff_lv->getType()->getScalarType()->getPointerElementType(),
        // ...
        key_hash_func_builder.CreateLoad(gep->getType()->getPointerElementType(), gep);
    auto col_range_info =
        // ...
        key_hash_func_builder.CreateSub(key_comp_lv, LL_INT(col_range_info.min));
    if (col_range_info.bucket) {
      // ...
          key_hash_func_builder.CreateSDiv(crt_term_lv, LL_INT(col_range_info.bucket));
    }
    // ...
    for (size_t prev_dim_idx = 0; prev_dim_idx < dim_idx; ++prev_dim_idx) {
      crt_term_lv = key_hash_func_builder.CreateMul(crt_term_lv,
                                                    LL_INT(cardinalities[prev_dim_idx]));
    }
    hash_lv = key_hash_func_builder.CreateAdd(hash_lv, crt_term_lv);
  // ...
  key_hash_func_builder.CreateRet(
      // ...
  return key_hash_func;
}
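// Illustrative sketch of the hash the helper above emits as IR: each key
// component is rebased to its column minimum, divided by its bucket when
// bucketed, scaled by the cardinalities of the preceding dimensions, and
// summed -- a row-major linearization of the multi-column key space.
static int64_t perfect_hash_sketch(const std::vector<int64_t>& key,
                                   const std::vector<int64_t>& mins,
                                   const std::vector<int64_t>& buckets,  // 0 = unbucketed
                                   const std::vector<int64_t>& cardinalities) {
  int64_t hash{0};
  for (size_t dim_idx = 0; dim_idx < key.size(); ++dim_idx) {
    int64_t crt_term = key[dim_idx] - mins[dim_idx];
    if (buckets[dim_idx]) {
      crt_term /= buckets[dim_idx];
    }
    for (size_t prev_dim_idx = 0; prev_dim_idx < dim_idx; ++prev_dim_idx) {
      crt_term *= cardinalities[prev_dim_idx];
    }
    hash += crt_term;
  }
  return hash;
}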
llvm::Value* GroupByAndAggregate::convertNullIfAny(const SQLTypeInfo& arg_type,
                                                   const TargetInfo& agg_info,
                                                   llvm::Value* target) {
  const auto& agg_type = agg_info.sql_type;
  const size_t chosen_bytes = agg_type.get_size();
  // ...
  bool need_conversion{false};
  llvm::Value* arg_null{nullptr};
  llvm::Value* agg_null{nullptr};
  llvm::Value* target_to_cast{target};
  if (arg_type.is_fp()) {
    arg_null = executor_->cgen_state_->inlineFpNull(arg_type);
    if (agg_type.is_fp()) {
      agg_null = executor_->cgen_state_->inlineFpNull(agg_type);
      if (!static_cast<llvm::ConstantFP*>(arg_null)->isExactlyValue(
              static_cast<llvm::ConstantFP*>(agg_null)->getValueAPF())) {
        need_conversion = true;
      }
    // ...
  } else {
    arg_null = executor_->cgen_state_->inlineIntNull(arg_type);
    if (agg_type.is_fp()) {
      agg_null = executor_->cgen_state_->inlineFpNull(agg_type);
      need_conversion = true;
      target_to_cast = executor_->castToFP(target, arg_type, agg_type);
    } else {
      agg_null = executor_->cgen_state_->inlineIntNull(agg_type);
      if ((static_cast<llvm::ConstantInt*>(arg_null)->getBitWidth() !=
           static_cast<llvm::ConstantInt*>(agg_null)->getBitWidth()) ||
          (static_cast<llvm::ConstantInt*>(arg_null)->getValue() !=
           static_cast<llvm::ConstantInt*>(agg_null)->getValue())) {
        need_conversion = true;
      }
    }
  }
  if (need_conversion) {
    auto cmp = arg_type.is_fp() ? LL_BUILDER.CreateFCmpOEQ(target, arg_null)
                                // ...
        executor_->cgen_state_->castToTypeIn(target_to_cast, chosen_bytes << 3));
    // ...
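// convertNullIfAny reconciles null sentinels: the argument type and the
// aggregate's storage type may encode NULL differently (distinct fp payloads,
// differing integer widths, or an integer argument feeding an fp aggregate).
// When they differ, the emitted code compares the incoming value against the
// argument's null and substitutes the aggregate's null, after casting to the
// chosen output width.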
// ...
  const auto window_func_context =
      // ...
          : query_mem_desc.getRowSize() / sizeof(int64_t);
  auto arg_it = ROW_FUNC->arg_begin();
  auto groups_buffer = arg_it++;
  // ...
      window_func_context, code_generator.posArg(nullptr));
  const auto pos_in_window =
      // ...
  llvm::Value* entry_count_lv =
      // ...
  std::vector<llvm::Value*> args{
      &*groups_buffer, entry_count_lv, pos_in_window, code_generator.posArg(nullptr)};
  // ...
  const auto columnar_output_offset =
      // ...
  auto arg_it = ROW_FUNC->arg_begin();
  auto groups_buffer = arg_it++;
  // ...
bool GroupByAndAggregate::codegenAggCalls(
    const std::tuple<llvm::Value*, llvm::Value*>& agg_out_ptr_w_idx_in,
    llvm::Value* varlen_output_buffer,
    const std::vector<llvm::Value*>& agg_out_vec,
    QueryMemoryDescriptor& query_mem_desc,
    const CompilationOptions& co,
    const GpuSharedMemoryContext& gpu_smem_context,
    DiamondCodegen& diamond_codegen) {
  // ...
  auto agg_out_ptr_w_idx = agg_out_ptr_w_idx_in;
  // ...
  const bool is_group_by = std::get<0>(agg_out_ptr_w_idx);
  bool can_return_error = false;
  // ...
    CHECK(agg_out_vec.empty());
  // ...
    CHECK(!agg_out_vec.empty());
  // ...
  llvm::Value* output_buffer_byte_stream{nullptr};
  llvm::Value* out_row_idx{nullptr};
  // ...
    output_buffer_byte_stream = LL_BUILDER.CreateBitCast(
        std::get<0>(agg_out_ptr_w_idx),
        llvm::PointerType::get(llvm::Type::getInt8Ty(LL_CONTEXT), 0));
    output_buffer_byte_stream->setName("out_buff_b_stream");
    CHECK(std::get<1>(agg_out_ptr_w_idx));
    out_row_idx = LL_BUILDER.CreateZExt(std::get<1>(agg_out_ptr_w_idx),
                                        // ...
    out_row_idx->setName("out_row_idx");
  // ...
    target_builder(target_expr, executor_, query_mem_desc, co);
    // ...
                   output_buffer_byte_stream,
                   // ...
                   varlen_output_buffer,
                   // ...
        executor_->plan_state_->isLazyFetchColumn(target_expr);
  // ...
  return can_return_error;
}
// Returns the pointer to where the aggregation should be stored.
llvm::Value* GroupByAndAggregate::codegenAggColumnPtr(
    llvm::Value* output_buffer_byte_stream,
    llvm::Value* out_row_idx,
    const std::tuple<llvm::Value*, llvm::Value*>& agg_out_ptr_w_idx,
    const QueryMemoryDescriptor& query_mem_desc,
    const size_t chosen_bytes,
    const size_t agg_out_off,
    const size_t target_idx) {
  // ...
  llvm::Value* agg_col_ptr{nullptr};
  // ...
    CHECK(chosen_bytes == 1 || chosen_bytes == 2 || chosen_bytes == 4 ||
          // ...
    CHECK(output_buffer_byte_stream);
    // ...
    auto out_per_col_byte_idx =
#ifdef _WIN32
        LL_BUILDER.CreateShl(out_row_idx, __lzcnt(chosen_bytes) - 1);
#else
        LL_BUILDER.CreateShl(out_row_idx, __builtin_ffs(chosen_bytes) - 1);
#endif
    auto byte_offset = LL_BUILDER.CreateAdd(out_per_col_byte_idx,
                                            LL_INT(static_cast<int64_t>(col_off)));
    byte_offset->setName("out_byte_off_target_" + std::to_string(target_idx));
    // ...
        output_buffer_byte_stream->getType()->getScalarType()->getPointerElementType(),
        output_buffer_byte_stream,
        // ...
    agg_col_ptr->setName("out_ptr_target_" + std::to_string(target_idx));
  // ...
    CHECK_EQ(size_t(0), col_off % chosen_bytes);
    col_off /= chosen_bytes;
    CHECK(std::get<1>(agg_out_ptr_w_idx));
    auto offset = LL_BUILDER.CreateAdd(std::get<1>(agg_out_ptr_w_idx), LL_INT(col_off));
    // ...
        std::get<0>(agg_out_ptr_w_idx),
        // ...
        bit_cast->getType()->getScalarType()->getPointerElementType(),
        // ...
    CHECK_EQ(size_t(0), col_off % chosen_bytes);
    col_off /= chosen_bytes;
    // ...
        std::get<0>(agg_out_ptr_w_idx),
        // ...
        bit_cast->getType()->getScalarType()->getPointerElementType(),
        // ...
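// Columnar addressing above: the aggregate pointer is
//   output_buffer_byte_stream + col_off + out_row_idx * chosen_bytes,
// with the multiply emitted as a left shift, since chosen_bytes is
// constrained by the CHECK to a power of two. The row-wise branches instead
// scale col_off into slot units and index off the row pointer.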
// ...
  auto estimator_comp_count_lv = LL_INT(static_cast<int32_t>(estimator_arg.size()));
  // ...
                                                  estimator_comp_count_lv);
  int32_t subkey_idx = 0;
  for (const auto& estimator_arg_comp : estimator_arg) {
    const auto estimator_arg_comp_lvs =
        executor_->groupByColumnCodegen(estimator_arg_comp.get(),
                                        // ...
    CHECK(!estimator_arg_comp_lvs.original_value);
    const auto estimator_arg_comp_lv = estimator_arg_comp_lvs.translated_value;
    // ...
        estimator_arg_comp_lv,
        // ...
        estimator_key_lv->getType()->getScalarType()->getPointerElementType(),
        // ...
  const auto key_bytes = LL_BUILDER.CreateBitCast(estimator_key_lv, int8_ptr_ty);
  const auto estimator_comp_bytes_lv =
      LL_INT(static_cast<int32_t>(estimator_arg.size() * sizeof(int64_t)));
  const auto bitmap_size_lv =
      // ...
      {bitmap, &*bitmap_size_lv, key_bytes, &*estimator_comp_bytes_lv});
  // ...
RUNTIME_EXPORT void agg_count_distinct_skip_val(int64_t* agg,
                                                const int64_t val,
                                                const int64_t skip_val) {
  if (val != skip_val) {
    agg_count_distinct(agg, val);
  }
}
void GroupByAndAggregate::codegenCountDistinct(
    const size_t target_idx,
    const Analyzer::Expr* target_expr,
    std::vector<llvm::Value*>& agg_args,
    const QueryMemoryDescriptor& query_mem_desc,
    const ExecutorDeviceType device_type) {
  // ...
  const auto& arg_ti =
      // ...
  if (arg_ti.is_fp()) {
    agg_args.back() = executor_->cgen_state_->ir_builder_.CreateBitCast(
        // ...
  const auto& count_distinct_descriptor =
      // ...
    agg_args.push_back(LL_INT(int32_t(count_distinct_descriptor.bitmap_sz_bits)));
    // ...
      agg_args.push_back(base_dev_addr);
      agg_args.push_back(base_host_addr);
      emitCall("agg_approximate_count_distinct_gpu", agg_args);
    // ...
      emitCall("agg_approximate_count_distinct", agg_args);
    // ...
  std::string agg_fname{"agg_count_distinct"};
  // ...
    agg_fname += "_bitmap";
    agg_args.push_back(LL_INT(static_cast<int64_t>(count_distinct_descriptor.min_val)));
  // ...
  if (agg_info.skip_null_val) {
    auto null_lv = executor_->cgen_state_->castToTypeIn(
        // ...
            ? static_cast<llvm::Value*>(executor_->cgen_state_->inlineFpNull(arg_ti))
            : static_cast<llvm::Value*>(executor_->cgen_state_->inlineIntNull(arg_ti))),
        // ...
      null_lv = executor_->cgen_state_->ir_builder_.CreateBitCast(
          // ...
    agg_fname += "_skip_val";
    agg_args.push_back(null_lv);
  }
  // ...
    agg_fname += "_gpu";
    // ...
    agg_args.push_back(base_dev_addr);
    agg_args.push_back(base_host_addr);
    agg_args.push_back(LL_INT(int64_t(count_distinct_descriptor.sub_bitmap_count)));
    // ...
             count_distinct_descriptor.bitmapPaddedSizeBytes() %
                 count_distinct_descriptor.sub_bitmap_count);
    agg_args.push_back(
        LL_INT(int64_t(count_distinct_descriptor.bitmapPaddedSizeBytes() /
                       count_distinct_descriptor.sub_bitmap_count)));
  // ...
    executor_->cgen_state_->emitExternalCall(
        agg_fname, llvm::Type::getVoidTy(LL_CONTEXT), agg_args);
  // ...
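// Runtime dispatch above: exact COUNT(DISTINCT) over a dense range calls
// agg_count_distinct_bitmap, with _skip_val and/or _gpu appended as needed.
// min_val rebases values so they index the bitmap from zero, and the GPU
// variant additionally receives the device/host bitmap addresses plus the
// sub-bitmap count and per-copy size used to spread atomic traffic.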
void GroupByAndAggregate::codegenApproxQuantile(
    const size_t target_idx,
    const Analyzer::Expr* target_expr,
    std::vector<llvm::Value*>& agg_args,
    const QueryMemoryDescriptor& query_mem_desc,
    const ExecutorDeviceType device_type) {
  // ...
  llvm::BasicBlock *calc, *skip;
  // ...
  auto* cs = executor_->cgen_state_.get();
  auto& irb = cs->ir_builder_;
  // ...
  auto* const null_value = cs->castToTypeIn(cs->inlineNull(arg_ti), 64);
  auto* const skip_cond = arg_ti.is_fp()
                              ? irb.CreateFCmpOEQ(agg_args.back(), null_value)
                              : irb.CreateICmpEQ(agg_args.back(), null_value);
  calc = llvm::BasicBlock::Create(cs->context_, "calc_approx_quantile");
  skip = llvm::BasicBlock::Create(cs->context_, "skip_approx_quantile");
  irb.CreateCondBr(skip_cond, skip, calc);
  cs->current_func_->getBasicBlockList().push_back(calc);
  irb.SetInsertPoint(calc);
  // ...
  if (!arg_ti.is_fp()) {
    // ...
    agg_args.back() = executor_->castToFP(agg_args.back(), arg_ti, agg_info.sql_type);
  }
  cs->emitExternalCall(
      "agg_approx_quantile", llvm::Type::getVoidTy(cs->context_), agg_args);
  // ...
  cs->current_func_->getBasicBlockList().push_back(skip);
  irb.SetInsertPoint(skip);
}
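// APPROX_QUANTILE skips nulls with an explicit branch: values equal to the
// type's inline null jump straight to the skip block, everything else is
// widened to double when integral and handed to agg_approx_quantile; both
// paths then continue from the skip block's insert point.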
// ...
      LL_BUILDER.CreateGEP(bit_cast->getType()->getScalarType()->getPointerElementType(),
                           // ...
  return LL_BUILDER.CreateLoad(gep->getType()->getPointerElementType(), gep);
// ...
  if (target_ti.is_buffer() &&
      !executor_->plan_state_->isLazyFetchColumn(target_expr)) {
    const auto target_lvs =
        agg_expr ? code_generator.codegen(agg_expr->get_arg(), true, co)
                 : code_generator.codegen(
                       target_expr, !executor_->plan_state_->allow_lazy_fetch_, co);
    if (!func_expr && !arr_expr) {
      // ...
      if (target_ti.is_bytes()) {
        CHECK_EQ(size_t(3), target_lvs.size());
        return {target_lvs[1], target_lvs[2]};
      }
      CHECK(target_ti.is_array());
      CHECK_EQ(size_t(1), target_lvs.size());
      CHECK(!agg_expr || agg_expr->get_aggtype() == kSAMPLE);
      // ...
      const auto& elem_ti = target_ti.get_elem_type();
      // ...
          executor_->cgen_state_->emitExternalCall(
              // ...
              {target_lvs.front(), code_generator.posArg(target_expr)}),
          executor_->cgen_state_->emitExternalCall(
              // ...
              {target_lvs.front(),
               code_generator.posArg(target_expr),
               // ...
      throw std::runtime_error(
          "Using array[] operator as argument to an aggregate operator is not "
          // ...
    CHECK(func_expr || arr_expr);
    if (dynamic_cast<const Analyzer::FunctionOper*>(target_expr)) {
      CHECK_EQ(size_t(1), target_lvs.size());
      const auto prefix = target_ti.get_buffer_name();
      CHECK(target_ti.is_array() || target_ti.is_bytes());
      const auto target_lv = LL_BUILDER.CreateLoad(
          target_lvs[0]->getType()->getPointerElementType(), target_lvs[0]);
      // ...
      const auto i8p_ty = llvm::PointerType::get(
          // ...
      const auto ptr = LL_BUILDER.CreatePointerCast(
          LL_BUILDER.CreateExtractValue(target_lv, 0), i8p_ty);
      const auto size = LL_BUILDER.CreateExtractValue(target_lv, 1);
      const auto null_flag = LL_BUILDER.CreateExtractValue(target_lv, 2);
      const auto nullcheck_ok_bb =
          // ...
      const auto nullcheck_fail_bb = llvm::BasicBlock::Create(
          // ...
      const auto nullcheck = LL_BUILDER.CreateICmpEQ(
          null_flag, executor_->cgen_state_->llInt(static_cast<int8_t>(1)));
      LL_BUILDER.CreateCondBr(nullcheck, nullcheck_fail_bb, nullcheck_ok_bb);
      // ...
      auto result_phi = LL_BUILDER.CreatePHI(i8p_ty, 2, prefix + "_ptr_return");
      result_phi->addIncoming(ptr, nullcheck_ok_bb);
      const auto null_arr_sentinel = LL_BUILDER.CreateIntToPtr(
          executor_->cgen_state_->llInt(static_cast<int8_t>(0)), i8p_ty);
      result_phi->addIncoming(null_arr_sentinel, nullcheck_fail_bb);
      // ...
      executor_->cgen_state_->emitExternalCall(
          "register_buffer_with_executor_rsm",
          llvm::Type::getVoidTy(executor_->cgen_state_->context_),
          // ...
      LL_BUILDER.SetInsertPoint(nullcheck_fail_bb);
      // ...
      return {result_phi, size};
    }
    CHECK_EQ(size_t(2), target_lvs.size());
    return {target_lvs[0], target_lvs[1]};
  }
  if (target_ti.is_geometry() &&
      !executor_->plan_state_->isLazyFetchColumn(target_expr)) {
    auto generate_coord_lvs =
        [&](auto* selected_target_expr,
            bool const fetch_columns) -> std::vector<llvm::Value*> {
      const auto target_lvs =
          code_generator.codegen(selected_target_expr, fetch_columns, co);
      if (dynamic_cast<const Analyzer::GeoOperator*>(target_expr) &&
          // ...
      if (geo_uoper || geo_binoper) {
        // ...
        CHECK_EQ(2 * static_cast<size_t>(target_ti.get_physical_coord_cols()),
                 // ...
      CHECK_EQ(static_cast<size_t>(target_ti.get_physical_coord_cols()),
               // ...
      std::vector<llvm::Value*> coords;
      // ...
      for (const auto& target_lv : target_lvs) {
        // ...
        const size_t elem_sz = ctr == 0 ? 1 : 4;
        // ...
        int32_t fixlen = -1;
        if (target_ti.get_type() == kPOINT) {
          // ...
          const auto coords_cd = executor_->getPhysicalColumnDescriptor(col_var, 1);
          if (coords_cd && coords_cd->columnType.get_type() == kARRAY) {
            fixlen = coords_cd->columnType.get_size();
          }
          // ...
          coords.push_back(executor_->cgen_state_->emitExternalCall(
              "fast_fixlen_array_buff",
              // ...
              {target_lv, code_generator.posArg(selected_target_expr)}));
          coords.push_back(executor_->cgen_state_->llInt(int64_t(fixlen)));
          // ...
        coords.push_back(executor_->cgen_state_->emitExternalCall(
            // ...
            {target_lv, code_generator.posArg(selected_target_expr)}));
        coords.push_back(executor_->cgen_state_->emitExternalCall(
            // ...
                code_generator.posArg(selected_target_expr),
                // ...
    };
    // ...
      return generate_coord_lvs(agg_expr->get_arg(), true);
    // ...
    return generate_coord_lvs(target_expr,
                              !executor_->plan_state_->allow_lazy_fetch_);
  }
  return agg_expr ? code_generator.codegen(agg_expr->get_arg(), true, co)
                  : code_generator.codegen(
                        target_expr, !executor_->plan_state_->allow_lazy_fetch_, co);
}
// ...
                                          const std::vector<llvm::Value*>& args) {
  // ...
  return executor_->cgen_state_->emitCall(fname, args);
}

void GroupByAndAggregate::checkErrorCode(llvm::Value* retCode) {
  // ...
  auto zero_const = llvm::ConstantInt::get(retCode->getType(), 0, true);
  auto rc_check_condition = executor_->cgen_state_->ir_builder_.CreateICmp(
      llvm::ICmpInst::ICMP_EQ, retCode, zero_const);
  // ...
  executor_->cgen_state_->emitErrorCheck(rc_check_condition, retCode, "rc");
}
// ...
    const auto grouped_col_expr =
        // ...
    if (!grouped_col_expr) {
      // ...
    if (grouped_col_expr->get_table_id() <= 0) {
      // ...
      if (td->shardedColumnId == grouped_col_expr->get_column_id()) {
        // ...