#include "../CudaMgr/CudaMgr.h"
#include "../Shared/checked_alloc.h"
#include "../Shared/funcannotations.h"
#include "../Utils/ChunkIter.h"

#include <llvm/Transforms/Utils/BasicBlockUtils.h>

#include <string_view>
  return min == 0 && max == -1;
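  // (min, max) = (0, -1) is the sentinel this code uses for an empty column
  // range; callers below test it via isEmpty() on the range info (inferred
  // from the surrounding fragments).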
  out << "Hash Type = " << info.hash_type_ << " min = " << info.min
      << " max = " << info.max << " bucket = " << info.bucket
      << " has_nulls = " << info.has_nulls << "\n";
      out << "UnorderedSet";

      out << "<Unknown Type>";
  for (auto target_expr : target_exprs) {

    if (!agg_expr || agg_expr->get_aggtype() == kSAMPLE) {

      if (ti.is_buffer()) {

      } else if (ti.is_geometry()) {
        agg_count += ti.get_physical_coord_cols() * 2;

    if (agg_expr && agg_expr->get_aggtype() == kAVG) {
  if (!cd || !cd->isVirtualCol) {

  for (const auto& target_expr : ra_exe_unit.target_exprs) {

    const int64_t max_entry_count) {

    const std::vector<InputTableInfo>& query_infos,

    Executor* executor) {
      expr, query_infos, executor, boost::make_optional(ra_exe_unit.simple_quals));
  switch (expr_range.getType()) {

      if (expr_range.getIntMin() > expr_range.getIntMax()) {

              expr_range.getIntMin(),
              expr_range.getIntMax(),
              expr_range.getBucket(),
              expr_range.hasNulls()};

      if (expr_range.getFpMin() > expr_range.getFpMax()) {
  const int64_t baseline_threshold =

  bool has_nulls{false};

      cardinality *= crt_col_cardinality;
      if (col_range_info.has_nulls) {

    if (!cardinality || cardinality > baseline_threshold) {

          int64_t(cardinality),

  return col_range_info;
  static const int64_t MAX_BUFFER_SIZE = 1 << 30;
  const int64_t col_count =

  int64_t max_entry_count = MAX_BUFFER_SIZE / (col_count * sizeof(int64_t));

  max_entry_count = std::min(max_entry_count, baseline_threshold);
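  // Worked example (illustrative col_count, not from this fragment): with
  // MAX_BUFFER_SIZE = 1 << 30 bytes and col_count = 4, max_entry_count =
  // 2^30 / (4 * 8) = 33,554,432 entries before the baseline_threshold cap.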
  if (groupby_expr_ti.is_string() && !col_range_info.bucket) {

  const bool has_filters =

            col_range_info.has_nulls};

    return col_range_info;

            col_range_info.has_nulls};

      !col_range_info.bucket) {

            col_range_info.has_nulls};

  return col_range_info;
  if (col_range_info.bucket) {
    crt_col_cardinality /= col_range_info.bucket;
  }
  return static_cast<int64_t>(crt_col_cardinality +
                              (1 + (col_range_info.has_nulls ? 1 : 0)));
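  // Illustration (assumed inputs): crt_col_cardinality = 100, bucket = 10,
  // has_nulls = true gives 100 / 10 + (1 + 1) = 12 entries; the constant 1
  // widens the bucketed count to cover the inclusive range and the has_nulls
  // term reserves a slot for the null key.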
  if (col_range_info.min <= col_range_info.max) {
    size_t size = col_range_info.max - col_range_info.min;
    if (col_range_info.bucket) {
      size /= col_range_info.bucket;
    }
    if (size >= static_cast<size_t>(std::numeric_limits<int64_t>::max())) {

    return static_cast<int64_t>(size + 1);
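  // Illustration (assumed range): min = 0, max = 99, bucket = 10 gives
  // size = 99 / 10 = 9 under integer division, so the function returns
  // 9 + 1 = 10.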
#define LL_CONTEXT executor_->cgen_state_->context_
#define LL_BUILDER executor_->cgen_state_->ir_builder_
#define LL_BOOL(v) executor_->cgen_state_->llBool(v)
#define LL_INT(v) executor_->cgen_state_->llInt(v)
#define LL_FP(v) executor_->cgen_state_->llFp(v)
#define ROW_FUNC executor_->cgen_state_->row_func_
#define CUR_FUNC executor_->cgen_state_->current_func_
    const std::vector<InputTableInfo>& query_infos,
    std::shared_ptr<RowSetMemoryOwner> row_set_mem_owner,
    const std::optional<int64_t>& group_cardinality_estimation)

    , ra_exe_unit_(ra_exe_unit)
    , query_infos_(query_infos)
    , row_set_mem_owner_(row_set_mem_owner)
    , device_type_(device_type)
    , group_cardinality_estimation_(group_cardinality_estimation) {
    const auto& groupby_ti = groupby_expr->get_type_info();
    if (groupby_ti.is_bytes()) {
      throw std::runtime_error(
          "Cannot group by string columns which are not dictionary encoded.");
    }
    if (groupby_ti.is_buffer()) {
      throw std::runtime_error("Group by buffer not supported");
    }
    if (groupby_ti.is_geometry()) {
      throw std::runtime_error("Group by geometry not supported");
    }
    const size_t shard_count) const {
  size_t device_count{0};

    device_count = executor_->cudaMgr()->getDeviceCount();

  int64_t bucket{col_range_info.bucket};

    if (device_count < shard_count) {
      bucket = g_leaf_count ? std::max(device_count, static_cast<size_t>(1))
                            : std::min(device_count, shard_count - device_count);
    } else {
      bucket = shard_count * std::max(g_leaf_count, static_cast<size_t>(1));
    }
    const std::vector<InputTableInfo>& query_infos,
    const bool is_group_by,
    Executor* executor) {
  bool keyless{true}, found{false};
  int32_t num_agg_expr{0};
  for (const auto target_expr : ra_exe_unit.target_exprs) {
    if (agg_info.is_agg) {

      const auto arg_expr = agg_arg(target_expr);

      switch (agg_info.agg_kind) {

          if (arg_expr && !arg_expr->get_type_info().get_notnull()) {

              expr_range_info.hasNulls()) {

          if (arg_expr && !arg_expr->get_type_info().get_notnull()) {

              expr_range_info.hasNulls()) {

          auto arg_ti = arg_expr->get_type_info();

            arg_ti.set_notnull(true);

          if (!arg_ti.get_notnull()) {

              !expr_range_info.hasNulls()) {

          switch (expr_range_info.getType()) {

              if (expr_range_info.getFpMax() < 0 || expr_range_info.getFpMin() > 0) {

              if (expr_range_info.getIntMax() < 0 || expr_range_info.getIntMin() > 0) {
        CHECK(agg_expr && agg_expr->get_arg());
        const auto& arg_ti = agg_expr->get_arg()->get_type_info();
        if (arg_ti.is_string() || arg_ti.is_buffer()) {

        auto expr_range_info =

            is_group_by || float_argument_input,
            float_argument_input ? sizeof(float) : 8);
        switch (expr_range_info.getType()) {

                *reinterpret_cast<const double*>(may_alias_ptr(&init_max));
            if (expr_range_info.getFpMax() < double_max) {

            if (expr_range_info.getIntMax() < init_max) {
        CHECK(agg_expr && agg_expr->get_arg());
        const auto& arg_ti = agg_expr->get_arg()->get_type_info();
        if (arg_ti.is_string() || arg_ti.is_buffer()) {

        auto expr_range_info =

            expr_range_info.hasNulls()) {

            is_group_by || float_argument_input,
            float_argument_input ? sizeof(float) : 8);
        switch (expr_range_info.getType()) {

                *reinterpret_cast<const double*>(may_alias_ptr(&init_min));
            if (expr_range_info.getFpMin() > double_min) {

            if (expr_range_info.getIntMin() > init_min) {
    const std::vector<InputTableInfo>& query_infos,

    Executor* executor) {

  auto compute_bytes_per_group =

    size_t effective_size_bytes = (bitmap_sz + 7) / 8;
    const auto padded_size =

            : effective_size_bytes;
    return padded_size * sub_bitmap_count;
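  // Illustration (assumed arguments): bitmap_sz = 100 bits rounds up to
  // effective_size_bytes = (100 + 7) / 8 = 13 bytes; with sub_bitmap_count = 4
  // and no extra padding, the lambda yields 13 * 4 = 52 bytes per group.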
  for (size_t i = 0; i < ra_exe_unit.target_exprs.size(); i++) {
    CHECK(agg_info.is_agg);

      if (arg_ti.is_bytes()) {
        throw std::runtime_error(
            "Strings must be dictionary-encoded for COUNT(DISTINCT).");
      }

        throw std::runtime_error("APPROX_COUNT_DISTINCT on arrays not supported yet");

        throw std::runtime_error(
            "APPROX_COUNT_DISTINCT on geometry columns not supported");

      if (agg_info.is_distinct && arg_ti.is_geometry()) {
        throw std::runtime_error("COUNT DISTINCT on geometry columns not supported");
      auto arg_range_info =
          arg_ti.is_fp() ? no_range_info
                         : get_expr_range_info(
                               ra_exe_unit, query_infos, agg_expr->get_arg(), executor);
        const auto& original_target_expr_ti = it->second;
        if (arg_ti.is_integer() && original_target_expr_ti.get_type() == kDATE &&

          auto is_date_value_not_encoded = [&original_target_expr_ti](int64_t date_val) {
            if (original_target_expr_ti.get_comp_param() == 16) {
              return date_val < INT16_MIN || date_val > INT16_MAX;
            }
            return date_val < INT32_MIN || date_val > INT32_MAX;
          };

          if (is_date_value_not_encoded(arg_range_info.min)) {

          if (is_date_value_not_encoded(arg_range_info.max)) {

          arg_range_info.bucket = 0;
      int64_t bitmap_sz_bits{0};

        const auto error_rate_expr = agg_expr->get_arg1();
        if (error_rate_expr) {
          CHECK(error_rate_expr->get_type_info().get_type() == kINT);
          auto const error_rate =

          CHECK_GE(error_rate->get_constval().intval, 1);
      if (arg_range_info.isEmpty()) {
        count_distinct_descriptors.emplace_back(
        const auto sub_bitmap_count =

        size_t worst_case_num_groups{1};

            !(arg_ti.is_buffer() || arg_ti.is_geometry())) {

          if (shared::is_any<kCOUNT, kCOUNT_IF>(agg_info.agg_kind)) {

          const auto total_bytes_per_entry =
              compute_bytes_per_group(bitmap_sz_bits, sub_bitmap_count, device_type);
          const auto range_bucket = std::max(group_by_range_info.bucket, (int64_t)1);
          const auto maximum_num_groups =
              (group_by_range_info.max - group_by_range_info.min + 1) / range_bucket;
          const auto total_bitmap_bytes_for_groups =
              total_bytes_per_entry * maximum_num_groups;
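          // Illustration (assumed stats): a group-by range of [0, 999] with
          // bucket = 10 gives maximum_num_groups = (999 - 0 + 1) / 10 = 100;
          // at 52 bytes per entry that is 5,200 bytes of bitmaps in total.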
          if (total_bitmap_bytes_for_groups >=

            const auto agg_expr_max_entry_count =
                arg_range_info.max - arg_range_info.min + 1;
            int64_t max_agg_expr_table_cardinality{1};

                     bool (*)(const Analyzer::ColumnVar*, const Analyzer::ColumnVar*)>

            for (const auto cv : colvar_set) {

                  std::find_if(query_infos.begin(),

                               [&](const auto& input_table_info) {
                                 return input_table_info.table_key == cv->getTableKey();

              int64_t cur_table_cardinality =
                  it != query_infos.end()
                      ? static_cast<int64_t>(it->info.getNumTuplesUpperBound())

              max_agg_expr_table_cardinality =
                  std::max(max_agg_expr_table_cardinality, cur_table_cardinality);
              worst_case_num_groups *= cur_table_cardinality;
            auto has_valid_stat = [agg_expr_max_entry_count, maximum_num_groups]() {
              return agg_expr_max_entry_count > 0 && maximum_num_groups > 0;
            };

            if (has_valid_stat()) {
              const size_t unordered_set_threshold{2};

              const auto bits_for_agg_entry = std::ceil(log(agg_expr_max_entry_count));
              const auto bits_for_agg_table =
                  std::ceil(log(max_agg_expr_table_cardinality));
              const auto avg_num_unique_entries_per_group =
                  std::ceil(max_agg_expr_table_cardinality / maximum_num_groups);

              if ((bits_for_agg_entry - bits_for_agg_table) >= unordered_set_threshold ||
                  agg_expr_max_entry_count >= avg_num_unique_entries_per_group) {
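                // Illustration (assumed stats): agg_expr_max_entry_count =
                // 1,000,000 and max_agg_expr_table_cardinality = 10,000 give
                // ceil(log(1e6)) - ceil(log(1e4)) = 14 - 10 = 4 >= 2, so the
                // set-based implementation is preferred over a sparse
                // one-million-bit bitmap per group.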
                throw std::runtime_error(
                    "Consider using approx_count_distinct operator instead of "
                    "count_distinct operator to lower the memory "
          !(arg_ti.is_array() || arg_ti.is_geometry())) {

        const size_t too_many_entries{100000000};

            worst_case_num_groups > too_many_entries &&

            "Detected too many input entries for set-based count distinct operator under "

      count_distinct_descriptors.emplace_back(

  return count_distinct_descriptors;
    const bool allow_multifrag,
    const size_t max_groups_buffer_entry_count,
    const int8_t crt_min_byte_width,

    const bool output_columnar_hint) {

  bool sort_on_gpu_hint =

  bool must_use_baseline_sort = shard_count;

      max_groups_buffer_entry_count,

      must_use_baseline_sort,
      output_columnar_hint);
  CHECK(query_mem_desc);
  if (query_mem_desc->sortOnGpu() &&

          align_to_int64(query_mem_desc->getEntryCount() * sizeof(int32_t))) >
          2 * 1024 * 1024 * 1024LL) {
    must_use_baseline_sort = true;
    sort_on_gpu_hint = false;
    const bool allow_multifrag,
    const size_t max_groups_buffer_entry_count,
    const int8_t crt_min_byte_width,
    const bool sort_on_gpu_hint,

    const bool must_use_baseline_sort,
    const bool output_columnar_hint) {
  const auto col_range_info =

      col_range_info_nosharding.min,
      col_range_info_nosharding.max,

      col_range_info_nosharding.has_nulls};

  const auto keyless_info =

      (col_range_info.max - col_range_info.min) /
              std::max(col_range_info.bucket, int64_t(1)) >
          max_groups_buffer_entry_count,

          count_distinct_descriptors,
          must_use_baseline_sort,
          output_columnar_hint,

      LOG(WARNING) << e.what() << " Disabling Streaming Top N.";

      max_groups_buffer_entry_count,

      count_distinct_descriptors,
      must_use_baseline_sort,
      output_columnar_hint,
    const std::list<Analyzer::OrderEntry>& order_entries) {
  if (order_entries.size() > 1) {

  for (const auto& order_entry : order_entries) {

    if (!dynamic_cast<Analyzer::AggExpr*>(target_expr)) {

    if (agg_expr->get_is_distinct() || agg_expr->get_aggtype() == kAVG ||
        agg_expr->get_aggtype() == kMIN || agg_expr->get_aggtype() == kMAX ||

    if (agg_expr->get_arg()) {

      if (arg_ti.is_fp()) {

    auto expr_range_info =

         expr_range_info.has_nulls) &&
        order_entry.is_desc == order_entry.nulls_first) {

    const auto& target_ti = target_expr->get_type_info();
    CHECK(!target_ti.is_buffer());
    if (!target_ti.is_integer()) {
                                   llvm::BasicBlock* sc_false,

  CHECK(filter_result);

  bool can_return_error = false;
  llvm::BasicBlock* filter_false{nullptr};

    llvm::Value* old_total_matched_val{nullptr};
      old_total_matched_val =
          LL_BUILDER.CreateAtomicRMW(llvm::AtomicRMWInst::Add,

#if LLVM_VERSION_MAJOR > 12

                                     llvm::AtomicOrdering::Monotonic);

      old_total_matched_val = LL_BUILDER.CreateLoad(
          total_matched_ptr->getType()->getPointerElementType(), total_matched_ptr);

    LL_BUILDER.CreateStore(old_total_matched_val, old_total_matched_ptr);
    auto agg_out_ptr_w_idx = codegenGroupBy(query_mem_desc, co, filter_cfg);

      filter_cfg.setChainToNext();

          varlen_output_buffer,

      llvm::Value* nullcheck_cond{nullptr};

        nullcheck_cond = LL_BUILDER.CreateICmpSGE(std::get<1>(agg_out_ptr_w_idx),

            std::get<0>(agg_out_ptr_w_idx),
            llvm::ConstantPointerNull::get(

            nullcheck_cond, executor_, false, "groupby_nullcheck", &filter_cfg, false);
            varlen_output_buffer,

      can_return_error = true;

        code_generator.posArg(nullptr),
    std::stack<llvm::BasicBlock*> array_loops;

    auto arg_it = ROW_FUNC->arg_begin();
    std::vector<llvm::Value*> agg_out_vec;

      agg_out_vec.push_back(&*arg_it++);
  } else if (sc_false) {
    const auto saved_insert_block = LL_BUILDER.GetInsertBlock();

    LL_BUILDER.SetInsertPoint(saved_insert_block);

  return can_return_error;
    llvm::Value* groups_buffer,

          : query_mem_desc.getRowSize() / sizeof(int64_t);

    CHECK_GE(only_order_entry.tle_no, int(1));
    const size_t target_idx = only_order_entry.tle_no - 1;

    const auto chosen_bytes =

    auto order_entry_lv = executor_->cgen_state_->castToTypeIn(
        code_generator.codegen(order_entry_expr, true, co).front(), chosen_bytes * 8);

    std::string fname = "get_bin_from_k_heap";
    const auto& oe_ti = order_entry_expr->get_type_info();
    llvm::Value* null_key_lv = nullptr;
    if (oe_ti.is_integer() || oe_ti.is_decimal() || oe_ti.is_time()) {
      const size_t bit_width = order_entry_lv->getType()->getIntegerBitWidth();
      switch (bit_width) {

      CHECK(oe_ti.is_fp());
      if (order_entry_lv->getType()->isDoubleTy()) {

      fname += order_entry_lv->getType()->isDoubleTy() ? "_double" : "_float";
    const auto key_slot_idx =

        LL_BOOL(only_order_entry.is_desc),
        LL_BOOL(!order_entry_expr->get_type_info().get_notnull()),
        LL_BOOL(only_order_entry.nulls_first),
    const auto output_buffer_entry_count_lv =
        LL_BUILDER.CreateLoad(arg->getType()->getPointerElementType(), arg);

    const auto group_expr_lv =
        LL_BUILDER.CreateLoad(arg->getType()->getPointerElementType(), arg);
    std::vector<llvm::Value*> args{groups_buffer,
                                   output_buffer_entry_count_lv,

                                   code_generator.posArg(nullptr)};

    const auto columnar_output_offset =

    return columnar_output_offset;
  auto arg_it = ROW_FUNC->arg_begin();
  auto groups_buffer = arg_it++;

  std::stack<llvm::BasicBlock*> array_loops;

      return std::make_tuple(

      return std::make_tuple(

          : query_mem_desc.getRowSize() / sizeof(int64_t);
  llvm::Value* group_key = nullptr;
  llvm::Value* key_size_lv = nullptr;

      col_width_size == sizeof(int32_t)

    int32_t subkey_idx = 0;
      const auto col_range_info =

      const auto translated_null_value = static_cast<int64_t>(

          (col_range_info.bucket ? col_range_info.bucket : 1));

      const bool col_has_nulls =

              : col_range_info.has_nulls)

      const auto group_expr_lvs =
          executor_->groupByColumnCodegen(group_expr.get(),

                                          translated_null_value,

      const auto group_expr_lv = group_expr_lvs.translated_value;

          group_expr_lvs.original_value,

          group_key->getType()->getScalarType()->getPointerElementType(),

        &*groups_buffer, group_key, key_size_lv, query_mem_desc, row_size_quad);
  return std::make_tuple(nullptr, nullptr);
  auto arg_it = ROW_FUNC->arg_begin();

  auto varlen_output_buffer = arg_it++;
  CHECK(varlen_output_buffer->getType() == llvm::Type::getInt64PtrTy(LL_CONTEXT));
  return varlen_output_buffer;
std::tuple<llvm::Value*, llvm::Value*>

    llvm::Value* groups_buffer,
    llvm::Value* group_expr_lv_translated,
    llvm::Value* group_expr_lv_original,
    const int32_t row_size_quad) {

          ? "get_columnar_group_bin_offset"
          : "get_group_value_fast"};
    get_group_fn_name += "_keyless";

    get_group_fn_name += "_semiprivate";

  std::vector<llvm::Value*> get_group_fn_args{&*groups_buffer,
                                              &*group_expr_lv_translated};
  if (group_expr_lv_original && get_group_fn_name == "get_group_value_fast" &&

    get_group_fn_name += "_with_original_key";
    get_group_fn_args.push_back(group_expr_lv_original);

    get_group_fn_args.push_back(LL_INT(row_size_quad));

    get_group_fn_args.push_back(LL_INT(row_size_quad));

      get_group_fn_args.push_back(warp_idx);

  if (get_group_fn_name == "get_columnar_group_bin_offset") {
    return std::make_tuple(&*groups_buffer,
                           emitCall(get_group_fn_name, get_group_fn_args));
  }
  return std::make_tuple(emitCall(get_group_fn_name, get_group_fn_args), nullptr);
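  // The helper name composed above is one of: get_group_value_fast,
  // get_group_value_fast_keyless, get_group_value_fast_keyless_semiprivate,
  // get_group_value_fast_with_original_key (only when no keyless suffix was
  // added), or get_columnar_group_bin_offset for columnar output.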
    llvm::Value* groups_buffer,
    llvm::Value* group_key,
    llvm::Value* key_size_lv,

    const int32_t row_size_quad) {

      LL_BUILDER.CreateCall(perfect_hash_func, std::vector<llvm::Value*>{group_key});

    const std::string set_matching_func_name{
        "set_matching_group_value_perfect_hash_columnar"};
    const std::vector<llvm::Value*> set_matching_func_arg{

    emitCall(set_matching_func_name, set_matching_func_arg);

    return std::make_tuple(groups_buffer, hash_lv);
    return std::make_tuple(
        emitCall("get_matching_group_value_perfect_hash_keyless",
                 {groups_buffer, hash_lv, LL_INT(row_size_quad)}),

  return std::make_tuple(
      emitCall(
          "get_matching_group_value_perfect_hash",
          {groups_buffer, hash_lv, group_key, key_size_lv, LL_INT(row_size_quad)}),
std::tuple<llvm::Value*, llvm::Value*>

    llvm::Value* groups_buffer,
    llvm::Value* group_key,
    llvm::Value* key_size_lv,

    const size_t key_width,
    const int32_t row_size_quad) {

  if (group_key->getType() != llvm::Type::getInt64PtrTy(LL_CONTEXT)) {
    CHECK(key_width == sizeof(int32_t));
  std::vector<llvm::Value*> func_args{

      LL_INT(static_cast<int32_t>(key_width))};
  std::string func_name{"get_group_value"};

    func_name += "_columnar_slot";

    func_args.push_back(LL_INT(row_size_quad));

    func_name += "_with_watchdog";

    return std::make_tuple(groups_buffer, emitCall(func_name, func_args));

  return std::make_tuple(emitCall(func_name, func_args), nullptr);
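  // Names composed here are get_group_value or get_group_value_columnar_slot,
  // each optionally followed by _with_watchdog, mirroring the suffix logic
  // visible above.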
  auto ft = llvm::FunctionType::get(

  auto key_hash_func = llvm::Function::Create(ft,
                                              llvm::Function::ExternalLinkage,

  executor_->cgen_state_->helper_functions_.push_back(key_hash_func);

  auto& key_buff_arg = *key_hash_func->args().begin();
  llvm::Value* key_buff_lv = &key_buff_arg;
  auto bb = llvm::BasicBlock::Create(LL_CONTEXT, "entry", key_hash_func);
  llvm::IRBuilder<> key_hash_func_builder(bb);
  std::vector<int64_t> cardinalities;

    auto col_range_info =

    auto* gep = key_hash_func_builder.CreateGEP(
        key_buff_lv->getType()->getScalarType()->getPointerElementType(),

        key_hash_func_builder.CreateLoad(gep->getType()->getPointerElementType(), gep);
    auto col_range_info =

        key_hash_func_builder.CreateSub(key_comp_lv, LL_INT(col_range_info.min));
    if (col_range_info.bucket) {

          key_hash_func_builder.CreateSDiv(crt_term_lv, LL_INT(col_range_info.bucket));

    for (size_t prev_dim_idx = 0; prev_dim_idx < dim_idx; ++prev_dim_idx) {
      crt_term_lv = key_hash_func_builder.CreateMul(crt_term_lv,
                                                    LL_INT(cardinalities[prev_dim_idx]));
    }
    hash_lv = key_hash_func_builder.CreateAdd(hash_lv, crt_term_lv);

  key_hash_func_builder.CreateRet(

  return key_hash_func;
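// A minimal host-side sketch (assumed standalone helper, not part of this
// file) of the index the generated perfect-hash function computes: each key
// component is offset by its range minimum, divided by its bucket size if
// any, and scaled by the product of the cardinalities of the preceding
// dimensions, i.e. a mixed-radix row index.
#include <cstddef>
#include <cstdint>
#include <vector>

int64_t perfect_hash_index(const std::vector<int64_t>& key,
                           const std::vector<int64_t>& mins,
                           const std::vector<int64_t>& buckets,
                           const std::vector<int64_t>& cardinalities) {
  int64_t hash = 0;
  for (std::size_t dim_idx = 0; dim_idx < key.size(); ++dim_idx) {
    // Normalize the component to [0, cardinality), as CreateSub/CreateSDiv do.
    int64_t crt_term = key[dim_idx] - mins[dim_idx];
    if (buckets[dim_idx]) {
      crt_term /= buckets[dim_idx];
    }
    // Scale by the sizes of all previously processed dimensions.
    for (std::size_t prev_dim_idx = 0; prev_dim_idx < dim_idx; ++prev_dim_idx) {
      crt_term *= cardinalities[prev_dim_idx];
    }
    hash += crt_term;
  }
  return hash;
}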
                                                 llvm::Value* target) {

  const auto& agg_type = agg_info.sql_type;
  const size_t chosen_bytes = agg_type.get_size();

  bool need_conversion{false};
  llvm::Value* arg_null{nullptr};
  llvm::Value* agg_null{nullptr};
  llvm::Value* target_to_cast{target};
  if (arg_type.is_fp()) {
    arg_null = executor_->cgen_state_->inlineFpNull(arg_type);
    if (agg_type.is_fp()) {
      agg_null = executor_->cgen_state_->inlineFpNull(agg_type);
      if (!static_cast<llvm::ConstantFP*>(arg_null)->isExactlyValue(
              static_cast<llvm::ConstantFP*>(agg_null)->getValueAPF())) {
        need_conversion = true;

    arg_null = executor_->cgen_state_->inlineIntNull(arg_type);
    if (agg_type.is_fp()) {
      agg_null = executor_->cgen_state_->inlineFpNull(agg_type);
      need_conversion = true;
      target_to_cast = executor_->castToFP(target, arg_type, agg_type);

      agg_null = executor_->cgen_state_->inlineIntNull(agg_type);
      if ((static_cast<llvm::ConstantInt*>(arg_null)->getBitWidth() !=
           static_cast<llvm::ConstantInt*>(agg_null)->getBitWidth()) ||
          (static_cast<llvm::ConstantInt*>(arg_null)->getValue() !=
           static_cast<llvm::ConstantInt*>(agg_null)->getValue())) {
        need_conversion = true;

  if (need_conversion) {
    auto cmp = arg_type.is_fp() ? LL_BUILDER.CreateFCmpOEQ(target, arg_null)

        executor_->cgen_state_->castToTypeIn(target_to_cast, chosen_bytes << 3));
  const auto window_func_context =

          : query_mem_desc.getRowSize() / sizeof(int64_t);

    auto arg_it = ROW_FUNC->arg_begin();
    auto groups_buffer = arg_it++;

        window_func_context, code_generator.posArg(nullptr));
    const auto pos_in_window =

    llvm::Value* entry_count_lv =

    std::vector<llvm::Value*> args{
        &*groups_buffer, entry_count_lv, pos_in_window, code_generator.posArg(nullptr)};

    const auto columnar_output_offset =

  auto arg_it = ROW_FUNC->arg_begin();
  auto groups_buffer = arg_it++;
    const std::tuple<llvm::Value*, llvm::Value*>& agg_out_ptr_w_idx_in,
    llvm::Value* varlen_output_buffer,
    const std::vector<llvm::Value*>& agg_out_vec,

  auto agg_out_ptr_w_idx = agg_out_ptr_w_idx_in;

  const bool is_group_by = std::get<0>(agg_out_ptr_w_idx);
  bool can_return_error = false;

    CHECK(agg_out_vec.empty());

    CHECK(!agg_out_vec.empty());

  llvm::Value* output_buffer_byte_stream{nullptr};
  llvm::Value* out_row_idx{nullptr};

    output_buffer_byte_stream = LL_BUILDER.CreateBitCast(
        std::get<0>(agg_out_ptr_w_idx),
        llvm::PointerType::get(llvm::Type::getInt8Ty(LL_CONTEXT), 0));
    output_buffer_byte_stream->setName("out_buff_b_stream");
    CHECK(std::get<1>(agg_out_ptr_w_idx));
    out_row_idx = LL_BUILDER.CreateZExt(std::get<1>(agg_out_ptr_w_idx),

    out_row_idx->setName("out_row_idx");

      target_builder(target_expr, executor_, query_mem_desc, co);

          output_buffer_byte_stream,

          varlen_output_buffer,

          executor_->plan_state_->isLazyFetchColumn(target_expr);

  return can_return_error;
    llvm::Value* output_buffer_byte_stream,
    llvm::Value* out_row_idx,
    const std::tuple<llvm::Value*, llvm::Value*>& agg_out_ptr_w_idx,

    const size_t chosen_bytes,
    const size_t agg_out_off,
    const size_t target_idx) {

  llvm::Value* agg_col_ptr{nullptr};

    CHECK(chosen_bytes == 1 || chosen_bytes == 2 || chosen_bytes == 4 ||
          chosen_bytes == 8);

    CHECK(output_buffer_byte_stream);

    auto out_per_col_byte_idx =
#ifdef _WIN32
        LL_BUILDER.CreateShl(out_row_idx, __lzcnt(chosen_bytes) - 1);
#else
        LL_BUILDER.CreateShl(out_row_idx, __builtin_ffs(chosen_bytes) - 1);
#endif
    auto byte_offset = LL_BUILDER.CreateAdd(out_per_col_byte_idx,
                                            LL_INT(static_cast<int64_t>(col_off)));
    byte_offset->setName("out_byte_off_target_" + std::to_string(target_idx));

        output_buffer_byte_stream->getType()->getScalarType()->getPointerElementType(),
        output_buffer_byte_stream,

    agg_col_ptr->setName("out_ptr_target_" + std::to_string(target_idx));
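    // Illustration (assumed values): with chosen_bytes = 4 the shift amount
    // is __builtin_ffs(4) - 1 = 2, so out_row_idx = 10 maps to byte index
    // 10 << 2 = 40; adding col_off then gives this target's slot offset in
    // the columnar byte stream.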
    auto const col_off_in_bytes = query_mem_desc.getColOffInBytes(agg_out_off);
    auto const col_off = col_off_in_bytes / chosen_bytes;
    auto const col_rem = col_off_in_bytes % chosen_bytes;
    CHECK_EQ(col_rem, 0u) << col_off_in_bytes << " % " << chosen_bytes;
    CHECK(std::get<1>(agg_out_ptr_w_idx));

        std::get<1>(agg_out_ptr_w_idx),

        std::get<0>(agg_out_ptr_w_idx),

        bit_cast->getType()->getScalarType()->getPointerElementType(),

    auto const col_off = col_off_in_bytes / chosen_bytes;
    auto const col_rem = col_off_in_bytes % chosen_bytes;
    CHECK_EQ(col_rem, 0u) << col_off_in_bytes << " % " << chosen_bytes;

        std::get<0>(agg_out_ptr_w_idx),

        bit_cast->getType()->getScalarType()->getPointerElementType(),
  auto estimator_comp_count_lv = LL_INT(static_cast<int32_t>(estimator_arg.size()));

                                               estimator_comp_count_lv);
  int32_t subkey_idx = 0;
  for (const auto& estimator_arg_comp : estimator_arg) {
    const auto estimator_arg_comp_lvs =
        executor_->groupByColumnCodegen(estimator_arg_comp.get(),

    CHECK(!estimator_arg_comp_lvs.original_value);
    const auto estimator_arg_comp_lv = estimator_arg_comp_lvs.translated_value;

        estimator_arg_comp_lv,

        estimator_key_lv->getType()->getScalarType()->getPointerElementType(),

  const auto key_bytes = LL_BUILDER.CreateBitCast(estimator_key_lv, int8_ptr_ty);
  const auto estimator_comp_bytes_lv =
      LL_INT(static_cast<int32_t>(estimator_arg.size() * sizeof(int64_t)));
  const auto bitmap_size_lv =

      {bitmap, &*bitmap_size_lv, key_bytes, &*estimator_comp_bytes_lv});
                                         const int64_t skip_val) {
  if (val != skip_val) {

  auto* mode_map = reinterpret_cast<AggMode*>(*agg);
    const size_t target_idx,

    std::vector<llvm::Value*>& agg_args,

  const auto& arg_ti =

  if (arg_ti.is_fp()) {
    agg_args.back() = executor_->cgen_state_->ir_builder_.CreateBitCast(

  const auto& count_distinct_descriptor =

    agg_args.push_back(LL_INT(int32_t(count_distinct_descriptor.bitmap_sz_bits)));

      agg_args.push_back(base_dev_addr);
      agg_args.push_back(base_host_addr);
      emitCall("agg_approximate_count_distinct_gpu", agg_args);

      emitCall("agg_approximate_count_distinct", agg_args);
  std::string agg_fname{"agg_count_distinct"};

    agg_fname += "_bitmap";
    agg_args.push_back(LL_INT(static_cast<int64_t>(count_distinct_descriptor.min_val)));

  if (agg_info.skip_null_val) {
    auto null_lv = executor_->cgen_state_->castToTypeIn(

            ? static_cast<llvm::Value*>(executor_->cgen_state_->inlineFpNull(arg_ti))
            : static_cast<llvm::Value*>(executor_->cgen_state_->inlineIntNull(arg_ti))),

    null_lv = executor_->cgen_state_->ir_builder_.CreateBitCast(

    agg_fname += "_skip_val";
    agg_args.push_back(null_lv);

    agg_fname += "_gpu";

    agg_args.push_back(base_dev_addr);
    agg_args.push_back(base_host_addr);
    agg_args.push_back(LL_INT(int64_t(count_distinct_descriptor.sub_bitmap_count)));

             count_distinct_descriptor.bitmapPaddedSizeBytes() %
                 count_distinct_descriptor.sub_bitmap_count);
    agg_args.push_back(
        LL_INT(int64_t(count_distinct_descriptor.bitmapPaddedSizeBytes() /
                       count_distinct_descriptor.sub_bitmap_count)));

  executor_->cgen_state_->emitExternalCall(
      agg_fname, llvm::Type::getVoidTy(LL_CONTEXT), agg_args);
    const size_t target_idx,

    std::vector<llvm::Value*>& agg_args,

  llvm::BasicBlock *calc, *skip{nullptr};

  auto* cs = executor_->cgen_state_.get();
  auto& irb = cs->ir_builder_;

    auto* const null_value = cs->castToTypeIn(cs->inlineNull(arg_ti), 64);
    auto* const skip_cond = arg_ti.is_fp()
                                ? irb.CreateFCmpOEQ(agg_args.back(), null_value)
                                : irb.CreateICmpEQ(agg_args.back(), null_value);
    calc = llvm::BasicBlock::Create(cs->context_, "calc_approx_quantile");
    skip = llvm::BasicBlock::Create(cs->context_, "skip_approx_quantile");
    irb.CreateCondBr(skip_cond, skip, calc);
    cs->current_func_->getBasicBlockList().push_back(calc);
    irb.SetInsertPoint(calc);

  if (!arg_ti.is_fp()) {

    agg_args.back() = executor_->castToFP(agg_args.back(), arg_ti, agg_info.sql_type);

  cs->emitExternalCall(
      "agg_approx_quantile", llvm::Type::getVoidTy(cs->context_), agg_args);

    cs->current_func_->getBasicBlockList().push_back(skip);
    irb.SetInsertPoint(skip);
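  // Host-side analogue (sketch, not emitted code): the diamond above simply
  // skips null inputs, i.e. "if (val == null_sentinel) skip; else
  // sketch.add(val);", with integer arguments first widened to FP via
  // castToFP.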
    std::vector<llvm::Value*>& agg_args,

  llvm::BasicBlock *calc, *skip{nullptr};

  bool const is_fp = arg_ti.is_fp();
  auto* cs = executor_->cgen_state_.get();
  auto& irb = cs->ir_builder_;

    auto* const null_value =
        is_fp ? cs->inlineNull(arg_ti) : cs->castToTypeIn(cs->inlineNull(arg_ti), 64);
    auto* const skip_cond = is_fp ? irb.CreateFCmpOEQ(agg_args.back(), null_value)
                                  : irb.CreateICmpEQ(agg_args.back(), null_value);
    calc = llvm::BasicBlock::Create(cs->context_, "calc_mode");
    skip = llvm::BasicBlock::Create(cs->context_, "skip_mode");
    irb.CreateCondBr(skip_cond, skip, calc);
    cs->current_func_->getBasicBlockList().push_back(calc);
    irb.SetInsertPoint(calc);

    auto* const int_type = get_int_type(8 * arg_ti.get_size(), cs->context_);
    agg_args.back() = irb.CreateBitCast(agg_args.back(), int_type);

  cs->emitExternalCall("agg_mode_func", llvm::Type::getVoidTy(cs->context_), agg_args);

    cs->current_func_->getBasicBlockList().push_back(skip);
    irb.SetInsertPoint(skip);
  return LL_BUILDER.CreateLoad(gep->getType()->getPointerElementType(), gep);
  if (target_ti.is_buffer() &&
      !executor_->plan_state_->isLazyFetchColumn(target_expr)) {
    const auto target_lvs =
        agg_expr ? code_generator.codegen(agg_expr->get_arg(), true, co)
                 : code_generator.codegen(
                       target_expr, !executor_->plan_state_->allow_lazy_fetch_, co);
    if (!func_expr && !arr_expr) {

      if (target_ti.is_bytes()) {
        CHECK_EQ(size_t(3), target_lvs.size());
        return {target_lvs[1], target_lvs[2]};
      }
      CHECK(target_ti.is_array());
      CHECK_EQ(size_t(1), target_lvs.size());
      CHECK(!agg_expr || agg_expr->get_aggtype() == kSAMPLE);
      const auto& elem_ti = target_ti.get_elem_type();

          executor_->cgen_state_->emitExternalCall(

              {target_lvs.front(), code_generator.posArg(target_expr)}),
          executor_->cgen_state_->emitExternalCall(

              {target_lvs.front(),
               code_generator.posArg(target_expr),

      throw std::runtime_error(
          "Using array[] operator as argument to an aggregate operator is not "

    CHECK(func_expr || arr_expr);
    if (dynamic_cast<const Analyzer::FunctionOper*>(target_expr)) {
      CHECK_EQ(size_t(1), target_lvs.size());
      const auto prefix = target_ti.get_buffer_name();
      CHECK(target_ti.is_array() || target_ti.is_bytes());
      const auto target_lv = LL_BUILDER.CreateLoad(
          target_lvs[0]->getType()->getPointerElementType(), target_lvs[0]);

      const auto i8p_ty = llvm::PointerType::get(

      const auto ptr = LL_BUILDER.CreatePointerCast(
          LL_BUILDER.CreateExtractValue(target_lv, 0), i8p_ty);
      const auto size = LL_BUILDER.CreateExtractValue(target_lv, 1);
      const auto null_flag = LL_BUILDER.CreateExtractValue(target_lv, 2);
      const auto nullcheck_ok_bb =

      const auto nullcheck_fail_bb = llvm::BasicBlock::Create(

      const auto nullcheck = LL_BUILDER.CreateICmpEQ(
          null_flag, executor_->cgen_state_->llInt(static_cast<int8_t>(1)));
      LL_BUILDER.CreateCondBr(nullcheck, nullcheck_fail_bb, nullcheck_ok_bb);

      auto result_phi = LL_BUILDER.CreatePHI(i8p_ty, 2, prefix + "_ptr_return");
      result_phi->addIncoming(ptr, nullcheck_ok_bb);
      const auto null_arr_sentinel = LL_BUILDER.CreateIntToPtr(
          executor_->cgen_state_->llInt(static_cast<int8_t>(0)), i8p_ty);
      result_phi->addIncoming(null_arr_sentinel, nullcheck_fail_bb);

      executor_->cgen_state_->emitExternalCall(
          "register_buffer_with_executor_rsm",
          llvm::Type::getVoidTy(executor_->cgen_state_->context_),

      LL_BUILDER.SetInsertPoint(nullcheck_fail_bb);

      return {result_phi, size};
    }
    CHECK_EQ(size_t(2), target_lvs.size());
    return {target_lvs[0], target_lvs[1]};
  if (target_ti.is_geometry() &&
      !executor_->plan_state_->isLazyFetchColumn(target_expr)) {
    auto generate_coord_lvs =
        [&](auto* selected_target_expr,
            bool const fetch_columns) -> std::vector<llvm::Value*> {
      const auto target_lvs =
          code_generator.codegen(selected_target_expr, fetch_columns, co);
      if (dynamic_cast<const Analyzer::GeoOperator*>(target_expr) &&

      if (geo_uoper || geo_binoper) {

        CHECK_EQ(2 * static_cast<size_t>(target_ti.get_physical_coord_cols()),

      CHECK_EQ(static_cast<size_t>(target_ti.get_physical_coord_cols()),

      std::vector<llvm::Value*> coords;

      for (const auto& target_lv : target_lvs) {

        const size_t elem_sz = ctr == 0 ? 1 : 4;

        int32_t fixlen = -1;
        if (target_ti.get_type() == kPOINT) {

          const auto coords_cd = executor_->getPhysicalColumnDescriptor(col_var, 1);
          if (coords_cd && coords_cd->columnType.get_type() == kARRAY) {
            fixlen = coords_cd->columnType.get_size();

          coords.push_back(executor_->cgen_state_->emitExternalCall(
              "fast_fixlen_array_buff",

              {target_lv, code_generator.posArg(selected_target_expr)}));
          coords.push_back(executor_->cgen_state_->llInt(int64_t(fixlen)));

        coords.push_back(executor_->cgen_state_->emitExternalCall(

            {target_lv, code_generator.posArg(selected_target_expr)}));
        coords.push_back(executor_->cgen_state_->emitExternalCall(

            code_generator.posArg(selected_target_expr),

      return generate_coord_lvs(agg_expr->get_arg(), true);

    return generate_coord_lvs(target_expr,
                              !executor_->plan_state_->allow_lazy_fetch_);
  return agg_expr ? code_generator.codegen(agg_expr->get_arg(), true, co)
                  : code_generator.codegen(
                        target_expr, !executor_->plan_state_->allow_lazy_fetch_, co);
                                          const std::vector<llvm::Value*>& args) {

  return executor_->cgen_state_->emitCall(fname, args);
  auto zero_const = llvm::ConstantInt::get(retCode->getType(), 0, true);
  auto rc_check_condition = executor_->cgen_state_->ir_builder_.CreateICmp(
      llvm::ICmpInst::ICMP_EQ, retCode, zero_const);

  executor_->cgen_state_->emitErrorCheck(rc_check_condition, retCode, "rc");
    const auto grouped_col_expr =

    if (!grouped_col_expr) {

    const auto& column_key = grouped_col_expr->getColumnKey();
    if (column_key.table_id <= 0) {

        {column_key.db_id, column_key.table_id});
    if (td->shardedColumnId == column_key.column_id) {