#include "../CudaMgr/CudaMgr.h"
#include "../Shared/checked_alloc.h"
#include "../Shared/funcannotations.h"
#include "../Utils/ChunkIter.h"
// ...
#include <llvm/Transforms/Utils/BasicBlockUtils.h>
// ...
#include <string_view>
  return min == 0 && max == -1;
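  // Editor's note: {min = 0, max = -1} appears to be the sentinel for an empty
  // column range here, which is why isEmpty() reduces to the single check above.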
  out << "Hash Type = " << info.hash_type_ << " min = " << info.min
      << " max = " << info.max << " bucket = " << info.bucket
      << " has_nulls = " << info.has_nulls << "\n";
  // ...
      out << "UnorderedSet";
      // ...
      out << "<Unknown Type>";
  for (auto target_expr : target_exprs) {
    // ...
    if (!agg_expr || agg_expr->get_aggtype() == kSAMPLE) {
      // ...
      if (ti.is_buffer()) {
        // ...
      } else if (ti.is_geometry()) {
        agg_count += ti.get_physical_coord_cols() * 2;
      }
      // ...
    }
    if (agg_expr && agg_expr->get_aggtype() == kAVG) {
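      // Editor's note (hedged): the elided body counts each AVG as two output
      // slots, since AVG is materialized as a running sum plus a running count.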
// ...
  if (!cd || !cd->isVirtualCol) {
// ...
  for (const auto& target_expr : ra_exe_unit.target_exprs) {
// ...
    const int64_t max_entry_count) {
    const std::vector<InputTableInfo>& query_infos,
    // ...
    Executor* executor) {
  // ...
      expr, query_infos, executor, boost::make_optional(ra_exe_unit.simple_quals));
  switch (expr_range.getType()) {
    // ...
      if (expr_range.getIntMin() > expr_range.getIntMax()) {
        // ...
              expr_range.getIntMin(),
              expr_range.getIntMax(),
              expr_range.getBucket(),
              expr_range.hasNulls()};
      // ...
      if (expr_range.getFpMin() > expr_range.getFpMax()) {
  const int64_t baseline_threshold =
      // ...
  bool has_nulls{false};
  // ...
          group_cardinality_estimation,
          // ...
      cardinality *= crt_col_cardinality;
      if (col_range_info.has_nulls) {
        // ...
    if (!cardinality || cardinality > baseline_threshold) {
      // ...
              group_cardinality_estimation,
              // ...
            int64_t(cardinality),
            // ...
            group_cardinality_estimation,
            // ...
          group_cardinality_estimation,
          // ...
    return col_range_info;
  // ...
  static const int64_t MAX_BUFFER_SIZE = 1 << 30;
  const int64_t col_count =
      // ...
  int64_t max_entry_count = MAX_BUFFER_SIZE / (col_count * sizeof(int64_t));
  // ...
  max_entry_count = std::min(max_entry_count, baseline_threshold);
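  // Worked example (editor's sketch): with the 1 GiB buffer cap and, say,
  // col_count = 4, max_entry_count = (1 << 30) / (4 * 8) = 33,554,432 entries
  // before the baseline threshold is applied.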
  // ...
  if (groupby_expr_ti.is_string() && !col_range_info.bucket) {
    // ...
  const bool has_filters =
      // ...
                               col_range_info.has_nulls};
    // ...
      return col_range_info;
    // ...
                               col_range_info.has_nulls};
    // ...
      !col_range_info.bucket) {
    // ...
                               col_range_info.has_nulls};
  // ...
  return col_range_info;
  // ...
  if (col_range_info.bucket) {
    crt_col_cardinality /= col_range_info.bucket;
  }
  return static_cast<int64_t>(crt_col_cardinality +
                              (1 + (col_range_info.has_nulls ? 1 : 0)));
  // ...
  if (col_range_info.min <= col_range_info.max) {
    size_t size = col_range_info.max - col_range_info.min;
    if (col_range_info.bucket) {
      size /= col_range_info.bucket;
    }
    if (size >= static_cast<size_t>(std::numeric_limits<int64_t>::max())) {
      // ...
    }
    return static_cast<int64_t>(size + 1);
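  // Worked example (editor's sketch): for min = 10, max = 109, bucket = 10 the
  // bucketed size is (109 - 10) / 10 = 9, so the function returns 9 + 1 = 10
  // buckets; getBucketedCardinality() above additionally reserves an entry
  // when has_nulls is set.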
// ...

#define LL_CONTEXT executor_->cgen_state_->context_
#define LL_BUILDER executor_->cgen_state_->ir_builder_
#define LL_BOOL(v) executor_->cgen_state_->llBool(v)
#define LL_INT(v) executor_->cgen_state_->llInt(v)
#define LL_FP(v) executor_->cgen_state_->llFp(v)
#define ROW_FUNC executor_->cgen_state_->row_func_
#define CUR_FUNC executor_->cgen_state_->current_func_
    const std::vector<InputTableInfo>& query_infos,
    std::shared_ptr<RowSetMemoryOwner> row_set_mem_owner,
    const std::optional<int64_t>& group_cardinality_estimation)
    // ...
    , ra_exe_unit_(ra_exe_unit)
    , query_infos_(query_infos)
    , row_set_mem_owner_(row_set_mem_owner)
    , device_type_(device_type)
    , group_cardinality_estimation_(group_cardinality_estimation) {
    // ...
    const auto& groupby_ti = groupby_expr->get_type_info();
    if (groupby_ti.is_text_encoding_none()) {
      throw std::runtime_error(
          "Cannot group by string columns which are not dictionary encoded.");
    }
    if (groupby_ti.is_buffer()) {
      throw std::runtime_error("Group by buffer not supported");
    }
    if (groupby_ti.is_geometry()) {
      throw std::runtime_error("Group by geometry not supported");
    }
                                                 const size_t shard_count) const {
  size_t device_count{0};
  // ...
    device_count = executor_->cudaMgr()->getDeviceCount();
    // ...
  int64_t bucket{col_range_info.bucket};
  // ...
    if (device_count < shard_count) {
      bucket = g_leaf_count ? std::max(device_count, static_cast<size_t>(1))
                            : std::min(device_count, shard_count - device_count);
    } else {
      bucket = shard_count * std::max(g_leaf_count, static_cast<size_t>(1));
    }
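    // Worked example (editor's sketch): with 2 GPUs, shard_count = 4, and
    // g_leaf_count == 0, the first branch gives bucket = std::min(2, 4 - 2) = 2;
    // with 8 GPUs the else branch gives bucket = 4 * std::max(0, 1) = 4.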
                                 const std::vector<InputTableInfo>& query_infos,
                                 const bool is_group_by,
                                 Executor* executor) {
  bool keyless{true}, found{false};
  int32_t num_agg_expr{0};
  // ...
  for (const auto target_expr : ra_exe_unit.target_exprs) {
    // ...
    if (agg_info.is_agg) {
      // ...
      const auto arg_expr = agg_arg(target_expr);
      // ...
      switch (agg_info.agg_kind) {
        // ...
          if (arg_expr && !arg_expr->get_type_info().get_notnull()) {
            // ...
              expr_range_info.hasNulls()) {
          // ...
          if (arg_expr && !arg_expr->get_type_info().get_notnull()) {
            // ...
              expr_range_info.hasNulls()) {
          // ...
          auto arg_ti = arg_expr->get_type_info();
          // ...
            arg_ti.set_notnull(true);
          // ...
          if (!arg_ti.get_notnull()) {
            // ...
                !expr_range_info.hasNulls()) {
          // ...
          switch (expr_range_info.getType()) {
            // ...
              if (expr_range_info.getFpMax() < 0 || expr_range_info.getFpMin() > 0) {
              // ...
              if (expr_range_info.getIntMax() < 0 || expr_range_info.getIntMin() > 0) {
          // ...
          CHECK(agg_expr && agg_expr->get_arg());
          const auto& arg_ti = agg_expr->get_arg()->get_type_info();
          if (arg_ti.is_string() || arg_ti.is_buffer()) {
            // ...
          auto expr_range_info =
              // ...
                  is_group_by || float_argument_input,
                  float_argument_input ? sizeof(float) : 8);
          switch (expr_range_info.getType()) {
            // ...
                  *reinterpret_cast<const double*>(may_alias_ptr(&init_max));
              if (expr_range_info.getFpMax() < double_max) {
              // ...
              if (expr_range_info.getIntMax() < init_max) {
          // ...
          CHECK(agg_expr && agg_expr->get_arg());
          const auto& arg_ti = agg_expr->get_arg()->get_type_info();
          if (arg_ti.is_string() || arg_ti.is_buffer()) {
            // ...
          auto expr_range_info =
              // ...
              expr_range_info.hasNulls()) {
            // ...
                  is_group_by || float_argument_input,
                  float_argument_input ? sizeof(float) : 8);
          switch (expr_range_info.getType()) {
            // ...
                  *reinterpret_cast<const double*>(may_alias_ptr(&init_min));
              if (expr_range_info.getFpMin() > double_min) {
              // ...
              if (expr_range_info.getIntMin() > init_min) {
    const std::vector<InputTableInfo>& query_infos,
    // ...
    Executor* executor) {
  // ...
  auto compute_bytes_per_group =
      // ...
        size_t effective_size_bytes = (bitmap_sz + 7) / 8;
        const auto padded_size =
            // ...
                : effective_size_bytes;
        return padded_size * sub_bitmap_count;
      };
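  // Worked example (editor's sketch): a 1000-bit bitmap needs
  // (1000 + 7) / 8 = 125 bytes; if the elided branch pads that to an int64
  // boundary it becomes 128 bytes, so sub_bitmap_count = 4 yields
  // 128 * 4 = 512 bytes per group.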
  for (size_t i = 0; i < ra_exe_unit.target_exprs.size(); i++) {
    // ...
      CHECK(agg_info.is_agg);
      // ...
      if (arg_ti.is_text_encoding_none()) {
        throw std::runtime_error(
            "Strings must be dictionary-encoded for COUNT(DISTINCT).");
      }
      // ...
        throw std::runtime_error("APPROX_COUNT_DISTINCT on arrays not supported yet");
        // ...
        throw std::runtime_error(
            "APPROX_COUNT_DISTINCT on geometry columns not supported");
      }
      if (agg_info.is_distinct && arg_ti.is_geometry()) {
        throw std::runtime_error("COUNT DISTINCT on geometry columns not supported");
      }
      // ...
      auto arg_range_info =
          arg_ti.is_fp() ? no_range_info
                         // ...
                               ra_exe_unit, query_infos, agg_expr->get_arg(), executor);
      // ...
        const auto& original_target_expr_ti = it->second;
        if (arg_ti.is_integer() && original_target_expr_ti.get_type() == kDATE &&
          // ...
          auto is_date_value_not_encoded = [&original_target_expr_ti](int64_t date_val) {
            if (original_target_expr_ti.get_comp_param() == 16) {
              return date_val < INT16_MIN || date_val > INT16_MAX;
            }
            return date_val < INT32_MIN || date_val > INT32_MAX;
          };
          // ...
          if (is_date_value_not_encoded(arg_range_info.min)) {
            // ...
          }
          if (is_date_value_not_encoded(arg_range_info.max)) {
            // ...
          }
          // ...
          arg_range_info.bucket = 0;
      // ...
      int64_t bitmap_sz_bits{0};
      // ...
        const auto error_rate_expr = agg_expr->get_arg1();
        if (error_rate_expr) {
          CHECK(error_rate_expr->get_type_info().get_type() == kINT);
          auto const error_rate =
              // ...
          CHECK_GE(error_rate->get_constval().intval, 1);
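          // Editor's note (hedged): the validated error rate is translated into
          // a HyperLogLog register count in the elided code; since HLL's
          // relative error is roughly 1.04 / sqrt(m) for m registers, halving
          // the requested error rate quadruples the bitmap size.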
      // ...
      if (arg_range_info.isEmpty()) {
        count_distinct_descriptors.emplace_back(
            // ...
      const auto sub_bitmap_count =
          // ...
      size_t worst_case_num_groups{1};
      // ...
          !(arg_ti.is_buffer() || arg_ti.is_geometry())) {
        // ...
        if (shared::is_any<kCOUNT, kCOUNT_IF>(agg_info.agg_kind)) {
          // ...
          const auto total_bytes_per_entry =
              compute_bytes_per_group(bitmap_sz_bits, sub_bitmap_count, device_type);
          const auto range_bucket = std::max(group_by_range_info.bucket, (int64_t)1);
          const auto maximum_num_groups =
              (group_by_range_info.max - group_by_range_info.min + 1) / range_bucket;
          const auto total_bitmap_bytes_for_groups =
              total_bytes_per_entry * maximum_num_groups;
          // ...
          if (total_bitmap_bytes_for_groups >=
              // ...
            const auto agg_expr_max_entry_count =
                arg_range_info.max - arg_range_info.min + 1;
            int64_t max_agg_expr_table_cardinality{1};
            // ...
                     bool (*)(const Analyzer::ColumnVar*, const Analyzer::ColumnVar*)>
                // ...
            for (const auto cv : colvar_set) {
              // ...
                  std::find_if(query_infos.begin(),
                               // ...
                               [&](const auto& input_table_info) {
                                 return input_table_info.table_key == cv->getTableKey();
                               // ...
              int64_t cur_table_cardinality =
                  it != query_infos.end()
                      ? static_cast<int64_t>(it->info.getNumTuplesUpperBound())
                      // ...
              max_agg_expr_table_cardinality =
                  std::max(max_agg_expr_table_cardinality, cur_table_cardinality);
              worst_case_num_groups *= cur_table_cardinality;
            }
            auto has_valid_stat = [agg_expr_max_entry_count, maximum_num_groups]() {
              return agg_expr_max_entry_count > 0 && maximum_num_groups > 0;
            };
            // ...
            if (has_valid_stat()) {
              // ...
              const size_t unordered_set_threshold{2};
              // ...
              const auto bits_for_agg_entry = std::ceil(log(agg_expr_max_entry_count));
              const auto bits_for_agg_table =
                  std::ceil(log(max_agg_expr_table_cardinality));
              const auto avg_num_unique_entries_per_group =
                  std::ceil(max_agg_expr_table_cardinality / maximum_num_groups);
              // ...
              if ((bits_for_agg_entry - bits_for_agg_table) >= unordered_set_threshold ||
                  agg_expr_max_entry_count >= avg_num_unique_entries_per_group) {
                // ...
                throw std::runtime_error(
                    "Consider using approx_count_distinct operator instead of "
                    "count_distinct operator to lower the memory "
                    // ...
          // ...
          !(arg_ti.is_array() || arg_ti.is_geometry())) {
        // ...
        const size_t too_many_entries{100000000};
        // ...
            worst_case_num_groups > too_many_entries &&
            // ...
              "Detected too many input entries for set-based count distinct operator under "
              // ...
      count_distinct_descriptors.emplace_back(
          // ...
  return count_distinct_descriptors;
    const bool allow_multifrag,
    const size_t max_groups_buffer_entry_count,
    const int8_t crt_min_byte_width,
    // ...
    const bool output_columnar_hint) {
  // ...
  bool sort_on_gpu_hint =
      // ...
  bool must_use_baseline_sort = shard_count;
  // ...
                                      max_groups_buffer_entry_count,
                                      // ...
                                      must_use_baseline_sort,
                                      output_columnar_hint);
  CHECK(query_mem_desc);
  if (query_mem_desc->sortOnGpu() &&
      // ...
       align_to_int64(query_mem_desc->getEntryCount() * sizeof(int32_t))) >
          2 * 1024 * 1024 * 1024LL) {
    must_use_baseline_sort = true;
    sort_on_gpu_hint = false;
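    // Worked example (editor's sketch): at 600M entries the estimate is
    // roughly 600M * sizeof(int32_t) = 2.4 GB after alignment, which exceeds
    // the 2 * 1024^3-byte limit above, so GPU sort is abandoned in favor of
    // baseline sort.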
    const bool allow_multifrag,
    const size_t max_groups_buffer_entry_count,
    const int8_t crt_min_byte_width,
    const bool sort_on_gpu_hint,
    // ...
    const bool must_use_baseline_sort,
    const bool output_columnar_hint) {
  // ...
  const bool threads_can_reuse_group_by_buffers =
      // ...
    const auto col_range_info =
        // ...
                      col_range_info_nosharding.min,
                      col_range_info_nosharding.max,
                      // ...
                      col_range_info_nosharding.has_nulls};
    // ...
    const auto keyless_info =
        // ...
        (col_range_info.max - col_range_info.min) /
                std::max(col_range_info.bucket, int64_t(1)) >
            // ...
                                         max_groups_buffer_entry_count,
                                         // ...
                                         count_distinct_descriptors,
                                         must_use_baseline_sort,
                                         output_columnar_hint,
                                         // ...
                                         threads_can_reuse_group_by_buffers);
    // ...
      LOG(WARNING) << e.what() << " Disabling Streaming Top N.";
      // ...
                                   max_groups_buffer_entry_count,
                                   // ...
                                   count_distinct_descriptors,
                                   must_use_baseline_sort,
                                   output_columnar_hint,
                                   // ...
                                   threads_can_reuse_group_by_buffers);
    const std::list<Analyzer::OrderEntry>& order_entries) {
  if (order_entries.size() > 1) {
    // ...
  }
  for (const auto& order_entry : order_entries) {
    // ...
    if (!dynamic_cast<Analyzer::AggExpr*>(target_expr)) {
      // ...
    if (agg_expr->get_is_distinct() || agg_expr->get_aggtype() == kAVG ||
        agg_expr->get_aggtype() == kMIN || agg_expr->get_aggtype() == kMAX ||
        // ...
    if (agg_expr->get_arg()) {
      // ...
      if (arg_ti.is_fp()) {
        // ...
      auto expr_range_info =
          // ...
           expr_range_info.has_nulls) &&
          order_entry.is_desc == order_entry.nulls_first) {
        // ...
    const auto& target_ti = target_expr->get_type_info();
    CHECK(!target_ti.is_buffer());
    if (!target_ti.is_integer()) {
                                 llvm::BasicBlock* sc_false,
                                 // ...
  CHECK(filter_result);
  // ...
  bool can_return_error = false;
  llvm::BasicBlock* filter_false{nullptr};
  // ...
      llvm::Value* old_total_matched_val{nullptr};
      // ...
        old_total_matched_val =
            LL_BUILDER.CreateAtomicRMW(llvm::AtomicRMWInst::Add,
                                       // ...
#if LLVM_VERSION_MAJOR > 12
                                       // ...
#endif
                                       llvm::AtomicOrdering::Monotonic);
      // ...
        old_total_matched_val = LL_BUILDER.CreateLoad(
            total_matched_ptr->getType()->getPointerElementType(), total_matched_ptr);
      // ...
      LL_BUILDER.CreateStore(old_total_matched_val, old_total_matched_ptr);
    // ...
    auto agg_out_ptr_w_idx = codegenGroupBy(query_mem_desc, co, filter_cfg);
    // ...
      filter_cfg.setChainToNext();
      // ...
                                     varlen_output_buffer,
                                     // ...
      llvm::Value* nullcheck_cond{nullptr};
      // ...
        nullcheck_cond = LL_BUILDER.CreateICmpSGE(std::get<1>(agg_out_ptr_w_idx),
                                                  // ...
            std::get<0>(agg_out_ptr_w_idx),
            llvm::ConstantPointerNull::get(
                // ...
            nullcheck_cond, executor_, false, "groupby_nullcheck", &filter_cfg, false);
        // ...
                                       varlen_output_buffer,
                                       // ...
      can_return_error = true;
      // ...
              code_generator.posArg(nullptr),
              // ...
    std::stack<llvm::BasicBlock*> array_loops;
    // ...
    auto arg_it = ROW_FUNC->arg_begin();
    std::vector<llvm::Value*> agg_out_vec;
    // ...
      agg_out_vec.push_back(&*arg_it++);
    // ...
  } else if (sc_false) {
    const auto saved_insert_block = LL_BUILDER.GetInsertBlock();
    // ...
    LL_BUILDER.SetInsertPoint(saved_insert_block);
  }
  // ...
  return can_return_error;
                                                 llvm::Value* groups_buffer,
                                                 // ...
          : query_mem_desc.getRowSize() / sizeof(int64_t);
  // ...
    CHECK_GE(only_order_entry.tle_no, int(1));
    const size_t target_idx = only_order_entry.tle_no - 1;
    // ...
    const auto chosen_bytes =
        // ...
    auto order_entry_lv = executor_->cgen_state_->castToTypeIn(
        code_generator.codegen(order_entry_expr, true, co).front(), chosen_bytes * 8);
    // ...
    std::string fname = "get_bin_from_k_heap";
    const auto& oe_ti = order_entry_expr->get_type_info();
    llvm::Value* null_key_lv = nullptr;
    if (oe_ti.is_integer() || oe_ti.is_decimal() || oe_ti.is_time()) {
      const size_t bit_width = order_entry_lv->getType()->getIntegerBitWidth();
      switch (bit_width) {
        // ...
      CHECK(oe_ti.is_fp());
      if (order_entry_lv->getType()->isDoubleTy()) {
        // ...
      fname += order_entry_lv->getType()->isDoubleTy() ? "_double" : "_float";
    // ...
    const auto key_slot_idx =
        // ...
         LL_BOOL(only_order_entry.is_desc),
         LL_BOOL(!order_entry_expr->get_type_info().get_notnull()),
         LL_BOOL(only_order_entry.nulls_first),
         // ...
    const auto output_buffer_entry_count_lv =
        LL_BUILDER.CreateLoad(arg->getType()->getPointerElementType(), arg);
    // ...
    const auto group_expr_lv =
        LL_BUILDER.CreateLoad(arg->getType()->getPointerElementType(), arg);
    std::vector<llvm::Value*> args{groups_buffer,
                                   output_buffer_entry_count_lv,
                                   // ...
                                   code_generator.posArg(nullptr)};
    // ...
    const auto columnar_output_offset =
        // ...
    return columnar_output_offset;
  auto arg_it = ROW_FUNC->arg_begin();
  auto groups_buffer = arg_it++;
  // ...
  std::stack<llvm::BasicBlock*> array_loops;
  // ...
      return std::make_tuple(
          // ...
      return std::make_tuple(
          // ...
          : query_mem_desc.getRowSize() / sizeof(int64_t);
  // ...
  llvm::Value* group_key = nullptr;
  llvm::Value* key_size_lv = nullptr;
  // ...
        col_width_size == sizeof(int32_t)
            // ...
    int32_t subkey_idx = 0;
    // ...
      const auto col_range_info =
          // ...
      const auto translated_null_value = static_cast<int64_t>(
          // ...
          (col_range_info.bucket ? col_range_info.bucket : 1));
      // ...
      const bool col_has_nulls =
          // ...
                    : col_range_info.has_nulls)
          // ...
      const auto group_expr_lvs =
          executor_->groupByColumnCodegen(group_expr.get(),
                                          // ...
                                          translated_null_value,
                                          // ...
      const auto group_expr_lv = group_expr_lvs.translated_value;
      // ...
              group_expr_lvs.original_value,
              // ...
          group_key->getType()->getScalarType()->getPointerElementType(),
          // ...
        &*groups_buffer, group_key, key_size_lv, query_mem_desc, row_size_quad);
  // ...
  return std::make_tuple(nullptr, nullptr);
  auto arg_it = ROW_FUNC->arg_begin();
  // ...
  auto varlen_output_buffer = arg_it++;
  CHECK(varlen_output_buffer->getType() == llvm::Type::getInt64PtrTy(LL_CONTEXT));
  return varlen_output_buffer;
std::tuple<llvm::Value*, llvm::Value*>
// ...
    llvm::Value* groups_buffer,
    llvm::Value* group_expr_lv_translated,
    llvm::Value* group_expr_lv_original,
    const int32_t row_size_quad) {
  // ...
      ? "get_columnar_group_bin_offset"
      : "get_group_value_fast"};
  // ...
    get_group_fn_name += "_keyless";
  // ...
    get_group_fn_name += "_semiprivate";
  // ...
  std::vector<llvm::Value*> get_group_fn_args{&*groups_buffer,
                                              &*group_expr_lv_translated};
  if (group_expr_lv_original && get_group_fn_name == "get_group_value_fast" &&
      // ...
    get_group_fn_name += "_with_original_key";
    get_group_fn_args.push_back(group_expr_lv_original);
  }
  // ...
    get_group_fn_args.push_back(LL_INT(row_size_quad));
    // ...
    get_group_fn_args.push_back(LL_INT(row_size_quad));
    // ...
      get_group_fn_args.push_back(warp_idx);
      // ...
  if (get_group_fn_name == "get_columnar_group_bin_offset") {
    return std::make_tuple(&*groups_buffer,
                           emitCall(get_group_fn_name, get_group_fn_args));
  }
  return std::make_tuple(emitCall(get_group_fn_name, get_group_fn_args), nullptr);
    llvm::Value* groups_buffer,
    llvm::Value* group_key,
    llvm::Value* key_size_lv,
    // ...
    const int32_t row_size_quad) {
  // ...
      LL_BUILDER.CreateCall(perfect_hash_func, std::vector<llvm::Value*>{group_key});
  // ...
    const std::string set_matching_func_name{
        "set_matching_group_value_perfect_hash_columnar"};
    const std::vector<llvm::Value*> set_matching_func_arg{
        // ...
    emitCall(set_matching_func_name, set_matching_func_arg);
    // ...
    return std::make_tuple(groups_buffer, hash_lv);
  // ...
    return std::make_tuple(
        emitCall("get_matching_group_value_perfect_hash_keyless",
                 {groups_buffer, hash_lv, LL_INT(row_size_quad)}),
        // ...
  return std::make_tuple(
      emitCall("get_matching_group_value_perfect_hash",
               {groups_buffer, hash_lv, group_key, key_size_lv, LL_INT(row_size_quad)}),
      // ...
std::tuple<llvm::Value*, llvm::Value*>
// ...
    llvm::Value* groups_buffer,
    llvm::Value* group_key,
    llvm::Value* key_size_lv,
    // ...
    const size_t key_width,
    const int32_t row_size_quad) {
  // ...
  if (group_key->getType() != llvm::Type::getInt64PtrTy(LL_CONTEXT)) {
    CHECK(key_width == sizeof(int32_t));
    // ...
  std::vector<llvm::Value*> func_args{
      // ...
      LL_INT(static_cast<int32_t>(key_width))};
  std::string func_name{"get_group_value"};
  // ...
    func_name += "_columnar_slot";
  // ...
    func_args.push_back(LL_INT(row_size_quad));
  // ...
    func_name += "_with_watchdog";
  // ...
    return std::make_tuple(groups_buffer, emitCall(func_name, func_args));
  // ...
  return std::make_tuple(emitCall(func_name, func_args), nullptr);
  auto ft = llvm::FunctionType::get(
      // ...
  auto key_hash_func = llvm::Function::Create(ft,
                                              llvm::Function::ExternalLinkage,
                                              // ...
  executor_->cgen_state_->helper_functions_.push_back(key_hash_func);
  // ...
  auto& key_buff_arg = *key_hash_func->args().begin();
  llvm::Value* key_buff_lv = &key_buff_arg;
  auto bb = llvm::BasicBlock::Create(LL_CONTEXT, "entry", key_hash_func);
  llvm::IRBuilder<> key_hash_func_builder(bb);
  // ...
  std::vector<int64_t> cardinalities;
  // ...
    auto col_range_info =
        // ...
    auto* gep = key_hash_func_builder.CreateGEP(
        key_buff_lv->getType()->getScalarType()->getPointerElementType(),
        // ...
        key_hash_func_builder.CreateLoad(gep->getType()->getPointerElementType(), gep);
    auto col_range_info =
        // ...
        key_hash_func_builder.CreateSub(key_comp_lv, LL_INT(col_range_info.min));
    if (col_range_info.bucket) {
      // ...
          key_hash_func_builder.CreateSDiv(crt_term_lv, LL_INT(col_range_info.bucket));
    }
    for (size_t prev_dim_idx = 0; prev_dim_idx < dim_idx; ++prev_dim_idx) {
      crt_term_lv = key_hash_func_builder.CreateMul(crt_term_lv,
                                                    LL_INT(cardinalities[prev_dim_idx]));
    }
    hash_lv = key_hash_func_builder.CreateAdd(hash_lv, crt_term_lv);
  // ...
  key_hash_func_builder.CreateRet(
      // ...
  return key_hash_func;
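// Worked example (editor's sketch): for two group-by keys k0 in [0, 9] and
// k1 in [0, 4] (cardinalities 10 and 5, bucket 1), the generated function
// computes hash = (k0 - 0) + (k1 - 0) * 10; each later dimension's term is
// scaled by the product of the cardinalities of the dimensions before it,
// yielding a unique slot in [0, 49].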
                                                   llvm::Value* target) {
  // ...
  const auto& agg_type = agg_info.sql_type;
  const size_t chosen_bytes = agg_type.get_size();
  // ...
  bool need_conversion{false};
  llvm::Value* arg_null{nullptr};
  llvm::Value* agg_null{nullptr};
  llvm::Value* target_to_cast{target};
  if (arg_type.is_fp()) {
    arg_null = executor_->cgen_state_->inlineFpNull(arg_type);
    if (agg_type.is_fp()) {
      agg_null = executor_->cgen_state_->inlineFpNull(agg_type);
      if (!static_cast<llvm::ConstantFP*>(arg_null)->isExactlyValue(
              static_cast<llvm::ConstantFP*>(agg_null)->getValueAPF())) {
        need_conversion = true;
      }
    // ...
    arg_null = executor_->cgen_state_->inlineIntNull(arg_type);
    if (agg_type.is_fp()) {
      agg_null = executor_->cgen_state_->inlineFpNull(agg_type);
      need_conversion = true;
      target_to_cast = executor_->castToFP(target, arg_type, agg_type);
    } else {
      agg_null = executor_->cgen_state_->inlineIntNull(agg_type);
      if ((static_cast<llvm::ConstantInt*>(arg_null)->getBitWidth() !=
           static_cast<llvm::ConstantInt*>(agg_null)->getBitWidth()) ||
          (static_cast<llvm::ConstantInt*>(arg_null)->getValue() !=
           static_cast<llvm::ConstantInt*>(agg_null)->getValue())) {
        need_conversion = true;
      }
    }
  }
  if (need_conversion) {
    auto cmp = arg_type.is_fp() ? LL_BUILDER.CreateFCmpOEQ(target, arg_null)
                                // ...
        executor_->cgen_state_->castToTypeIn(target_to_cast, chosen_bytes << 3));
  const auto window_func_context =
      // ...
          : query_mem_desc.getRowSize() / sizeof(int64_t);
  auto arg_it = ROW_FUNC->arg_begin();
  auto groups_buffer = arg_it++;
  // ...
      window_func_context, code_generator.posArg(nullptr));
  const auto pos_in_window =
      // ...
  llvm::Value* entry_count_lv =
      // ...
  std::vector<llvm::Value*> args{
      &*groups_buffer, entry_count_lv, pos_in_window, code_generator.posArg(nullptr)};
  // ...
  const auto columnar_output_offset =
      // ...
  auto arg_it = ROW_FUNC->arg_begin();
  auto groups_buffer = arg_it++;
    const std::tuple<llvm::Value*, llvm::Value*>& agg_out_ptr_w_idx_in,
    llvm::Value* varlen_output_buffer,
    const std::vector<llvm::Value*>& agg_out_vec,
    // ...
  auto agg_out_ptr_w_idx = agg_out_ptr_w_idx_in;
  // ...
  const bool is_group_by = std::get<0>(agg_out_ptr_w_idx);
  bool can_return_error = false;
  // ...
    CHECK(agg_out_vec.empty());
  // ...
    CHECK(!agg_out_vec.empty());
  // ...
  llvm::Value* output_buffer_byte_stream{nullptr};
  llvm::Value* out_row_idx{nullptr};
  // ...
    output_buffer_byte_stream = LL_BUILDER.CreateBitCast(
        std::get<0>(agg_out_ptr_w_idx),
        llvm::PointerType::get(llvm::Type::getInt8Ty(LL_CONTEXT), 0));
    output_buffer_byte_stream->setName("out_buff_b_stream");
    CHECK(std::get<1>(agg_out_ptr_w_idx));
    out_row_idx = LL_BUILDER.CreateZExt(std::get<1>(agg_out_ptr_w_idx),
                                        // ...
    out_row_idx->setName("out_row_idx");
  // ...
    target_builder(target_expr, executor_, query_mem_desc, co);
    // ...
                   output_buffer_byte_stream,
                   // ...
                   varlen_output_buffer,
                   // ...
  return can_return_error;
    llvm::Value* output_buffer_byte_stream,
    llvm::Value* out_row_idx,
    const std::tuple<llvm::Value*, llvm::Value*>& agg_out_ptr_w_idx,
    // ...
    const size_t chosen_bytes,
    const size_t agg_out_off,
    const size_t target_idx) {
  // ...
  llvm::Value* agg_col_ptr{nullptr};
  // ...
    CHECK(chosen_bytes == 1 || chosen_bytes == 2 || chosen_bytes == 4 ||
          // ...
    CHECK(output_buffer_byte_stream);
    // ...
    auto out_per_col_byte_idx =
#ifdef _WIN32
        LL_BUILDER.CreateShl(out_row_idx, __lzcnt(chosen_bytes) - 1);
#else
        // For a power-of-two slot width, __builtin_ffs(chosen_bytes) - 1 equals
        // log2(chosen_bytes), so the shift multiplies out_row_idx by the width.
        LL_BUILDER.CreateShl(out_row_idx, __builtin_ffs(chosen_bytes) - 1);
#endif
    auto byte_offset = LL_BUILDER.CreateAdd(out_per_col_byte_idx,
                                            LL_INT(static_cast<int64_t>(col_off)));
    byte_offset->setName("out_byte_off_target_" + std::to_string(target_idx));
    // ...
        output_buffer_byte_stream->getType()->getScalarType()->getPointerElementType(),
        output_buffer_byte_stream,
        // ...
    agg_col_ptr->setName("out_ptr_target_" + std::to_string(target_idx));
  // ...
    auto const col_off_in_bytes = query_mem_desc.getColOffInBytes(agg_out_off);
    auto const col_off = col_off_in_bytes / chosen_bytes;
    auto const col_rem = col_off_in_bytes % chosen_bytes;
    CHECK_EQ(col_rem, 0u) << col_off_in_bytes << " % " << chosen_bytes;
    CHECK(std::get<1>(agg_out_ptr_w_idx));
    // ...
        std::get<1>(agg_out_ptr_w_idx),
        // ...
        std::get<0>(agg_out_ptr_w_idx),
        // ...
        bit_cast->getType()->getScalarType()->getPointerElementType(),
        // ...
    auto const col_off = col_off_in_bytes / chosen_bytes;
    auto const col_rem = col_off_in_bytes % chosen_bytes;
    CHECK_EQ(col_rem, 0u) << col_off_in_bytes << " % " << chosen_bytes;
    // ...
        std::get<0>(agg_out_ptr_w_idx),
        // ...
        bit_cast->getType()->getScalarType()->getPointerElementType(),
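    // Worked example (editor's sketch): in the columnar branch above, for
    // chosen_bytes = 4 the shift amount is __builtin_ffs(4) - 1 = 2, so
    // out_per_col_byte_idx = out_row_idx << 2 = out_row_idx * 4; adding the
    // column's base offset yields the byte address of this row's output slot.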
  auto estimator_comp_count_lv = LL_INT(static_cast<int32_t>(estimator_arg.size()));
  // ...
                                     estimator_comp_count_lv);
  int32_t subkey_idx = 0;
  for (const auto& estimator_arg_comp : estimator_arg) {
    const auto estimator_arg_comp_lvs =
        executor_->groupByColumnCodegen(estimator_arg_comp.get(),
                                        // ...
    CHECK(!estimator_arg_comp_lvs.original_value);
    const auto estimator_arg_comp_lv = estimator_arg_comp_lvs.translated_value;
    // ...
        estimator_arg_comp_lv,
        // ...
        estimator_key_lv->getType()->getScalarType()->getPointerElementType(),
        // ...
  const auto key_bytes = LL_BUILDER.CreateBitCast(estimator_key_lv, int8_ptr_ty);
  const auto estimator_comp_bytes_lv =
      LL_INT(static_cast<int32_t>(estimator_arg.size() * sizeof(int64_t)));
  const auto bitmap_size_lv =
      // ...
      {bitmap, &*bitmap_size_lv, key_bytes, &*estimator_comp_bytes_lv});
                                                       const int64_t skip_val) {
  if (val != skip_val) {
    // ...
  auto* mode_map = reinterpret_cast<AggMode*>(*agg);
    const size_t target_idx,
    // ...
    std::vector<llvm::Value*>& agg_args,
    // ...
  const auto& arg_ti =
      // ...
  if (arg_ti.is_fp()) {
    agg_args.back() = executor_->cgen_state_->ir_builder_.CreateBitCast(
        // ...
  const auto& count_distinct_descriptor =
      // ...
    agg_args.push_back(LL_INT(int32_t(count_distinct_descriptor.bitmap_sz_bits)));
    // ...
      agg_args.push_back(base_dev_addr);
      agg_args.push_back(base_host_addr);
      emitCall("agg_approximate_count_distinct_gpu", agg_args);
    // ...
      emitCall("agg_approximate_count_distinct", agg_args);
    // ...
  std::string agg_fname{"agg_count_distinct"};
  // ...
    agg_fname += "_bitmap";
    agg_args.push_back(LL_INT(static_cast<int64_t>(count_distinct_descriptor.min_val)));
  // ...
  if (agg_info.skip_null_val) {
    auto null_lv = executor_->cgen_state_->castToTypeIn(
        // ...
            ? static_cast<llvm::Value*>(executor_->cgen_state_->inlineFpNull(arg_ti))
            : static_cast<llvm::Value*>(executor_->cgen_state_->inlineIntNull(arg_ti))),
        // ...
    null_lv = executor_->cgen_state_->ir_builder_.CreateBitCast(
        // ...
    agg_fname += "_skip_val";
    agg_args.push_back(null_lv);
  // ...
      agg_fname += "_gpu";
      // ...
      agg_args.push_back(base_dev_addr);
      agg_args.push_back(base_host_addr);
      agg_args.push_back(LL_INT(int64_t(count_distinct_descriptor.sub_bitmap_count)));
      // ...
               count_distinct_descriptor.bitmapPaddedSizeBytes() %
                   count_distinct_descriptor.sub_bitmap_count);
      agg_args.push_back(
          LL_INT(int64_t(count_distinct_descriptor.bitmapPaddedSizeBytes() /
                         count_distinct_descriptor.sub_bitmap_count)));
  // ...
    executor_->cgen_state_->emitExternalCall(
        agg_fname, llvm::Type::getVoidTy(LL_CONTEXT), agg_args);
    const size_t target_idx,
    // ...
    std::vector<llvm::Value*>& agg_args,
    // ...
  llvm::BasicBlock *calc, *skip{nullptr};
  // ...
  auto* cs = executor_->cgen_state_.get();
  auto& irb = cs->ir_builder_;
  // ...
    auto* const null_value = cs->castToTypeIn(cs->inlineNull(arg_ti), 64);
    auto* const skip_cond = arg_ti.is_fp()
                                ? irb.CreateFCmpOEQ(agg_args.back(), null_value)
                                : irb.CreateICmpEQ(agg_args.back(), null_value);
    calc = llvm::BasicBlock::Create(cs->context_, "calc_approx_quantile");
    skip = llvm::BasicBlock::Create(cs->context_, "skip_approx_quantile");
    irb.CreateCondBr(skip_cond, skip, calc);
    cs->current_func_->getBasicBlockList().push_back(calc);
    irb.SetInsertPoint(calc);
  // ...
  if (!arg_ti.is_fp()) {
    // ...
    agg_args.back() = executor_->castToFP(agg_args.back(), arg_ti, agg_info.sql_type);
  }
  cs->emitExternalCall(
      "agg_approx_quantile", llvm::Type::getVoidTy(cs->context_), agg_args);
  // ...
    cs->current_func_->getBasicBlockList().push_back(skip);
    irb.SetInsertPoint(skip);
    std::vector<llvm::Value*>& agg_args,
    // ...
  llvm::BasicBlock *calc, *skip{nullptr};
  // ...
  bool const is_fp = arg_ti.is_fp();
  auto* cs = executor_->cgen_state_.get();
  auto& irb = cs->ir_builder_;
  // ...
    auto* const null_value =
        is_fp ? cs->inlineNull(arg_ti) : cs->castToTypeIn(cs->inlineNull(arg_ti), 64);
    auto* const skip_cond = is_fp ? irb.CreateFCmpOEQ(agg_args.back(), null_value)
                                  : irb.CreateICmpEQ(agg_args.back(), null_value);
    calc = llvm::BasicBlock::Create(cs->context_, "calc_mode");
    skip = llvm::BasicBlock::Create(cs->context_, "skip_mode");
    irb.CreateCondBr(skip_cond, skip, calc);
    cs->current_func_->getBasicBlockList().push_back(calc);
    irb.SetInsertPoint(calc);
  // ...
    auto* const int_type = get_int_type(8 * arg_ti.get_size(), cs->context_);
    agg_args.back() = irb.CreateBitCast(agg_args.back(), int_type);
  // ...
  cs->emitExternalCall("agg_mode_func", llvm::Type::getVoidTy(cs->context_), agg_args);
  // ...
    cs->current_func_->getBasicBlockList().push_back(skip);
    irb.SetInsertPoint(skip);
  // ...
  return LL_BUILDER.CreateLoad(gep->getType()->getPointerElementType(), gep);
  if (target_ti.is_buffer() &&
      !executor_->plan_state_->isLazyFetchColumn(target_expr)) {
    const auto target_lvs =
        agg_expr ? code_generator.codegen(agg_expr->get_arg(), true, co)
                 // ...
                       target_expr, !executor_->plan_state_->allow_lazy_fetch_, co);
    if (!func_expr && !arr_expr) {
      // ...
      if (target_ti.is_text_encoding_none()) {
        CHECK_EQ(size_t(3), target_lvs.size());
        return {target_lvs[1], target_lvs[2]};
      }
      CHECK(target_ti.is_array());
      CHECK_EQ(size_t(1), target_lvs.size());
      CHECK(!agg_expr || agg_expr->get_aggtype() == kSAMPLE);
      // ...
      const auto& elem_ti = target_ti.get_elem_type();
      // ...
          executor_->cgen_state_->emitExternalCall(
              // ...
              {target_lvs.front(), code_generator.posArg(target_expr)}),
          executor_->cgen_state_->emitExternalCall(
              // ...
              {target_lvs.front(),
               code_generator.posArg(target_expr),
               // ...
      throw std::runtime_error(
          "Using array[] operator as argument to an aggregate operator is not "
          // ...
    CHECK(func_expr || arr_expr);
    if (dynamic_cast<const Analyzer::FunctionOper*>(target_expr)) {
      CHECK_EQ(size_t(1), target_lvs.size());
      // ...
      const auto prefix = target_ti.get_buffer_name();
      CHECK(target_ti.is_array() || target_ti.is_text_encoding_none());
      const auto target_lv = LL_BUILDER.CreateLoad(
          target_lvs[0]->getType()->getPointerElementType(), target_lvs[0]);
      // ...
      const auto i8p_ty = llvm::PointerType::get(
          // ...
      const auto ptr = LL_BUILDER.CreatePointerCast(
          LL_BUILDER.CreateExtractValue(target_lv, 0), i8p_ty);
      const auto size = LL_BUILDER.CreateExtractValue(target_lv, 1);
      const auto null_flag = LL_BUILDER.CreateExtractValue(target_lv, 2);
      const auto nullcheck_ok_bb =
          // ...
      const auto nullcheck_fail_bb = llvm::BasicBlock::Create(
          // ...
      const auto nullcheck = LL_BUILDER.CreateICmpEQ(
          null_flag, executor_->cgen_state_->llInt(static_cast<int8_t>(1)));
      LL_BUILDER.CreateCondBr(nullcheck, nullcheck_fail_bb, nullcheck_ok_bb);
      // ...
      auto result_phi = LL_BUILDER.CreatePHI(i8p_ty, 2, prefix + "_ptr_return");
      result_phi->addIncoming(ptr, nullcheck_ok_bb);
      const auto null_arr_sentinel = LL_BUILDER.CreateIntToPtr(
          executor_->cgen_state_->llInt(static_cast<int8_t>(0)), i8p_ty);
      result_phi->addIncoming(null_arr_sentinel, nullcheck_fail_bb);
      // ...
      executor_->cgen_state_->emitExternalCall(
          "register_buffer_with_executor_rsm",
          llvm::Type::getVoidTy(executor_->cgen_state_->context_),
          // ...
      LL_BUILDER.SetInsertPoint(nullcheck_fail_bb);
      // ...
      return {result_phi, size};
    }
    CHECK_EQ(size_t(2), target_lvs.size());
    return {target_lvs[0], target_lvs[1]};
  }
  if (target_ti.is_geometry() &&
      !executor_->plan_state_->isLazyFetchColumn(target_expr)) {
    auto generate_coord_lvs =
        [&](auto* selected_target_expr,
            bool const fetch_columns) -> std::vector<llvm::Value*> {
      const auto target_lvs =
          code_generator.codegen(selected_target_expr, fetch_columns, co);
      if (dynamic_cast<const Analyzer::GeoOperator*>(target_expr) &&
          // ...
      if (geo_uoper || geo_binoper) {
        // ...
        CHECK_EQ(2 * static_cast<size_t>(target_ti.get_physical_coord_cols()),
                 // ...
      CHECK_EQ(static_cast<size_t>(target_ti.get_physical_coord_cols()),
               // ...
      std::vector<llvm::Value*> coords;
      // ...
      for (const auto& target_lv : target_lvs) {
        // ...
        const size_t elem_sz = ctr == 0 ? 1 : 4;
        // ...
        int32_t fixlen = -1;
        if (target_ti.get_type() == kPOINT) {
          // ...
          const auto coords_cd = executor_->getPhysicalColumnDescriptor(col_var, 1);
          if (coords_cd && coords_cd->columnType.get_type() == kARRAY) {
            fixlen = coords_cd->columnType.get_size();
          // ...
          coords.push_back(executor_->cgen_state_->emitExternalCall(
              "fast_fixlen_array_buff",
              // ...
              {target_lv, code_generator.posArg(selected_target_expr)}));
          auto fixed_len_lv = executor_->cgen_state_->emitExternalCall(
              "determine_fixed_array_len",
              // ...
              {target_lv, executor_->cgen_state_->llInt(int64_t(fixlen))});
          coords.push_back(fixed_len_lv);
          // ...
        coords.push_back(executor_->cgen_state_->emitExternalCall(
            // ...
            {target_lv, code_generator.posArg(selected_target_expr)}));
        coords.push_back(executor_->cgen_state_->emitExternalCall(
            // ...
             code_generator.posArg(selected_target_expr),
             // ...
    };
    // ...
      return generate_coord_lvs(agg_expr->get_arg(), true);
    // ...
    return generate_coord_lvs(target_expr,
                              !executor_->plan_state_->allow_lazy_fetch_);
  }
  // ...
  bool fetch_column = !executor_->plan_state_->allow_lazy_fetch_;
  return agg_expr ? code_generator.codegen(agg_expr->get_arg(), true, co)
                  : code_generator.codegen(target_expr, fetch_column, co);
                                           const std::vector<llvm::Value*>& args) {
  // ...
  return executor_->cgen_state_->emitCall(fname, args);
// ...
  auto zero_const = llvm::ConstantInt::get(retCode->getType(), 0, true);
  auto rc_check_condition = executor_->cgen_state_->ir_builder_.CreateICmp(
      llvm::ICmpInst::ICMP_EQ, retCode, zero_const);
  // ...
  executor_->cgen_state_->emitErrorCheck(rc_check_condition, retCode, "rc");
  const auto grouped_col_expr =
      // ...
  if (!grouped_col_expr) {
    // ...
  const auto& column_key = grouped_col_expr->getColumnKey();
  if (column_key.table_id <= 0) {
    // ...
      {column_key.db_id, column_key.table_id});
  if (td->shardedColumnId == column_key.column_id) {