#include <boost/math/special_functions/fpclassify.hpp>
TargetValue make_avg_target_value(const int8_t* ptr1,
                                  const int8_t compact_sz1,
                                  const int8_t* ptr2,
                                  const int8_t compact_sz2,
                                  const TargetInfo& target_info) {
  // ...
  const auto actual_compact_sz1 =
      float_argument_input ? sizeof(float) : compact_sz1;
  // ...
  if (agg_ti.is_integer() || agg_ti.is_decimal()) {
    // ...
  } else if (agg_ti.is_fp()) {
    switch (actual_compact_sz1) {
      case 8: {
        double d = *reinterpret_cast<const double*>(ptr1);
        sum = *reinterpret_cast<const int64_t*>(may_alias_ptr(&d));
        break;
      }
      case 4: {
        double d = *reinterpret_cast<const float*>(ptr1);
        sum = *reinterpret_cast<const int64_t*>(may_alias_ptr(&d));
        break;
      }
      // ...
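// The float/double cases above stash a floating-point running sum in an
// int64_t by reinterpreting its bit pattern. A minimal standalone sketch of
// the same trick, assuming a hypothetical finalize_avg() helper that is not
// part of this file (std::memcpy stands in for may_alias_ptr):
#include <cstdint>
#include <cstring>

double finalize_avg(const int64_t sum_bits, const int64_t count) {
  double sum;
  std::memcpy(&sum, &sum_bits, sizeof(sum));  // reinterpret bits; no numeric conversion
  return count ? sum / static_cast<double>(count) : 0.0;
}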
const int8_t* advance_col_buff_to_slot(const int8_t* buff,
                                       const QueryMemoryDescriptor& query_mem_desc,
                                       const std::vector<TargetInfo>& targets,
                                       const size_t slot_idx,
                                       const bool separate_varlen_storage) {
  // ...
  size_t agg_col_idx{0};
  for (size_t target_idx = 0; target_idx < targets.size(); ++target_idx) {
    if (agg_col_idx == slot_idx) {
      return crt_col_ptr;
    }
    CHECK_LT(agg_col_idx, buffer_col_count);
    const auto& agg_info = targets[target_idx];
    // ...
    if (agg_info.is_agg && agg_info.agg_kind == kAVG) {
      if (agg_col_idx + 1 == slot_idx) {
        return crt_col_ptr;
      }
      crt_col_ptr = advance_to_next_columnar_target_buff(
          crt_col_ptr, query_mem_desc, agg_col_idx + 1);
    }
    agg_col_idx = advance_slot(agg_col_idx, agg_info, separate_varlen_storage);
  }
  // ...
}
std::vector<TargetValue> ResultSet::getRowAt(
    const size_t global_entry_idx,
    const bool translate_strings,
    const bool decimal_to_double,
    const bool fixup_count_distinct_pointers,
    const std::vector<bool>& targets_to_skip /* = {} */) const {
  const auto storage_lookup_result =
      fixup_count_distinct_pointers
          ? StorageLookupResult{storage_.get(), global_entry_idx, 0}
          : findStorage(global_entry_idx);
  const auto storage = storage_lookup_result.storage_ptr;
  const auto local_entry_idx = storage_lookup_result.fixedup_entry_idx;
  if (!fixup_count_distinct_pointers && storage->isEmptyEntry(local_entry_idx)) {
    return {};
  }
  const auto buff = storage->buff_;
  // ...
  std::vector<TargetValue> row;
  size_t agg_col_idx = 0;
  int8_t* rowwise_target_ptr{nullptr};
  int8_t* keys_ptr{nullptr};
  const int8_t* crt_col_ptr{nullptr};
  if (query_mem_desc_.didOutputColumnar()) {
    // ...
    crt_col_ptr = get_cols_ptr(buff, storage->query_mem_desc_);
  } else {
    keys_ptr = row_ptr_rowwise(buff, query_mem_desc_, local_entry_idx);
    const auto key_bytes_with_padding =
        align_to_int64(get_key_bytes_rowwise(query_mem_desc_));
    rowwise_target_ptr = keys_ptr + key_bytes_with_padding;
  }
  for (size_t target_idx = 0; target_idx < storage->targets_.size(); ++target_idx) {
    const auto& agg_info = storage->targets_[target_idx];
    if (query_mem_desc_.didOutputColumnar()) {
      if (UNLIKELY(!targets_to_skip.empty())) {
        row.push_back(!targets_to_skip[target_idx]
                          ? getTargetValueFromBufferColwise(crt_col_ptr,
                                                            /* ... */
                                                            storage->query_mem_desc_,
                                                            /* ... */)
                          : nullptr);
      } else {
        row.push_back(getTargetValueFromBufferColwise(crt_col_ptr,
                                                      /* ... */
                                                      storage->query_mem_desc_,
                                                      /* ... */));
      }
      crt_col_ptr = advance_target_ptr_col_wise(crt_col_ptr,
                                                /* ... */
                                                storage->query_mem_desc_,
                                                separate_varlen_storage_valid_);
    } else {
      if (UNLIKELY(!targets_to_skip.empty())) {
        row.push_back(!targets_to_skip[target_idx]
                          ? getTargetValueFromBufferRowwise(
                                rowwise_target_ptr,
                                /* ... */
                                fixup_count_distinct_pointers)
                          : nullptr);
      } else {
        row.push_back(getTargetValueFromBufferRowwise(
            rowwise_target_ptr,
            /* ... */
            fixup_count_distinct_pointers));
      }
      rowwise_target_ptr = advance_target_ptr_row_wise(
          rowwise_target_ptr,
          /* ... */
          separate_varlen_storage_valid_);
    }
    agg_col_idx = advance_slot(agg_col_idx, agg_info, separate_varlen_storage_valid_);
  }
  return row;
}
TargetValue ResultSet::getRowAt(const size_t row_idx,
                                const size_t col_idx,
                                const bool translate_strings,
                                const bool decimal_to_double /* = true */) const {
  std::lock_guard<std::mutex> lock(row_iteration_mutex_);
  // ...
  for (size_t i = 0; i < row_idx; ++i) {
    auto crt_row = getNextRowUnlocked(translate_strings, decimal_to_double);
    CHECK(!crt_row.empty());
  }
  auto crt_row = getNextRowUnlocked(translate_strings, decimal_to_double);
  CHECK(!crt_row.empty());
  return crt_row[col_idx];
}
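// Note that this overload rewinds and replays row_idx rows on every call, so a
// single-cell fetch costs O(row_idx); sequential consumers should use the
// cursor API instead. A hedged usage sketch, assuming a materialized ResultSet
// with the usual rowCount()/getNextRow() interface:
void print_all_rows(const ResultSet& rows) {
  for (size_t i = 0; i < rows.rowCount(); ++i) {
    const auto crt_row = rows.getNextRow(/*translate_strings=*/true,
                                         /*decimal_to_double=*/true);
    const auto& first_col = crt_row[0];  // TargetValue is a boost::variant
    // ... unpack with boost::get<ScalarTargetValue>(&first_col), etc.
  }
}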
OneIntegerColumnRow ResultSet::getOneColRow(const size_t global_entry_idx) const {
  const auto storage_lookup_result = findStorage(global_entry_idx);
  const auto storage = storage_lookup_result.storage_ptr;
  const auto local_entry_idx = storage_lookup_result.fixedup_entry_idx;
  if (storage->isEmptyEntry(local_entry_idx)) {
    return {0, false};
  }
  const auto buff = storage->buff_;
  // ...
  CHECK(!query_mem_desc_.didOutputColumnar());
  const auto keys_ptr = row_ptr_rowwise(buff, query_mem_desc_, local_entry_idx);
  const auto key_bytes_with_padding =
      align_to_int64(get_key_bytes_rowwise(query_mem_desc_));
  const auto rowwise_target_ptr = keys_ptr + key_bytes_with_padding;
  const auto tv = getTargetValueFromBufferRowwise(rowwise_target_ptr,
                                                  keys_ptr,
                                                  global_entry_idx,
                                                  /* ... */);
  const auto scalar_tv = boost::get<ScalarTargetValue>(&tv);
  // ...
  const auto ival_ptr = boost::get<int64_t>(scalar_tv);
  // ...
  return {*ival_ptr, true};
}
std::vector<TargetValue> ResultSet::getRowAt(const size_t logical_index) const {
  if (logical_index >= entryCount()) {
    return {};
  }
  const auto entry_idx =
      permutation_.empty() ? logical_index : permutation_[logical_index];
  return getRowAt(entry_idx, true, false, false);
}
std::vector<TargetValue> ResultSet::getRowAtNoTranslations(
    const size_t logical_index,
    const std::vector<bool>& targets_to_skip /* = {} */) const {
  if (logical_index >= entryCount()) {
    return {};
  }
  const auto entry_idx =
      permutation_.empty() ? logical_index : permutation_[logical_index];
  return getRowAt(entry_idx, false, false, false, targets_to_skip);
}
bool ResultSet::isRowAtEmpty(const size_t logical_index) const {
  if (logical_index >= entryCount()) {
    return true;
  }
  const auto entry_idx =
      permutation_.empty() ? logical_index : permutation_[logical_index];
  const auto storage_lookup_result = findStorage(entry_idx);
  const auto storage = storage_lookup_result.storage_ptr;
  const auto local_entry_idx = storage_lookup_result.fixedup_entry_idx;
  return storage->isEmptyEntry(local_entry_idx);
}
std::vector<TargetValue> ResultSet::getNextRow(const bool translate_strings,
                                               const bool decimal_to_double) const {
  std::lock_guard<std::mutex> lock(row_iteration_mutex_);
  if (!storage_ && !just_explain_) {
    return {};
  }
  return getNextRowUnlocked(translate_strings, decimal_to_double);
}
std::vector<TargetValue> ResultSet::getNextRowUnlocked(
    const bool translate_strings,
    const bool decimal_to_double) const {
  if (just_explain_) {
    if (fetched_so_far_) {
      return {};
    }
    fetched_so_far_ = 1;
    return {explanation_};
  }
  return getNextRowImpl(translate_strings, decimal_to_double);
}
std::vector<TargetValue> ResultSet::getNextRowImpl(const bool translate_strings,
                                                   const bool decimal_to_double) const {
  size_t entry_buff_idx = 0;
  do {
    if (keep_first_ && fetched_so_far_ >= drop_first_ + keep_first_) {
      return {};
    }
    entry_buff_idx = advanceCursorToNextEntry();
    if (crt_row_buff_idx_ >= entryCount()) {
      CHECK_EQ(entryCount(), crt_row_buff_idx_);
      return {};
    }
    ++crt_row_buff_idx_;
    ++fetched_so_far_;
  } while (drop_first_ && fetched_so_far_ <= drop_first_);
  auto row = getRowAt(entry_buff_idx, translate_strings, decimal_to_double, false);
  CHECK(!row.empty());
  return row;
}
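// drop_first_ and keep_first_ give the cursor OFFSET/LIMIT semantics: rows are
// consumed (counted in fetched_so_far_) but discarded until drop_first_ have
// gone by, and iteration stops after drop_first_ + keep_first_ fetches. A
// plain-integer model of the same loop, with the buffer replaced by a counter
// (illustrative only, not this file's API):
#include <cstddef>
#include <vector>

std::vector<size_t> paged_indices(const size_t total,
                                  const size_t offset,   // drop_first_
                                  const size_t limit) {  // keep_first_ (0 == unlimited)
  std::vector<size_t> out;
  size_t fetched = 0;
  for (size_t idx = 0; idx < total; ++idx) {
    ++fetched;
    if (fetched <= offset) {
      continue;  // still dropping
    }
    if (limit && fetched > offset + limit) {
      break;  // past the requested page
    }
    out.push_back(idx);
  }
  return out;
}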
const int8_t* columnar_elem_ptr(const size_t entry_idx,
                                const int8_t* col1_ptr,
                                const int8_t compact_sz1) {
  return col1_ptr + compact_sz1 * entry_idx;
}
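// In columnar layouts each slot is a dense array indexed by entry, so a value
// lives at base + entry_idx * slot_width, and the next slot's base starts
// entry_count * slot_width bytes later (padding aside). A standalone sketch
// with hypothetical names (col_base, entry_count are assumptions):
const int8_t* next_columnar_slot(const int8_t* col_base,
                                 const size_t entry_count,
                                 const int8_t slot_width) {
  // Mirrors what advance_to_next_columnar_target_buff computes, minus padding.
  return col_base + entry_count * slot_width;
}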
int64_t int_resize_cast(const int64_t ival, const size_t sz) {
  switch (sz) {
    case 8:
      return ival;
    case 4:
      return static_cast<int32_t>(ival);
    case 2:
      return static_cast<int16_t>(ival);
    case 1:
      return static_cast<int8_t>(ival);
    // ...
  }
}
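// int_resize_cast re-narrows a value that was read into 64 bits so that sign
// bits survive: a one-byte slot holding 0xFF must come back as -1, not 255. A
// quick check of that behavior (hypothetical helper test, not part of this
// file):
#include <cassert>

void int_resize_cast_example() {
  const int64_t raw = 0xFF;                        // one-byte slot, all bits set
  assert(int_resize_cast(raw, 1) == int64_t(-1));  // static_cast<int8_t> sign-extends
  assert(int_resize_cast(raw, 8) == int64_t(255)); // full width: value unchanged
}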
void ResultSet::RowWiseTargetAccessor::initializeOffsetsForStorage() {
  // Compute slot offsets for the base storage and all appended storage.
  for (size_t storage_idx = 0; storage_idx < result_set_->appended_storage_.size() + 1;
       ++storage_idx) {
    offsets_for_storage_.emplace_back();
    const int8_t* rowwise_target_ptr{0};
    size_t agg_col_idx = 0;
    for (size_t target_idx = 0; target_idx < result_set_->storage_->targets_.size();
         ++target_idx) {
      const auto& agg_info = result_set_->storage_->targets_[target_idx];
      auto ptr1 = rowwise_target_ptr;
      const auto compact_sz1 =
          result_set_->query_mem_desc_.getPaddedSlotWidthBytes(agg_col_idx)
              ? result_set_->query_mem_desc_.getPaddedSlotWidthBytes(agg_col_idx)
              : key_width_;
      const int8_t* ptr2{nullptr};
      int8_t compact_sz2{0};
      if (agg_info.is_agg && agg_info.agg_kind == kAVG) {
        ptr2 = ptr1 + compact_sz1;
        compact_sz2 =
            result_set_->query_mem_desc_.getPaddedSlotWidthBytes(agg_col_idx + 1);
      } else if (is_real_str_or_array(agg_info)) {
        ptr2 = ptr1 + compact_sz1;
        if (!result_set_->separate_varlen_storage_valid_) {
          // None-encoded string: pointer and size slots.
          compact_sz2 =
              result_set_->query_mem_desc_.getPaddedSlotWidthBytes(agg_col_idx + 1);
        }
      }
      offsets_for_storage_[storage_idx].push_back(
          TargetOffsets{ptr1,
                        static_cast<size_t>(compact_sz1),
                        ptr2,
                        static_cast<size_t>(compact_sz2)});
      rowwise_target_ptr = advance_target_ptr_row_wise(
          rowwise_target_ptr,
          agg_info,
          agg_col_idx,
          result_set_->query_mem_desc_,
          result_set_->separate_varlen_storage_valid_);
      agg_col_idx = advance_slot(
          agg_col_idx, agg_info, result_set_->separate_varlen_storage_valid_);
    }
    CHECK_EQ(offsets_for_storage_[storage_idx].size(),
             result_set_->storage_->targets_.size());
  }
}
InternalTargetValue ResultSet::RowWiseTargetAccessor::getColumnInternal(
    const int8_t* buff,
    const size_t entry_idx,
    const size_t target_logical_idx,
    const StorageLookupResult& storage_lookup_result) const {
  const int8_t* rowwise_target_ptr{nullptr};
  const int8_t* keys_ptr{nullptr};
  const size_t storage_idx = storage_lookup_result.storage_idx;
  CHECK_LT(storage_idx, offsets_for_storage_.size());
  CHECK_LT(target_logical_idx, offsets_for_storage_[storage_idx].size());
  const auto& offsets_for_target = offsets_for_storage_[storage_idx][target_logical_idx];
  const auto& agg_info = result_set_->storage_->targets_[target_logical_idx];
  const auto& type_info = agg_info.sql_type;

  keys_ptr = get_rowwise_ptr(buff, entry_idx);
  rowwise_target_ptr = keys_ptr + key_bytes_with_padding_;
  auto ptr1 = rowwise_target_ptr + reinterpret_cast<size_t>(offsets_for_target.ptr1);
  if (result_set_->query_mem_desc_.targetGroupbyIndicesSize() > 0) {
    if (result_set_->query_mem_desc_.getTargetGroupbyIndex(target_logical_idx) >= 0) {
      ptr1 = keys_ptr +
             result_set_->query_mem_desc_.getTargetGroupbyIndex(target_logical_idx) *
                 key_width_;
    }
  }
  const auto i1 = result_set_->lazyReadInt(
      read_int_from_buff(ptr1, offsets_for_target.compact_sz1),
      target_logical_idx,
      storage_lookup_result);
  if (agg_info.is_agg && agg_info.agg_kind == kAVG) {
    CHECK(offsets_for_target.ptr2);
    const auto ptr2 =
        rowwise_target_ptr + reinterpret_cast<size_t>(offsets_for_target.ptr2);
    const auto i2 = read_int_from_buff(ptr2, offsets_for_target.compact_sz2);
    return InternalTargetValue(i1, i2);
  } else {
    if (type_info.is_string() && type_info.get_compression() == kENCODING_NONE) {
      CHECK(!agg_info.is_agg);
      if (!result_set_->lazy_fetch_info_.empty()) {
        CHECK_LT(target_logical_idx, result_set_->lazy_fetch_info_.size());
        const auto& col_lazy_fetch = result_set_->lazy_fetch_info_[target_logical_idx];
        if (col_lazy_fetch.is_lazily_fetched) {
          // ...
        }
      }
      if (result_set_->separate_varlen_storage_valid_) {
        // ...
        CHECK_LT(storage_lookup_result.storage_idx,
                 result_set_->serialized_varlen_buffer_.size());
        const auto& varlen_buffer_for_fragment =
            result_set_->serialized_varlen_buffer_[storage_lookup_result.storage_idx];
        CHECK_LT(static_cast<size_t>(i1), varlen_buffer_for_fragment.size());
        // ...
      }
      CHECK(offsets_for_target.ptr2);
      const auto ptr2 =
          rowwise_target_ptr + reinterpret_cast<size_t>(offsets_for_target.ptr2);
      const auto str_len = read_int_from_buff(ptr2, offsets_for_target.compact_sz2);
      // ...
      return result_set_->getVarlenOrderEntry(i1, str_len);
    }
    return InternalTargetValue(
        type_info.is_fp() ? i1 : int_resize_cast(i1, type_info.get_logical_size()));
  }
}
void ResultSet::ColumnWiseTargetAccessor::initializeOffsetsForStorage() {
  // Compute slot offsets for the base storage and all appended storage.
  const auto key_width = result_set_->query_mem_desc_.getEffectiveKeyWidth();
  for (size_t storage_idx = 0; storage_idx < result_set_->appended_storage_.size() + 1;
       ++storage_idx) {
    offsets_for_storage_.emplace_back();
    const int8_t* buff = storage_idx == 0
                             ? result_set_->storage_->buff_
                             : result_set_->appended_storage_[storage_idx - 1]->buff_;
    // ...
    const auto& crt_query_mem_desc =
        storage_idx == 0
            ? result_set_->storage_->query_mem_desc_
            : result_set_->appended_storage_[storage_idx - 1]->query_mem_desc_;
    const int8_t* crt_col_ptr = get_cols_ptr(buff, crt_query_mem_desc);
    size_t agg_col_idx = 0;
    for (size_t target_idx = 0; target_idx < result_set_->storage_->targets_.size();
         ++target_idx) {
      const auto& agg_info = result_set_->storage_->targets_[target_idx];
      const auto compact_sz1 =
          crt_query_mem_desc.getPaddedSlotWidthBytes(agg_col_idx)
              ? crt_query_mem_desc.getPaddedSlotWidthBytes(agg_col_idx)
              : key_width;
      const auto next_col_ptr = advance_to_next_columnar_target_buff(
          crt_col_ptr, crt_query_mem_desc, agg_col_idx);
      const bool uses_two_slots = (agg_info.is_agg && agg_info.agg_kind == kAVG) ||
                                  is_real_str_or_array(agg_info);
      const auto col2_ptr = uses_two_slots ? next_col_ptr : nullptr;
      const auto compact_sz2 =
          uses_two_slots ? crt_query_mem_desc.getPaddedSlotWidthBytes(agg_col_idx + 1)
                         : 0;
      offsets_for_storage_[storage_idx].push_back(
          TargetOffsets{crt_col_ptr,
                        static_cast<size_t>(compact_sz1),
                        col2_ptr,
                        static_cast<size_t>(compact_sz2)});
      crt_col_ptr = next_col_ptr;
      if (uses_two_slots) {
        crt_col_ptr = advance_to_next_columnar_target_buff(
            crt_col_ptr, crt_query_mem_desc, agg_col_idx + 1);
      }
      agg_col_idx = advance_slot(
          agg_col_idx, agg_info, result_set_->separate_varlen_storage_valid_);
    }
    CHECK_EQ(offsets_for_storage_[storage_idx].size(),
             result_set_->storage_->targets_.size());
  }
}
InternalTargetValue ResultSet::ColumnWiseTargetAccessor::getColumnInternal(
    const int8_t* buff,
    const size_t entry_idx,
    const size_t target_logical_idx,
    const StorageLookupResult& storage_lookup_result) const {
  const size_t storage_idx = storage_lookup_result.storage_idx;
  CHECK_LT(storage_idx, offsets_for_storage_.size());
  CHECK_LT(target_logical_idx, offsets_for_storage_[storage_idx].size());
  const auto& offsets_for_target = offsets_for_storage_[storage_idx][target_logical_idx];
  const auto& agg_info = result_set_->storage_->targets_[target_logical_idx];
  const auto& type_info = agg_info.sql_type;
  auto ptr1 = offsets_for_target.ptr1;
  if (result_set_->query_mem_desc_.targetGroupbyIndicesSize() > 0) {
    if (result_set_->query_mem_desc_.getTargetGroupbyIndex(target_logical_idx) >= 0) {
      ptr1 =
          buff + result_set_->query_mem_desc_.getTargetGroupbyIndex(target_logical_idx) *
                     result_set_->query_mem_desc_.getEffectiveKeyWidth() *
                     result_set_->query_mem_desc_.entry_count_;
    }
  }
  const auto i1 = result_set_->lazyReadInt(
      read_int_from_buff(
          columnar_elem_ptr(entry_idx, ptr1, offsets_for_target.compact_sz1),
          offsets_for_target.compact_sz1),
      target_logical_idx,
      storage_lookup_result);
  if (agg_info.is_agg && agg_info.agg_kind == kAVG) {
    CHECK(offsets_for_target.ptr2);
    const auto i2 = read_int_from_buff(
        columnar_elem_ptr(
            entry_idx, offsets_for_target.ptr2, offsets_for_target.compact_sz2),
        offsets_for_target.compact_sz2);
    return InternalTargetValue(i1, i2);
  } else {
    if (type_info.is_string() && type_info.get_compression() == kENCODING_NONE) {
      CHECK(!agg_info.is_agg);
      if (!result_set_->lazy_fetch_info_.empty()) {
        CHECK_LT(target_logical_idx, result_set_->lazy_fetch_info_.size());
        const auto& col_lazy_fetch = result_set_->lazy_fetch_info_[target_logical_idx];
        if (col_lazy_fetch.is_lazily_fetched) {
          // ...
        }
      }
      if (result_set_->separate_varlen_storage_valid_) {
        // ...
        CHECK_LT(storage_lookup_result.storage_idx,
                 result_set_->serialized_varlen_buffer_.size());
        const auto& varlen_buffer_for_fragment =
            result_set_->serialized_varlen_buffer_[storage_lookup_result.storage_idx];
        CHECK_LT(static_cast<size_t>(i1), varlen_buffer_for_fragment.size());
        // ...
      }
      CHECK(offsets_for_target.ptr2);
      const auto i2 = read_int_from_buff(
          columnar_elem_ptr(
              entry_idx, offsets_for_target.ptr2, offsets_for_target.compact_sz2),
          offsets_for_target.compact_sz2);
      return result_set_->getVarlenOrderEntry(i1, i2);
    }
    return InternalTargetValue(
        type_info.is_fp() ? i1 : int_resize_cast(i1, type_info.get_logical_size()));
  }
}
InternalTargetValue ResultSet::getVarlenOrderEntry(const int64_t str_ptr,
                                                   const size_t str_len) const {
  char* host_str_ptr{nullptr};
  std::vector<int8_t> cpu_buffer;
  if (device_type_ == ExecutorDeviceType::GPU) {
    cpu_buffer.resize(str_len);
    // ...
    auto data_mgr = executor->getDataMgr();
    auto allocator = std::make_unique<CudaAllocator>(
        data_mgr, device_id_, getQueryEngineCudaStreamForDevice(device_id_));
    allocator->copyFromDevice(
        &cpu_buffer[0], reinterpret_cast<int8_t*>(str_ptr), str_len);
    host_str_ptr = reinterpret_cast<char*>(&cpu_buffer[0]);
  } else {
    // ...
    host_str_ptr = reinterpret_cast<char*>(str_ptr);
  }
  std::string str(host_str_ptr, str_len);
  // ...
}
int64_t ResultSet::lazyReadInt(const int64_t ival,
                               const size_t target_logical_idx,
                               const StorageLookupResult& storage_lookup_result) const {
  // ...
  if (col_lazy_fetch.is_lazily_fetched) {
    // ...
    int64_t ival_copy = ival;
    auto& frag_col_buffers =
        getColumnFrag(/* ... */, target_logical_idx, ival_copy);
    auto& frag_col_buffer = frag_col_buffers[col_lazy_fetch.local_col_id];
    // ...
    CHECK(!target_info.is_agg);
    if (target_info.sql_type.is_string() &&
        target_info.sql_type.get_compression() == kENCODING_NONE) {
      // ...
      ChunkIter_get_nth(
          reinterpret_cast<ChunkIter*>(const_cast<int8_t*>(frag_col_buffer)),
          /* ... */);
      std::string fetched_str(reinterpret_cast<char*>(vd.pointer), vd.length);
      // ...
    }
    // ...
  }
  return ival;
}
// Fragments of the two advanceCursorToNextEntry() overloads: both walk the
// cursor forward, resolving each candidate entry via findStorage() and
// skipping it while empty.
  // ...
  if (!storage->isEmptyEntry(fixedup_entry_idx)) {
    break;
  }
  // ...

size_t ResultSet::advanceCursorToNextEntry() const {
  while (crt_row_buff_idx_ < entryCount()) {
    const auto entry_idx =
        permutation_.empty() ? crt_row_buff_idx_ : permutation_[crt_row_buff_idx_];
    const auto storage_lookup_result = findStorage(entry_idx);
    const auto storage = storage_lookup_result.storage_ptr;
    const auto fixedup_entry_idx = storage_lookup_result.fixedup_entry_idx;
    if (!storage->isEmptyEntry(fixedup_entry_idx)) {
      break;
    }
    // ...
  }
  // ...
}
size_t ResultSet::getBufferSizeBytes(const ExecutorDeviceType device_type) const {
  return storage_->query_mem_desc_.getBufferSizeBytes(device_type);
}
template <class T>
TargetValue build_array_target_value(
    const int8_t* buff,
    const size_t buff_sz,
    std::shared_ptr<RowSetMemoryOwner> row_set_mem_owner) {
  std::vector<ScalarTargetValue> values;
  auto buff_elems = reinterpret_cast<const T*>(buff);
  CHECK_EQ(size_t(0), buff_sz % sizeof(T));
  const size_t num_elems = buff_sz / sizeof(T);
  for (size_t i = 0; i < num_elems; ++i) {
    values.push_back(make_scalar_tv<T>(buff_elems[i]));
  }
  return ArrayTargetValue(values);
}
TargetValue build_string_array_target_value(
    const int32_t* buff,
    const size_t buff_sz,
    const int dict_id,
    const bool translate_strings,
    std::shared_ptr<RowSetMemoryOwner> row_set_mem_owner,
    const Catalog_Namespace::Catalog* catalog) {
  std::vector<ScalarTargetValue> values;
  CHECK_EQ(size_t(0), buff_sz % sizeof(int32_t));
  const size_t num_elems = buff_sz / sizeof(int32_t);
  if (translate_strings) {
    for (size_t i = 0; i < num_elems; ++i) {
      const auto string_id = buff[i];
      // ... null ids become null strings; literal ids go through the literal
      // dictionary proxy (sdp):
      if (!dict_id) {
        values.emplace_back(sdp->getString(string_id));
      } else {
        values.emplace_back(NullableString(
            row_set_mem_owner->getOrAddStringDictProxy(dict_id, false, catalog)
                ->getString(string_id)));
      }
    }
  } else {
    for (size_t i = 0; i < num_elems; i++) {
      values.emplace_back(static_cast<int64_t>(buff[i]));
    }
  }
  return ArrayTargetValue(values);
}
TargetValue build_array_target_value(const SQLTypeInfo& array_ti,
                                     const int8_t* buff,
                                     const size_t buff_sz,
                                     const bool translate_strings,
                                     std::shared_ptr<RowSetMemoryOwner> row_set_mem_owner,
                                     const Catalog_Namespace::Catalog* catalog) {
  const auto elem_ti = array_ti.get_elem_type();
  if (elem_ti.is_string()) {
    return build_string_array_target_value(reinterpret_cast<const int32_t*>(buff),
                                           buff_sz,
                                           elem_ti.get_comp_param(),
                                           translate_strings,
                                           row_set_mem_owner,
                                           catalog);
  }
  switch (elem_ti.get_size()) {
    case 1:
      return build_array_target_value<int8_t>(buff, buff_sz, row_set_mem_owner);
    case 2:
      return build_array_target_value<int16_t>(buff, buff_sz, row_set_mem_owner);
    case 4:
      return elem_ti.is_fp()
                 ? build_array_target_value<float>(buff, buff_sz, row_set_mem_owner)
                 : build_array_target_value<int32_t>(buff, buff_sz, row_set_mem_owner);
    case 8:
      return elem_ti.is_fp()
                 ? build_array_target_value<double>(buff, buff_sz, row_set_mem_owner)
                 : build_array_target_value<int64_t>(buff, buff_sz, row_set_mem_owner);
    // ...
  }
}
template <class Tuple, size_t... indices>
std::vector<std::pair<const int8_t*, const int64_t>> make_vals_vector(
    std::index_sequence<indices...>,
    const Tuple& tuple) {
  return std::vector<std::pair<const int8_t*, const int64_t>>{
      std::make_pair(std::get<2 * indices>(tuple), std::get<2 * indices + 1>(tuple))...};
}
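// make_vals_vector turns a flat parameter pack (ptr0, len0, ptr1, len1, ...)
// into (ptr, len) pairs: std::make_index_sequence<N/2> yields indices
// 0..N/2-1, and index i selects tuple elements 2*i and 2*i+1. With four
// arguments, for example, the pack expansion is equivalent to:
//
//   make_vals_vector(std::make_index_sequence<2>{},
//                    std::make_tuple(p0, l0, p1, l1))
//   == { std::make_pair(std::get<0>(t), std::get<1>(t)),    // i = 0
//        std::make_pair(std::get<2>(t), std::get<3>(t)) };  // i = 1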
std::unique_ptr<ArrayDatum> lazy_fetch_chunk(const int8_t* ptr,
                                             const int64_t varlen_ptr) {
  auto ad = std::make_unique<ArrayDatum>();
  // ...
// GeoLazyFetchHandler::fetch():
template <typename... T>
static inline auto fetch(const SQLTypeInfo& geo_ti,
                         const ResultSet::GeoReturnType return_type,
                         T&&... vals) {
  constexpr int num_vals = sizeof...(vals);
  static_assert(
      num_vals % 2 == 0,
      "Must have consistent pointer/size pairs for lazy fetch of geo target values.");
  const auto vals_vector = make_vals_vector(std::make_index_sequence<num_vals / 2>{},
                                            std::make_tuple(vals...));
  std::array<VarlenDatumPtr, num_vals / 2> ad_arr;
  size_t ctr = 0;
  for (const auto& col_pair : vals_vector) {
    ad_arr[ctr] = lazy_fetch_chunk(col_pair.first, col_pair.second);
    // The regular chunk iterator may mis-set nullness for POINT coords, so
    // reset and re-check below.
    ad_arr[ctr]->is_null = false;
    if (!geo_ti.get_notnull()) {
      if (ad_arr[ctr]->length == 0 || ad_arr[ctr]->pointer == NULL ||
          (geo_ti.get_type() == kPOINT &&
           is_null_point(geo_ti, ad_arr[ctr]->pointer, ad_arr[ctr]->length))) {
        ad_arr[ctr]->is_null = true;
      }
    }
    ctr++;
  }
  return ad_arr;
}
std::unique_ptr<ArrayDatum> fetch_data_from_gpu(int64_t varlen_ptr,
                                                const int64_t length,
                                                Data_Namespace::DataMgr* data_mgr,
                                                const int device_id) {
  auto cpu_buf =
      std::shared_ptr<int8_t>(new int8_t[length], std::default_delete<int8_t[]>());
  auto allocator = std::make_unique<CudaAllocator>(
      data_mgr, device_id, getQueryEngineCudaStreamForDevice(device_id));
  allocator->copyFromDevice(
      cpu_buf.get(), reinterpret_cast<int8_t*>(varlen_ptr), length);
  // Just fetching the data from the gpu, not checking geo nullness.
  return std::make_unique<ArrayDatum>(length, cpu_buf, false);
}
static auto yieldGpuPtrFetcher() {
  return [](const int64_t ptr, const int64_t length) -> VarlenDatumPtr {
    // Just wrapping the device pointer, not checking geo nullness.
    return std::make_unique<VarlenDatum>(length, reinterpret_cast<int8_t*>(ptr), false);
  };
}

static auto yieldGpuDatumFetcher(Data_Namespace::DataMgr* data_mgr_ptr,
                                 const int device_id) {
  return [data_mgr_ptr, device_id](const int64_t ptr,
                                   const int64_t length) -> VarlenDatumPtr {
    return fetch_data_from_gpu(ptr, length, data_mgr_ptr, device_id);
  };
}

static auto yieldCpuDatumFetcher() {
  return [](const int64_t ptr, const int64_t length) -> VarlenDatumPtr {
    return std::make_unique<VarlenDatum>(length, reinterpret_cast<int8_t*>(ptr), false);
  };
}
// GeoQueryOutputFetchHandler::fetch():
template <typename... T>
static inline auto fetch(const SQLTypeInfo& geo_ti,
                         const ResultSet::GeoReturnType return_type,
                         Data_Namespace::DataMgr* data_mgr,
                         const bool fetch_data_from_gpu,
                         const int device_id,
                         T&&... vals) {
  auto ad_arr_generator = [&](auto datum_fetcher) {
    constexpr int num_vals = sizeof...(vals);
    static_assert(
        num_vals % 2 == 0,
        "Must have consistent pointer/size pairs for lazy fetch of geo target values.");
    const auto vals_vector = std::vector<int64_t>{vals...};
    std::array<VarlenDatumPtr, num_vals / 2> ad_arr;
    size_t ctr = 0;
    for (size_t i = 0; i < vals_vector.size(); i += 2, ctr++) {
      if (vals_vector[i] == 0) {
        // Projected null.
        ad_arr[ctr] = std::make_unique<ArrayDatum>(0, nullptr, true);
        continue;
      }
      ad_arr[ctr] = datum_fetcher(vals_vector[i], vals_vector[i + 1]);
      // All fetched datums come in with is_null set to false.
      if (!geo_ti.get_notnull()) {
        bool is_null = false;
        if (ad_arr[ctr]->length == 0 || ad_arr[ctr]->pointer == NULL) {
          is_null = true;
        } else if (geo_ti.get_type() == kPOINT && ctr == 0 &&
                   is_null_point(geo_ti, ad_arr[ctr]->pointer, ad_arr[ctr]->length)) {
          is_null = true;
        } else if (ad_arr[ctr]->length == 4 * sizeof(double)) {
          // Bounds array: check the fixlen-array null sentinel.
          // ...
          is_null = dti.is_null_fixlen_array(ad_arr[ctr]->pointer, ad_arr[ctr]->length);
        }
        ad_arr[ctr]->is_null = is_null;
      }
    }
    return ad_arr;
  };

  if (fetch_data_from_gpu) {
    if (return_type == ResultSet::GeoReturnType::GeoTargetValuePtr) {
      return ad_arr_generator(yieldGpuPtrFetcher());
    }
    return ad_arr_generator(yieldGpuDatumFetcher(data_mgr, device_id));
  }
  return ad_arr_generator(yieldCpuDatumFetcher());
}
template <SQLTypes GEO_SOURCE_TYPE, typename GeoTargetFetcher>
struct GeoTargetValueBuilder {
  template <typename... T>
  static inline TargetValue build(const SQLTypeInfo& geo_ti,
                                  const ResultSet::GeoReturnType return_type,
                                  T&&... vals) {
    auto ad_arr = GeoTargetFetcher::fetch(geo_ti, return_type, std::forward<T>(vals)...);
    static_assert(std::tuple_size<decltype(ad_arr)>::value > 0,
                  "ArrayDatum array for Geo Target must contain at least one value.");
    switch (return_type) {
      case ResultSet::GeoReturnType::GeoTargetValue: {
        if (!geo_ti.get_notnull() && ad_arr[0]->is_null) {
          return GeoTargetValue();
        }
        return GeoReturnTypeTraits<ResultSet::GeoReturnType::GeoTargetValue,
                                   GEO_SOURCE_TYPE>::GeoSerializerType::serialize(
            geo_ti, ad_arr);
      }
      case ResultSet::GeoReturnType::WktString: {
        if (!geo_ti.get_notnull() && ad_arr[0]->is_null) {
          // NULL geo is returned as a NULL wkt string.
          return NullableString(nullptr);
        }
        return GeoReturnTypeTraits<ResultSet::GeoReturnType::WktString,
                                   GEO_SOURCE_TYPE>::GeoSerializerType::serialize(
            geo_ti, ad_arr);
      }
      case ResultSet::GeoReturnType::GeoTargetValuePtr:
      case ResultSet::GeoReturnType::GeoTargetValueGpuPtr: {
        if (!geo_ti.get_notnull() && ad_arr[0]->is_null) {
          // NULL geo: pass along the null datum itself.
          // ...
        }
        return GeoReturnTypeTraits<ResultSet::GeoReturnType::GeoTargetValuePtr,
                                   GEO_SOURCE_TYPE>::GeoSerializerType::serialize(
            geo_ti, ad_arr);
      }
      // ...
    }
  }
};
template <typename T>
std::pair<int64_t, int64_t> get_frag_id_and_local_idx(
    const std::vector<std::vector<T>>& frag_offsets,
    const size_t tab_or_col_idx,
    const int64_t global_idx) {
  // ...
  for (int64_t frag_id = frag_offsets.size() - 1; frag_id > 0; --frag_id) {
    CHECK_LT(tab_or_col_idx, frag_offsets[frag_id].size());
    const auto frag_off = static_cast<int64_t>(frag_offsets[frag_id][tab_or_col_idx]);
    if (frag_off < global_idx) {
      return {frag_id, global_idx - frag_off};
    }
  }
  // ...
}
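// Worked example: with frag_offsets[*][col] = {0, 100, 200} (three fragments
// whose rows start at global offsets 0, 100 and 200), global_idx = 150 scans
// from the last fragment down: frag 2's offset 200 is not < 150, frag 1's
// offset 100 is, so the function returns {1, 50} -- global row 150 is local
// row 50 of fragment 1.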
const std::vector<const int8_t*>& ResultSet::getColumnFrag(const size_t storage_idx,
                                                           const size_t col_logical_idx,
                                                           int64_t& global_idx) const {
  // ...
  int64_t frag_id = 0;
  int64_t local_idx = global_idx;
  // ... resolve (frag_id, local_idx) via get_frag_id_and_local_idx() ...
  global_idx = local_idx;
  // ...
}
const VarlenOutputInfo* ResultSet::getVarlenOutputInfo(const size_t entry_idx) const {
  auto storage_lookup_result = findStorage(entry_idx);
  CHECK(storage_lookup_result.storage_ptr);
  return storage_lookup_result.storage_ptr->getVarlenOutputInfo();
}
/**
 * Directly copies the result-set buffer of a given column into output_buffer:
 * first the main storage, then any appended storage, back to back.
 */
void ResultSet::copyColumnIntoBuffer(const size_t column_idx,
                                     int8_t* output_buffer,
                                     const size_t output_buffer_size) const {
  // ...
  CHECK(output_buffer_size > 0);
  CHECK(output_buffer);
  const auto column_width_size = query_mem_desc_.getPaddedSlotWidthBytes(column_idx);
  size_t out_buff_offset = 0;

  // The main storage:
  const size_t crt_storage_row_count = storage_->query_mem_desc_.getEntryCount();
  const size_t crt_buffer_size = crt_storage_row_count * column_width_size;
  const size_t column_offset = storage_->query_mem_desc_.getColOffInBytes(column_idx);
  const int8_t* storage_buffer = storage_->getUnderlyingBuffer() + column_offset;
  CHECK(crt_buffer_size <= output_buffer_size);
  std::memcpy(output_buffer, storage_buffer, crt_buffer_size);
  out_buff_offset += crt_buffer_size;

  // The appended storages:
  for (size_t i = 0; i < appended_storage_.size(); ++i) {
    const size_t crt_storage_row_count =
        appended_storage_[i]->query_mem_desc_.getEntryCount();
    if (crt_storage_row_count == 0) {
      // Skip an empty appended storage.
      continue;
    }
    CHECK_LT(out_buff_offset, output_buffer_size);
    const size_t crt_buffer_size = crt_storage_row_count * column_width_size;
    const size_t column_offset =
        appended_storage_[i]->query_mem_desc_.getColOffInBytes(column_idx);
    const int8_t* storage_buffer =
        appended_storage_[i]->getUnderlyingBuffer() + column_offset;
    CHECK(out_buff_offset + crt_buffer_size <= output_buffer_size);
    std::memcpy(output_buffer + out_buff_offset, storage_buffer, crt_buffer_size);
    out_buff_offset += crt_buffer_size;
  }
}
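// copyColumnIntoBuffer is the fast path for columnar export: one memcpy per
// storage chunk instead of per-row TargetValue construction. A hedged usage
// sketch; buffer sizing from entry count and padded width is the caller's
// responsibility, and the direct-conversion precondition is CHECKed inside:
#include <vector>

std::vector<int8_t> export_column(const ResultSet& rows,
                                  const size_t column_idx,
                                  const size_t total_entry_count,
                                  const size_t padded_width) {
  std::vector<int8_t> out(total_entry_count * padded_width);
  rows.copyColumnIntoBuffer(column_idx, out.data(), out.size());
  return out;
}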
template <typename ENTRY_TYPE, QueryDescriptionType QUERY_TYPE, bool COLUMNAR_FORMAT>
ENTRY_TYPE ResultSet::getEntryAt(const size_t row_idx,
                                 const size_t target_idx,
                                 const size_t slot_idx) const {
  if constexpr (QUERY_TYPE == QueryDescriptionType::GroupByPerfectHash) {
    if constexpr (COLUMNAR_FORMAT) {
      return getColumnarPerfectHashEntryAt<ENTRY_TYPE>(row_idx, target_idx, slot_idx);
    } else {
      return getRowWisePerfectHashEntryAt<ENTRY_TYPE>(row_idx, target_idx, slot_idx);
    }
  } else if constexpr (QUERY_TYPE == QueryDescriptionType::GroupByBaselineHash) {
    if constexpr (COLUMNAR_FORMAT) {
      return getColumnarBaselineEntryAt<ENTRY_TYPE>(row_idx, target_idx, slot_idx);
    } else {
      return getRowWiseBaselineEntryAt<ENTRY_TYPE>(row_idx, target_idx, slot_idx);
    }
  } else {
    // ...
  }
}
#define DEF_GET_ENTRY_AT(query_type, columnar_output)                         \
  template DATA_T ResultSet::getEntryAt<DATA_T, query_type, columnar_output>( \
      const size_t row_idx, const size_t target_idx, const size_t slot_idx) const;

// Explicit instantiations (the DEF_GET_ENTRY_AT invocations between each
// #define DATA_T / #undef DATA_T pair are elided):
#define DATA_T int64_t
// ...
#define DATA_T int32_t
// ...
#define DATA_T int16_t
// ...
#define DATA_T int8_t
// ...
#define DATA_T float
// ...
#define DATA_T double
// ...

#undef DEF_GET_ENTRY_AT
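// For reference, one invocation of the macro above -- hypothetically
// DEF_GET_ENTRY_AT(QueryDescriptionType::GroupByPerfectHash, true) under
// "#define DATA_T int64_t" -- expands to this explicit instantiation:
//
//   template int64_t
//   ResultSet::getEntryAt<int64_t, QueryDescriptionType::GroupByPerfectHash, true>(
//       const size_t row_idx, const size_t target_idx, const size_t slot_idx) const;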
/**
 * Directly accesses the result set's storage buffer (columnar output, perfect
 * hash group by).
 */
template <typename ENTRY_TYPE>
ENTRY_TYPE ResultSet::getColumnarPerfectHashEntryAt(const size_t row_idx,
                                                    const size_t target_idx,
                                                    const size_t slot_idx) const {
  const size_t column_offset = storage_->query_mem_desc_.getColOffInBytes(slot_idx);
  const int8_t* storage_buffer = storage_->getUnderlyingBuffer() + column_offset;
  return reinterpret_cast<const ENTRY_TYPE*>(storage_buffer)[row_idx];
}
template <typename ENTRY_TYPE>
ENTRY_TYPE ResultSet::getRowWisePerfectHashEntryAt(const size_t row_idx,
                                                   const size_t target_idx,
                                                   const size_t slot_idx) const {
  const size_t row_offset = storage_->query_mem_desc_.getRowSize() * row_idx;
  const size_t column_offset = storage_->query_mem_desc_.getColOffInBytes(slot_idx);
  const int8_t* storage_buffer =
      storage_->getUnderlyingBuffer() + row_offset + column_offset;
  return *reinterpret_cast<const ENTRY_TYPE*>(storage_buffer);
}
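// The two perfect-hash accessors differ only in address arithmetic: row-wise
// storage interleaves all slots of one entry (offset = row_size * row_idx +
// col_off), while columnar storage groups one slot across all entries
// (offset = col_off, then index by row). A side-by-side sketch over a raw
// buffer; row_size and col_off stand in for the QueryMemoryDescriptor queries:
template <typename ENTRY_TYPE>
ENTRY_TYPE read_row_wise(const int8_t* buff,
                         const size_t row_size,
                         const size_t col_off,
                         const size_t row_idx) {
  return *reinterpret_cast<const ENTRY_TYPE*>(buff + row_size * row_idx + col_off);
}

template <typename ENTRY_TYPE>
ENTRY_TYPE read_columnar(const int8_t* buff,
                         const size_t col_off,
                         const size_t row_idx) {
  return reinterpret_cast<const ENTRY_TYPE*>(buff + col_off)[row_idx];
}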
template <typename ENTRY_TYPE>
ENTRY_TYPE ResultSet::getRowWiseBaselineEntryAt(const size_t row_idx,
                                                const size_t target_idx,
                                                const size_t slot_idx) const {
  // ...
  const auto key_width = storage_->query_mem_desc_.getEffectiveKeyWidth();
  // ...
  const auto column_offset =
      (storage_->query_mem_desc_.getTargetGroupbyIndex(target_idx) < 0)
          ? storage_->query_mem_desc_.getColOffInBytes(slot_idx)
          : storage_->query_mem_desc_.getTargetGroupbyIndex(target_idx) * key_width;
  const auto storage_buffer = keys_ptr + column_offset;
  return *reinterpret_cast<const ENTRY_TYPE*>(storage_buffer);
}
template <typename ENTRY_TYPE>
ENTRY_TYPE ResultSet::getColumnarBaselineEntryAt(const size_t row_idx,
                                                 const size_t target_idx,
                                                 const size_t slot_idx) const {
  // ...
  const auto key_width = storage_->query_mem_desc_.getEffectiveKeyWidth();
  const auto column_offset =
      (storage_->query_mem_desc_.getTargetGroupbyIndex(target_idx) < 0)
          ? storage_->query_mem_desc_.getColOffInBytes(slot_idx)
          : storage_->query_mem_desc_.getTargetGroupbyIndex(target_idx) * key_width *
                storage_->query_mem_desc_.getEntryCount();
  const auto column_buffer = storage_->getUnderlyingBuffer() + column_offset;
  return reinterpret_cast<const ENTRY_TYPE*>(column_buffer)[row_idx];
}
TargetValue ResultSet::makeVarlenTargetValue(const int8_t* ptr1,
                                             const int8_t compact_sz1,
                                             const int8_t* ptr2,
                                             const int8_t compact_sz2,
                                             const TargetInfo& target_info,
                                             const size_t target_logical_idx,
                                             const bool translate_strings,
                                             const size_t entry_buff_idx) const {
  auto varlen_ptr = read_int_from_buff(ptr1, compact_sz1);
  // ...
  if (varlen_ptr < 0) {
    // ... null varlen value ...
  }
  if (separate_varlen_storage_valid_ && !target_info.is_agg) {
    if (target_info.sql_type.is_string()) {
      // ...
      const auto& varlen_buffer_for_storage =
          serialized_varlen_buffer_[/* storage index */];
      CHECK_LT(static_cast<size_t>(varlen_ptr), varlen_buffer_for_storage.size());
      return varlen_buffer_for_storage[varlen_ptr];
    }
    // Arrays:
    // ...
    CHECK_LT(static_cast<size_t>(varlen_ptr), varlen_buffer.size());
    return build_array_target_value(
        target_info.sql_type,
        reinterpret_cast<const int8_t*>(varlen_buffer[varlen_ptr].data()),
        varlen_buffer[varlen_ptr].size(),
        /* ... */);
  }
  if (!lazy_fetch_info_.empty()) {
    // ...
    if (col_lazy_fetch.is_lazily_fetched) {
      // ...
      auto& frag_col_buffers =
          getColumnFrag(storage_idx.first, target_logical_idx, varlen_ptr);
      // ...
      auto col_buf = const_cast<int8_t*>(frag_col_buffers[col_lazy_fetch.local_col_id]);
      if (target_info.sql_type.is_string()) {
        VarlenDatum vd;
        bool is_end{false};
        ChunkIter_get_nth(
            reinterpret_cast<ChunkIter*>(col_buf), varlen_ptr, false, &vd, &is_end);
        // ...
        std::string fetched_str(reinterpret_cast<char*>(vd.pointer), vd.length);
        // ...
      }
      // Arrays:
      ArrayDatum ad;
      if (FlatBufferManager::isFlatBuffer(col_buf)) {
        FlatBufferManager m{col_buf};
        int64_t length;
        auto status = m.getItem(varlen_ptr, length, ad.pointer, ad.is_null);
        CHECK_EQ(status, FlatBufferManager::Status::Success);
      } else {
        ChunkIter_get_nth(
            reinterpret_cast<ChunkIter*>(col_buf), varlen_ptr, &ad, &is_end);
      }
      // ...
      if (ad.length > 0) {
        // ...
      }
      // ...
    }
  }
  // ...
  const auto length = read_int_from_buff(ptr2, compact_sz2);
  // GPU results: copy the varlen payload to a host buffer first.
  std::vector<int8_t> cpu_buffer;
  if (target_info.sql_type.is_string() && device_type_ == ExecutorDeviceType::GPU) {
    cpu_buffer.resize(length);
    // ...
    auto data_mgr = executor->getDataMgr();
    auto allocator = std::make_unique<CudaAllocator>(
        data_mgr, device_id_, getQueryEngineCudaStreamForDevice(device_id_));
    allocator->copyFromDevice(
        &cpu_buffer[0], reinterpret_cast<int8_t*>(varlen_ptr), length);
    varlen_ptr = reinterpret_cast<int64_t>(&cpu_buffer[0]);
  }
  if (target_info.sql_type.is_array()) {
    return build_array_target_value(target_info.sql_type,
                                    reinterpret_cast<const int8_t*>(varlen_ptr),
                                    /* ... */);
  }
  return std::string(reinterpret_cast<char*>(varlen_ptr), length);
}
bool ResultSet::isGeoColOnGpu(const size_t col_idx) const {
  // ...
  if (!IS_GEO(targets_[col_idx].sql_type.get_type())) {
    throw std::runtime_error(
        "Column target at index " + std::to_string(col_idx) +
        " is not a geo column. It is of type " +
        targets_[col_idx].sql_type.get_type_name() + ".");
  }
  const auto& target_info = targets_[col_idx];
  // ...
}
// Reads a geo value from a series of pointers to var-len slots. In columnar
// format, geo_target_ptr points at the beginning of the geo column and is
// adjusted by entry_buff_idx.
TargetValue ResultSet::makeGeoTargetValue(const int8_t* geo_target_ptr,
                                          const size_t slot_idx,
                                          const TargetInfo& target_info,
                                          const size_t target_logical_idx,
                                          const size_t entry_buff_idx) const {
  // ...
  auto getNextTargetBufferRowWise = [&](const size_t slot_idx, const size_t range) {
    return geo_target_ptr + query_mem_desc_.getPaddedColWidthForRange(slot_idx, range);
  };

  auto getNextTargetBufferColWise = [&](const size_t slot_idx, const size_t range) {
    const auto storage_info = findStorage(entry_buff_idx);
    auto crt_geo_col_ptr = geo_target_ptr;
    for (size_t i = slot_idx; i < slot_idx + range; i++) {
      crt_geo_col_ptr = advance_to_next_columnar_target_buff(
          crt_geo_col_ptr, storage_info.storage_ptr->query_mem_desc_, i);
    }
    // Adjust the column pointer to this entry's geo value.
    return crt_geo_col_ptr +
           storage_info.fixedup_entry_idx *
               storage_info.storage_ptr->query_mem_desc_.getPaddedSlotWidthBytes(
                   slot_idx + range);
  };

  auto getNextTargetBuffer = [&](const size_t slot_idx, const size_t range) {
    return query_mem_desc_.didOutputColumnar()
               ? getNextTargetBufferColWise(slot_idx, range)
               : getNextTargetBufferRowWise(slot_idx, range);
  };

  auto getCoordsDataPtr = [&](const int8_t* geo_target_ptr) {
    return read_int_from_buff(getNextTargetBuffer(slot_idx, 0),
                              query_mem_desc_.getPaddedSlotWidthBytes(slot_idx));
  };

  // The remaining accessors read subsequent slots analogously
  // (slot_idx + 1 ... slot_idx + 5):
  auto getCoordsLength = [&](const int8_t* geo_target_ptr) { /* ... */ };
  auto getRingSizesPtr = [&](const int8_t* geo_target_ptr) { /* ... */ };
  auto getRingSizesLength = [&](const int8_t* geo_target_ptr) { /* ... */ };
  auto getPolyRingsPtr = [&](const int8_t* geo_target_ptr) { /* ... */ };
  auto getPolyRingsLength = [&](const int8_t* geo_target_ptr) { /* ... */ };

  auto getFragColBuffers = [&]() -> decltype(auto) {
    // ...
    auto global_idx = getCoordsDataPtr(geo_target_ptr);
    return getColumnFrag(storage_idx.first, target_logical_idx, global_idx);
  };

  const bool is_gpu_fetch = device_type_ == ExecutorDeviceType::GPU;

  auto getDataMgr = [&]() {
    // ...
    return executor->getDataMgr();
  };

  auto getSeparateVarlenStorage = [&]() -> decltype(auto) {
    // ...
    const auto& varlen_buffer = serialized_varlen_buffer_[storage_idx.first];
    return varlen_buffer;
  };
  // ...
  switch (target_info.sql_type.get_type()) {
    case kPOINT: {
      if (query_mem_desc_.slotIsVarlenOutput(slot_idx)) {
        // Varlen output slot: the point was computed into the varlen buffer.
        CHECK_EQ(-1, getCoordsDataPtr(geo_target_ptr));
        // ...
        const auto varlen_output_info = getVarlenOutputInfo(entry_buff_idx);
        CHECK(varlen_output_info);
        // ...
        const auto cpu_data_ptr = reinterpret_cast<int64_t>(
            varlen_output_info->computeCpuOffset(geo_data_ptr));
        return GeoTargetValueBuilder<kPOINT, GeoQueryOutputFetchHandler>::build(
            /* ... */);
      } else if (separate_varlen_storage_valid_ && !target_info.is_agg) {
        const auto& varlen_buffer = getSeparateVarlenStorage();
        CHECK_LT(static_cast<size_t>(getCoordsDataPtr(geo_target_ptr)),
                 varlen_buffer.size());
        return GeoTargetValueBuilder<kPOINT, GeoQueryOutputFetchHandler>::build(
            target_info.sql_type,
            geo_return_type_,
            /* data_mgr= */ nullptr,
            /* fetch_data_from_gpu= */ false,
            device_id_,
            reinterpret_cast<int64_t>(
                varlen_buffer[getCoordsDataPtr(geo_target_ptr)].data()),
            static_cast<int64_t>(varlen_buffer[getCoordsDataPtr(geo_target_ptr)].size()));
      } else if (!lazy_fetch_info_.empty()) {
        const auto& frag_col_buffers = getFragColBuffers();
        return GeoTargetValueBuilder<kPOINT, GeoLazyFetchHandler>::build(
            target_info.sql_type,
            geo_return_type_,
            frag_col_buffers[/* coords col */],
            getCoordsDataPtr(geo_target_ptr));
      } else {
        return GeoTargetValueBuilder<kPOINT, GeoQueryOutputFetchHandler>::build(
            target_info.sql_type,
            geo_return_type_,
            is_gpu_fetch ? getDataMgr() : nullptr,
            is_gpu_fetch,
            device_id_,
            getCoordsDataPtr(geo_target_ptr),
            getCoordsLength(geo_target_ptr));
      }
      break;
    }
    case kMULTIPOINT: {
      if (separate_varlen_storage_valid_ && !target_info.is_agg) {
        const auto& varlen_buffer = getSeparateVarlenStorage();
        CHECK_LT(static_cast<size_t>(getCoordsDataPtr(geo_target_ptr)),
                 varlen_buffer.size());
        return GeoTargetValueBuilder<kMULTIPOINT, GeoQueryOutputFetchHandler>::build(
            target_info.sql_type,
            geo_return_type_,
            nullptr,
            false,
            device_id_,
            reinterpret_cast<int64_t>(
                varlen_buffer[getCoordsDataPtr(geo_target_ptr)].data()),
            static_cast<int64_t>(varlen_buffer[getCoordsDataPtr(geo_target_ptr)].size()));
      } else if (!lazy_fetch_info_.empty()) {
        const auto& frag_col_buffers = getFragColBuffers();
        return GeoTargetValueBuilder<kMULTIPOINT, GeoLazyFetchHandler>::build(
            target_info.sql_type,
            geo_return_type_,
            frag_col_buffers[/* coords col */],
            getCoordsDataPtr(geo_target_ptr));
      } else {
        return GeoTargetValueBuilder<kMULTIPOINT, GeoQueryOutputFetchHandler>::build(
            target_info.sql_type,
            geo_return_type_,
            is_gpu_fetch ? getDataMgr() : nullptr,
            is_gpu_fetch,
            device_id_,
            getCoordsDataPtr(geo_target_ptr),
            getCoordsLength(geo_target_ptr));
      }
      break;
    }
    case kLINESTRING: {
      // Same three paths, built with GeoTargetValueBuilder<kLINESTRING, ...>:
      if (separate_varlen_storage_valid_ && !target_info.is_agg) {
        const auto& varlen_buffer = getSeparateVarlenStorage();
        CHECK_LT(static_cast<size_t>(getCoordsDataPtr(geo_target_ptr)),
                 varlen_buffer.size());
        return GeoTargetValueBuilder<kLINESTRING, GeoQueryOutputFetchHandler>::build(
            target_info.sql_type,
            geo_return_type_,
            nullptr,
            false,
            device_id_,
            reinterpret_cast<int64_t>(
                varlen_buffer[getCoordsDataPtr(geo_target_ptr)].data()),
            static_cast<int64_t>(varlen_buffer[getCoordsDataPtr(geo_target_ptr)].size()));
      } else if (!lazy_fetch_info_.empty()) {
        const auto& frag_col_buffers = getFragColBuffers();
        return GeoTargetValueBuilder<kLINESTRING, GeoLazyFetchHandler>::build(
            target_info.sql_type,
            geo_return_type_,
            frag_col_buffers[/* coords col */],
            getCoordsDataPtr(geo_target_ptr));
      } else {
        return GeoTargetValueBuilder<kLINESTRING, GeoQueryOutputFetchHandler>::build(
            target_info.sql_type,
            geo_return_type_,
            is_gpu_fetch ? getDataMgr() : nullptr,
            is_gpu_fetch,
            device_id_,
            getCoordsDataPtr(geo_target_ptr),
            getCoordsLength(geo_target_ptr));
      }
      break;
    }
    case kMULTILINESTRING: {
      if (separate_varlen_storage_valid_ && !target_info.is_agg) {
        const auto& varlen_buffer = getSeparateVarlenStorage();
        CHECK_LT(static_cast<size_t>(getCoordsDataPtr(geo_target_ptr) + 1),
                 varlen_buffer.size());
        return GeoTargetValueBuilder<kMULTILINESTRING, GeoQueryOutputFetchHandler>::build(
            target_info.sql_type,
            geo_return_type_,
            nullptr,
            false,
            device_id_,
            reinterpret_cast<int64_t>(
                varlen_buffer[getCoordsDataPtr(geo_target_ptr)].data()),
            static_cast<int64_t>(varlen_buffer[getCoordsDataPtr(geo_target_ptr)].size()),
            reinterpret_cast<int64_t>(
                varlen_buffer[getCoordsDataPtr(geo_target_ptr) + 1].data()),
            static_cast<int64_t>(
                varlen_buffer[getCoordsDataPtr(geo_target_ptr) + 1].size()));
      } else if (!lazy_fetch_info_.empty()) {
        const auto& frag_col_buffers = getFragColBuffers();
        return GeoTargetValueBuilder<kMULTILINESTRING, GeoLazyFetchHandler>::build(
            target_info.sql_type,
            geo_return_type_,
            frag_col_buffers[/* coords col */],
            getCoordsDataPtr(geo_target_ptr),
            frag_col_buffers[/* linestring sizes col */],
            getCoordsDataPtr(geo_target_ptr));
      } else {
        return GeoTargetValueBuilder<kMULTILINESTRING, GeoQueryOutputFetchHandler>::build(
            target_info.sql_type,
            geo_return_type_,
            is_gpu_fetch ? getDataMgr() : nullptr,
            is_gpu_fetch,
            device_id_,
            getCoordsDataPtr(geo_target_ptr),
            getCoordsLength(geo_target_ptr),
            getRingSizesPtr(geo_target_ptr),
            getRingSizesLength(geo_target_ptr) * 4);
      }
      break;
    }
    case kPOLYGON: {
      // Same shape as kMULTILINESTRING: coords plus ring sizes.
      if (separate_varlen_storage_valid_ && !target_info.is_agg) {
        const auto& varlen_buffer = getSeparateVarlenStorage();
        CHECK_LT(static_cast<size_t>(getCoordsDataPtr(geo_target_ptr) + 1),
                 varlen_buffer.size());
        return GeoTargetValueBuilder<kPOLYGON, GeoQueryOutputFetchHandler>::build(
            target_info.sql_type,
            geo_return_type_,
            nullptr,
            false,
            device_id_,
            reinterpret_cast<int64_t>(
                varlen_buffer[getCoordsDataPtr(geo_target_ptr)].data()),
            static_cast<int64_t>(varlen_buffer[getCoordsDataPtr(geo_target_ptr)].size()),
            reinterpret_cast<int64_t>(
                varlen_buffer[getCoordsDataPtr(geo_target_ptr) + 1].data()),
            static_cast<int64_t>(
                varlen_buffer[getCoordsDataPtr(geo_target_ptr) + 1].size()));
      } else if (!lazy_fetch_info_.empty()) {
        const auto& frag_col_buffers = getFragColBuffers();
        return GeoTargetValueBuilder<kPOLYGON, GeoLazyFetchHandler>::build(
            target_info.sql_type,
            geo_return_type_,
            frag_col_buffers[/* coords col */],
            getCoordsDataPtr(geo_target_ptr),
            frag_col_buffers[/* ring sizes col */],
            getCoordsDataPtr(geo_target_ptr));
      } else {
        return GeoTargetValueBuilder<kPOLYGON, GeoQueryOutputFetchHandler>::build(
            target_info.sql_type,
            geo_return_type_,
            is_gpu_fetch ? getDataMgr() : nullptr,
            is_gpu_fetch,
            device_id_,
            getCoordsDataPtr(geo_target_ptr),
            getCoordsLength(geo_target_ptr),
            getRingSizesPtr(geo_target_ptr),
            getRingSizesLength(geo_target_ptr) * 4);
      }
      break;
    }
    case kMULTIPOLYGON: {
      if (separate_varlen_storage_valid_ && !target_info.is_agg) {
        const auto& varlen_buffer = getSeparateVarlenStorage();
        CHECK_LT(static_cast<size_t>(getCoordsDataPtr(geo_target_ptr) + 2),
                 varlen_buffer.size());
        return GeoTargetValueBuilder<kMULTIPOLYGON, GeoQueryOutputFetchHandler>::build(
            target_info.sql_type,
            geo_return_type_,
            nullptr,
            false,
            device_id_,
            reinterpret_cast<int64_t>(
                varlen_buffer[getCoordsDataPtr(geo_target_ptr)].data()),
            static_cast<int64_t>(varlen_buffer[getCoordsDataPtr(geo_target_ptr)].size()),
            reinterpret_cast<int64_t>(
                varlen_buffer[getCoordsDataPtr(geo_target_ptr) + 1].data()),
            static_cast<int64_t>(
                varlen_buffer[getCoordsDataPtr(geo_target_ptr) + 1].size()),
            reinterpret_cast<int64_t>(
                varlen_buffer[getCoordsDataPtr(geo_target_ptr) + 2].data()),
            static_cast<int64_t>(
                varlen_buffer[getCoordsDataPtr(geo_target_ptr) + 2].size()));
      } else if (!lazy_fetch_info_.empty()) {
        const auto& frag_col_buffers = getFragColBuffers();
        return GeoTargetValueBuilder<kMULTIPOLYGON, GeoLazyFetchHandler>::build(
            target_info.sql_type,
            geo_return_type_,
            frag_col_buffers[/* coords col */],
            getCoordsDataPtr(geo_target_ptr),
            frag_col_buffers[/* ring sizes col */],
            getCoordsDataPtr(geo_target_ptr),
            frag_col_buffers[/* poly rings col */],
            getCoordsDataPtr(geo_target_ptr));
      } else {
        return GeoTargetValueBuilder<kMULTIPOLYGON, GeoQueryOutputFetchHandler>::build(
            target_info.sql_type,
            geo_return_type_,
            is_gpu_fetch ? getDataMgr() : nullptr,
            is_gpu_fetch,
            device_id_,
            getCoordsDataPtr(geo_target_ptr),
            getCoordsLength(geo_target_ptr),
            getRingSizesPtr(geo_target_ptr),
            getRingSizesLength(geo_target_ptr) * 4,
            getPolyRingsPtr(geo_target_ptr),
            getPolyRingsLength(geo_target_ptr) * 4);
      }
      break;
    }
    default:
      throw std::runtime_error("Unknown Geometry type encountered: " +
                               target_info.sql_type.get_type_name());
  }
  // ...
}
TargetValue ResultSet::makeTargetValue(const int8_t* ptr,
                                       const int8_t compact_sz,
                                       const TargetInfo& target_info,
                                       const size_t target_logical_idx,
                                       const bool translate_strings,
                                       const bool decimal_to_double,
                                       const size_t entry_buff_idx) const {
  auto actual_compact_sz = compact_sz;
  const auto& type_info = target_info.sql_type;
  if (type_info.get_type() == kFLOAT && !query_mem_desc_.forceFourByteFloat()) {
    if (query_mem_desc_.isLogicalSizedColumnsAllowed()) {
      actual_compact_sz = sizeof(float);
    } else {
      actual_compact_sz = sizeof(double);
    }
    if (target_info.is_agg &&
        (target_info.agg_kind == kAVG || target_info.agg_kind == kSUM ||
         target_info.agg_kind == kMIN || target_info.agg_kind == kMAX ||
         target_info.agg_kind == kSINGLE_VALUE)) {
      // These aggregates pack two floats into one 8-byte slot; only the first
      // four bytes of the slot are used.
      actual_compact_sz = sizeof(float);
    }
  }
  if (get_compact_type(target_info).is_date_in_days()) {
    // Dates encoded in days are converted to 8-byte values on read.
    actual_compact_sz = sizeof(int64_t);
  }
  // String dictionary keys are read as 32-bit values regardless of width.
  if (type_info.is_string() && type_info.get_compression() == kENCODING_DICT &&
      type_info.get_comp_param()) {
    actual_compact_sz = sizeof(int32_t);
  }

  auto ival = read_int_from_buff(ptr, actual_compact_sz);
  const auto& chosen_type = get_compact_type(target_info);
  if (!lazy_fetch_info_.empty()) {
    CHECK_LT(target_logical_idx, lazy_fetch_info_.size());
    const auto& col_lazy_fetch = lazy_fetch_info_[target_logical_idx];
    if (col_lazy_fetch.is_lazily_fetched) {
      // ...
      auto& frag_col_buffers = getColumnFrag(storage_idx.first, target_logical_idx, ival);
      CHECK_LT(size_t(col_lazy_fetch.local_col_id), frag_col_buffers.size());
      ival = lazy_decode(
          col_lazy_fetch, frag_col_buffers[col_lazy_fetch.local_col_id], ival);
      if (chosen_type.is_fp()) {
        const auto dval = *reinterpret_cast<const double*>(may_alias_ptr(&ival));
        if (chosen_type.get_type() == kFLOAT) {
          return ScalarTargetValue(static_cast<float>(dval));
        } else {
          return ScalarTargetValue(dval);
        }
      }
    }
  }
  if (chosen_type.is_fp()) {
    if (target_info.agg_kind == kAPPROX_QUANTILE) {
      return *reinterpret_cast<double const*>(ptr) == NULL_DOUBLE
                 ? NULL_DOUBLE  // sql_validate / just_validate
                 : calculateQuantile(/* ... */);
    }
    switch (actual_compact_sz) {
      case 8: {
        const auto dval = *reinterpret_cast<const double*>(ptr);
        return chosen_type.get_type() == kFLOAT
                   ? ScalarTargetValue(static_cast<const float>(dval))
                   : ScalarTargetValue(dval);
      }
      case 4: {
        // ...
        return *reinterpret_cast<const float*>(ptr);
      }
      // ...
    }
  }
  if (chosen_type.is_integer() || chosen_type.is_boolean() || chosen_type.is_time() ||
      chosen_type.is_timeinterval()) {
    // ... count-distinct and null handling elided ...
  }
  if (chosen_type.is_string() && chosen_type.get_compression() == kENCODING_DICT) {
    if (translate_strings) {
      if (static_cast<int32_t>(ival) == NULL_INT) {
        return NullableString(nullptr);
      }
      StringDictionaryProxy* sdp{nullptr};
      if (!chosen_type.get_comp_param()) {
        // ... literal string dictionary proxy ...
      } else {
        sdp = catalog_
                  ? row_set_mem_owner_->getOrAddStringDictProxy(
                        chosen_type.get_comp_param(), false, catalog_)
                  : row_set_mem_owner_->getStringDictProxy(
                        chosen_type.get_comp_param());
      }
      // ...
    } else {
      return static_cast<int64_t>(static_cast<int32_t>(ival));
    }
  }
  if (chosen_type.is_decimal()) {
    if (decimal_to_double) {
      if (target_info.is_agg && /* ... null aggregate sentinel ... */) {
        return NULL_DOUBLE;
      }
      if (!chosen_type.get_notnull() && /* ... null sentinel ... */) {
        return NULL_DOUBLE;
      }
      return static_cast<double>(ival) / exp_to_scale(chosen_type.get_scale());
    }
    return ival;
  }
  // ...
}
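// The decimal finalization above divides the stored scaled integer by
// 10^scale: DECIMAL(10, 2) stores 12345 for 123.45, and
// 12345 / exp_to_scale(2) == 12345 / 100.0 == 123.45. A worked check,
// assuming only that exp_to_scale(n) returns 10^n as documented:
#include <cassert>
#include <cstdint>

void decimal_scale_example() {
  const int64_t ival = 12345;  // DECIMAL(10,2) payload for 123.45
  const double dval = static_cast<double>(ival) / exp_to_scale(2);
  assert(dval == 123.45);
}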
// Gets the TargetValue stored at position local_entry_idx in the col1_ptr
// compact buffer (columnar output).
TargetValue ResultSet::getTargetValueFromBufferColwise(
    const int8_t* col_ptr,
    const int8_t* keys_ptr,
    const QueryMemoryDescriptor& query_mem_desc,
    const size_t local_entry_idx,
    const size_t global_entry_idx,
    const TargetInfo& target_info,
    const size_t target_logical_idx,
    const size_t slot_idx,
    const bool translate_strings,
    const bool decimal_to_double) const {
  // ...
  const auto col1_ptr = col_ptr;
  // ...
  if (target_info.sql_type.is_geometry() &&
      (separate_varlen_storage_valid_ || !lazy_fetch_info_.empty())) {
    return makeGeoTargetValue(
        col1_ptr, slot_idx, target_info, target_logical_idx, global_entry_idx);
  }
  // ...
}
// Gets the TargetValue stored in slot_idx (and slot_idx + 1 for AVG) of
// rowwise_target_ptr.
TargetValue ResultSet::getTargetValueFromBufferRowwise(
    int8_t* rowwise_target_ptr,
    int8_t* keys_ptr,
    const size_t entry_buff_idx,
    const TargetInfo& target_info,
    const size_t target_logical_idx,
    const size_t slot_idx,
    const bool translate_strings,
    const bool decimal_to_double,
    const bool fixup_count_distinct_pointers) const {
  if (UNLIKELY(fixup_count_distinct_pointers)) {
    if (is_distinct_target(target_info)) {
      auto count_distinct_ptr_ptr = reinterpret_cast<int64_t*>(rowwise_target_ptr);
      const auto remote_ptr = *count_distinct_ptr_ptr;
      if (remote_ptr) {
        const auto ptr = storage_->mappedPtr(remote_ptr);
        if (ptr) {
          *count_distinct_ptr_ptr = ptr;
        } else {
          // A zero-filled buffer must be created for this remote_ptr.
          const auto& count_distinct_desc =
              query_mem_desc_.count_distinct_descriptors_[target_logical_idx];
          const auto bitmap_byte_sz = count_distinct_desc.sub_bitmap_count == 1
                                          ? count_distinct_desc.bitmapSizeBytes()
                                          : count_distinct_desc.bitmapPaddedSizeBytes();
          // ...
          *count_distinct_ptr_ptr = reinterpret_cast<int64_t>(count_distinct_buffer);
        }
      }
    }
    return int64_t(0);
  }
  if (target_info.sql_type.is_geometry() &&
      (separate_varlen_storage_valid_ || !lazy_fetch_info_.empty())) {
    return makeGeoTargetValue(
        rowwise_target_ptr, slot_idx, target_info, target_logical_idx, entry_buff_idx);
  }

  auto ptr1 = rowwise_target_ptr;
  // ...
  int8_t compact_sz2 = 0;
  // ...
bool ResultSetStorage::isEmptyEntry(const size_t entry_idx, const int8_t* buff) const {
  // ...
  if (query_mem_desc_.didOutputColumnar()) {
    return isEmptyEntryColumnar(entry_idx, buff);
  }
  if (query_mem_desc_.hasKeylessHash()) {
    // ...
    CHECK_LT(static_cast<size_t>(query_mem_desc_.getTargetIdxForKey()),
             target_init_vals_.size());
    // ... compare the key target's slot against its init value ...
  } else {
    const auto keys_ptr = row_ptr_rowwise(buff, query_mem_desc_, entry_idx);
    switch (query_mem_desc_.getEffectiveKeyWidth()) {
      case 4:
        // ...
        return *reinterpret_cast<const int32_t*>(keys_ptr) == EMPTY_KEY_32;
      case 8:
        return *reinterpret_cast<const int64_t*>(keys_ptr) == EMPTY_KEY_64;
      // ...
    }
  }
}

bool ResultSetStorage::isEmptyEntryColumnar(const size_t entry_idx,
                                            const int8_t* buff) const {
  // ...
  CHECK_LT(entry_idx, getEntryCount());
  if (query_mem_desc_.hasKeylessHash()) {
    // ...
    CHECK_LT(static_cast<size_t>(query_mem_desc_.getTargetIdxForKey()),
             target_init_vals_.size());
    // ...
    const auto entry_buff =
        col_buff + entry_idx * query_mem_desc_.getPaddedSlotWidthBytes(
                                   query_mem_desc_.getTargetIdxForKey());
    // ... compare against target_init_vals_ ...
  } else {
    // It is enough to find the first group key which is empty.
    if (query_mem_desc_.getQueryDescriptionType() == QueryDescriptionType::Projection) {
      return reinterpret_cast<const int64_t*>(buff)[entry_idx] == EMPTY_KEY_64;
    } else {
      // ...
      const auto target_buff = buff + query_mem_desc_.getPrependedGroupColOffInBytes(0);
      switch (query_mem_desc_.groupColWidth(0)) {
        case 8:
          return reinterpret_cast<const int64_t*>(target_buff)[entry_idx] ==
                 EMPTY_KEY_64;
        case 4:
          return reinterpret_cast<const int32_t*>(target_buff)[entry_idx] ==
                 EMPTY_KEY_32;
        case 2:
          return reinterpret_cast<const int16_t*>(target_buff)[entry_idx] ==
                 EMPTY_KEY_16;
        case 1:
          return reinterpret_cast<const int8_t*>(target_buff)[entry_idx] ==
                 EMPTY_KEY_8;
        // ...
      }
    }
  }
  // ...
}
template <typename T>
inline size_t make_bin_search(size_t l, size_t r, T&& is_empty_fn) {
  // Avoid the search entirely if there are no empty keys.
  if (!is_empty_fn(r - 1)) {
    return r;
  }
  // ... binary search for the first empty entry:
  size_t c = (l + r) / 2;
  if (is_empty_fn(c)) {
  // ...
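// make_bin_search assumes projection buffers hold a prefix of non-empty
// entries followed by a suffix of empty ones, and returns the index of the
// first empty entry (i.e. the row count). A complete standalone sketch of the
// same invariant-based search, mirroring the truncated body above
// (first_empty is a hypothetical name):
#include <cstddef>

template <typename F>
size_t first_empty(size_t l, size_t r, F&& is_empty) {
  if (!is_empty(r - 1)) {
    return r;  // no empty suffix at all
  }
  --r;  // buff[r] is empty; invariant: answer lies in [l, r]
  while (l != r) {
    const size_t c = (l + r) / 2;
    if (is_empty(c)) {
      r = c;      // first empty entry is at or before c
    } else {
      l = c + 1;  // first empty entry is after c
    }
  }
  return r;
}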
size_t ResultSetStorage::binSearchRowCount() const {
  // ...
  if (query_mem_desc_.didOutputColumnar()) {
    return make_bin_search(0, query_mem_desc_.getEntryCount(), [this](size_t idx) {
      return reinterpret_cast<const int64_t*>(buff_)[idx] == EMPTY_KEY_64;
    });
  }
  return make_bin_search(0, query_mem_desc_.getEntryCount(), [this](size_t idx) {
    const auto keys_ptr = row_ptr_rowwise(buff_, query_mem_desc_, idx);
    return *reinterpret_cast<const int64_t*>(keys_ptr) == EMPTY_KEY_64;
  });
}

bool ResultSetStorage::isEmptyEntry(const size_t entry_idx) const {
  return isEmptyEntry(entry_idx, buff_);
}

bool ResultSet::isNull(const SQLTypeInfo& ti,
                       const InternalTargetValue& val,
                       const bool float_argument_input) {
  // ...