#include <boost/math/special_functions/fpclassify.hpp>
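
// make_avg_target_value: reassembles an AVG aggregate from its two physical
// slots (running sum and count). Floating-point sums are bit-cast through
// int64_t via may_alias_ptr to sidestep strict-aliasing issues.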
                                  const int8_t compact_sz1,
                                  const int8_t compact_sz2,
  const auto actual_compact_sz1 = float_argument_input ? sizeof(float) : compact_sz1;
  if (agg_ti.is_integer() || agg_ti.is_decimal()) {
  } else if (agg_ti.is_fp()) {
    switch (actual_compact_sz1) {
        double d = *reinterpret_cast<const double*>(ptr1);
        sum = *reinterpret_cast<const int64_t*>(may_alias_ptr(&d));
        double d = *reinterpret_cast<const float*>(ptr1);
        sum = *reinterpret_cast<const int64_t*>(may_alias_ptr(&d));
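
// advance_col_buff_to_slot: walks the columnar output buffer target by target
// until the requested slot is reached, stepping over the extra count slot that
// an AVG aggregate occupies.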
                                       const std::vector<TargetInfo>& targets,
                                       const size_t slot_idx,
                                       const bool separate_varlen_storage) {
  size_t agg_col_idx{0};
  for (size_t target_idx = 0; target_idx < targets.size(); ++target_idx) {
    if (agg_col_idx == slot_idx) {
    CHECK_LT(agg_col_idx, buffer_col_count);
    const auto& agg_info = targets[target_idx];
    if (agg_info.is_agg && agg_info.agg_kind == kAVG) {
      if (agg_col_idx + 1 == slot_idx) {
          crt_col_ptr, query_mem_desc, agg_col_idx + 1);
    agg_col_idx = advance_slot(agg_col_idx, agg_info, separate_varlen_storage);
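
// ResultSet::getRowAt: materializes one logical row. It resolves which storage
// fragment owns global_entry_idx, skips empty hash-table entries, then decodes
// each target either column-wise or row-wise depending on the output layout;
// targets_to_skip lets callers leave selected columns undecoded.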
                                        const size_t global_entry_idx,
                                        const bool translate_strings,
                                        const bool fixup_count_distinct_pointers,
                                        const std::vector<bool>& targets_to_skip) const {
  const auto storage_lookup_result =
      fixup_count_distinct_pointers
          ? StorageLookupResult{storage_.get(), global_entry_idx, 0}
          : findStorage(global_entry_idx);
  const auto storage = storage_lookup_result.storage_ptr;
  const auto local_entry_idx = storage_lookup_result.fixedup_entry_idx;
  if (!fixup_count_distinct_pointers && storage->isEmptyEntry(local_entry_idx)) {
  const auto buff = storage->buff_;
  std::vector<TargetValue> row;
  size_t agg_col_idx = 0;
  int8_t* rowwise_target_ptr{nullptr};
  int8_t* keys_ptr{nullptr};
  const int8_t* crt_col_ptr{nullptr};
  if (query_mem_desc_.didOutputColumnar()) {
    crt_col_ptr = get_cols_ptr(buff, storage->query_mem_desc_);
    const auto key_bytes_with_padding =
    rowwise_target_ptr = keys_ptr + key_bytes_with_padding;
  for (size_t target_idx = 0; target_idx < storage->targets_.size(); ++target_idx) {
    const auto& agg_info = storage->targets_[target_idx];
    if (query_mem_desc_.didOutputColumnar()) {
      if (UNLIKELY(!targets_to_skip.empty())) {
        row.push_back(!targets_to_skip[target_idx]
                          ? getTargetValueFromBufferColwise(crt_col_ptr,
                                                            storage->query_mem_desc_,
        row.push_back(getTargetValueFromBufferColwise(crt_col_ptr,
                                                      storage->query_mem_desc_,
                                  storage->query_mem_desc_,
                                  separate_varlen_storage_valid_);
      if (UNLIKELY(!targets_to_skip.empty())) {
        row.push_back(!targets_to_skip[target_idx]
                          ? getTargetValueFromBufferRowwise(rowwise_target_ptr,
                                                            fixup_count_distinct_pointers)
        row.push_back(getTargetValueFromBufferRowwise(rowwise_target_ptr,
                                                      fixup_count_distinct_pointers));
                              separate_varlen_storage_valid_);
    agg_col_idx = advance_slot(agg_col_idx, agg_info, separate_varlen_storage_valid_);
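
// Positional single-value access: advances the shared cursor row_idx times
// under row_iteration_mutex_, then returns the requested column of the next row.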
                                  const size_t col_idx,
                                  const bool translate_strings,
                                  const bool decimal_to_double) const {
  std::lock_guard<std::mutex> lock(row_iteration_mutex_);
  for (size_t i = 0; i < row_idx; ++i) {
    auto crt_row = getNextRowUnlocked(translate_strings, decimal_to_double);
    CHECK(!crt_row.empty());
  auto crt_row = getNextRowUnlocked(translate_strings, decimal_to_double);
  CHECK(!crt_row.empty());
  return crt_row[col_idx];
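
// ResultSet::getOneColRow: fast path for a single integer column in row-wise
// layout, returning the value together with a validity flag.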
  const auto storage_lookup_result = findStorage(global_entry_idx);
  const auto storage = storage_lookup_result.storage_ptr;
  const auto local_entry_idx = storage_lookup_result.fixedup_entry_idx;
  if (storage->isEmptyEntry(local_entry_idx)) {
  const auto buff = storage->buff_;
  CHECK(!query_mem_desc_.didOutputColumnar());
  const auto keys_ptr = row_ptr_rowwise(buff, query_mem_desc_, local_entry_idx);
  const auto key_bytes_with_padding =
  const auto rowwise_target_ptr = keys_ptr + key_bytes_with_padding;
  const auto tv = getTargetValueFromBufferRowwise(rowwise_target_ptr,
  const auto scalar_tv = boost::get<ScalarTargetValue>(&tv);
  const auto ival_ptr = boost::get<int64_t>(scalar_tv);
  return {*ival_ptr, true};
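
// The next three accessors map a logical row index through the optional sort
// permutation before delegating to the physical-entry routines above.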
  if (logical_index >= entryCount()) {
  const auto entry_idx =
      permutation_.empty() ? logical_index : permutation_[logical_index];
  return getRowAt(entry_idx, true, false, false);

    const size_t logical_index,
    const std::vector<bool>& targets_to_skip) const {
  if (logical_index >= entryCount()) {
  const auto entry_idx =
      permutation_.empty() ? logical_index : permutation_[logical_index];
  return getRowAt(entry_idx, false, false, false, targets_to_skip);

  if (logical_index >= entryCount()) {
  const auto entry_idx =
      permutation_.empty() ? logical_index : permutation_[logical_index];
  const auto storage_lookup_result = findStorage(entry_idx);
  const auto storage = storage_lookup_result.storage_ptr;
  const auto local_entry_idx = storage_lookup_result.fixedup_entry_idx;
  return storage->isEmptyEntry(local_entry_idx);

                                            const bool decimal_to_double) const {
  std::lock_guard<std::mutex> lock(row_iteration_mutex_);
  if (!storage_ && !just_explain_) {
  return getNextRowUnlocked(translate_strings, decimal_to_double);

    const bool translate_strings,
    const bool decimal_to_double) const {
  if (fetched_so_far_) {
  return {explanation_};
  return getNextRowImpl(translate_strings, decimal_to_double);
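
// getNextRowImpl honors OFFSET/LIMIT via drop_first_/keep_first_: the cursor
// keeps advancing until the first drop_first_ entries have been consumed.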
                                                const bool decimal_to_double) const {
  size_t entry_buff_idx = 0;
    if (keep_first_ && fetched_so_far_ >= drop_first_ + keep_first_) {
    entry_buff_idx = advanceCursorToNextEntry();
    if (crt_row_buff_idx_ >= entryCount()) {
      CHECK_EQ(entryCount(), crt_row_buff_idx_);
  } while (drop_first_ && fetched_so_far_ <= drop_first_);
  auto row = getRowAt(entry_buff_idx, translate_strings, decimal_to_double, false);

                                             const int8_t* col1_ptr,
                                             const int8_t compact_sz1) {
  return col1_ptr + compact_sz1 * entry_idx;

      return static_cast<int32_t>(ival);
      return static_cast<int16_t>(ival);
      return static_cast<int8_t>(ival);
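
// initializeOffsetsForStorage (row-wise accessor): precomputes, per storage
// fragment and per target, the (ptr, width) slot offsets so later lookups avoid
// re-walking the target list. AVG and real varlen targets record a second slot
// (ptr2/compact_sz2).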
  for (size_t storage_idx = 0; storage_idx < result_set_->appended_storage_.size() + 1;
    const int8_t* rowwise_target_ptr{0};
    size_t agg_col_idx = 0;
    for (size_t target_idx = 0; target_idx < result_set_->storage_->targets_.size();
      const auto& agg_info = result_set_->storage_->targets_[target_idx];
      auto ptr1 = rowwise_target_ptr;
      const auto compact_sz1 =
          result_set_->query_mem_desc_.getPaddedSlotWidthBytes(agg_col_idx)
              ? result_set_->query_mem_desc_.getPaddedSlotWidthBytes(agg_col_idx)
      const int8_t* ptr2{nullptr};
      int8_t compact_sz2{0};
      if ((agg_info.is_agg && agg_info.agg_kind == kAVG)) {
        ptr2 = ptr1 + compact_sz1;
            result_set_->query_mem_desc_.getPaddedSlotWidthBytes(agg_col_idx + 1);
        ptr2 = ptr1 + compact_sz1;
        if (!result_set_->separate_varlen_storage_valid_) {
              result_set_->query_mem_desc_.getPaddedSlotWidthBytes(agg_col_idx + 1);
                         static_cast<size_t>(compact_sz1),
                         static_cast<size_t>(compact_sz2)});
          agg_col_idx, agg_info, result_set_->separate_varlen_storage_valid_);
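
// getColumnInternal (row-wise): reads one target out of a row-wise entry using
// the precomputed offsets. The cached ptr1/ptr2 are byte distances stored as
// pointers, hence the reinterpret_cast<size_t>.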
                                        const size_t entry_idx,
                                        const size_t target_logical_idx,
  const int8_t* rowwise_target_ptr{nullptr};
  const int8_t* keys_ptr{nullptr};
  const size_t storage_idx = storage_lookup_result.storage_idx;
  CHECK_LT(storage_idx, offsets_for_storage_.size());
  CHECK_LT(target_logical_idx, offsets_for_storage_[storage_idx].size());
  const auto& offsets_for_target = offsets_for_storage_[storage_idx][target_logical_idx];
  const auto& agg_info = result_set_->storage_->targets_[target_logical_idx];
  const auto& type_info = agg_info.sql_type;
  keys_ptr = get_rowwise_ptr(buff, entry_idx);
  rowwise_target_ptr = keys_ptr + key_bytes_with_padding_;
  auto ptr1 = rowwise_target_ptr + reinterpret_cast<size_t>(offsets_for_target.ptr1);
  if (result_set_->query_mem_desc_.targetGroupbyIndicesSize() > 0) {
    if (result_set_->query_mem_desc_.getTargetGroupbyIndex(target_logical_idx) >= 0) {
          result_set_->query_mem_desc_.getTargetGroupbyIndex(target_logical_idx) *
                                       storage_lookup_result);
  if (agg_info.is_agg && agg_info.agg_kind == kAVG) {
    CHECK(offsets_for_target.ptr2);
        rowwise_target_ptr + reinterpret_cast<size_t>(offsets_for_target.ptr2);
  if (type_info.is_string() && type_info.get_compression() == kENCODING_NONE) {
    CHECK(!agg_info.is_agg);
    if (!result_set_->lazy_fetch_info_.empty()) {
      CHECK_LT(target_logical_idx, result_set_->lazy_fetch_info_.size());
      const auto& col_lazy_fetch = result_set_->lazy_fetch_info_[target_logical_idx];
      if (col_lazy_fetch.is_lazily_fetched) {
    if (result_set_->separate_varlen_storage_valid_) {
               result_set_->serialized_varlen_buffer_.size());
      const auto& varlen_buffer_for_fragment =
          result_set_->serialized_varlen_buffer_[storage_lookup_result.storage_idx];
      CHECK_LT(static_cast<size_t>(i1), varlen_buffer_for_fragment.size());
      CHECK(offsets_for_target.ptr2);
          rowwise_target_ptr + reinterpret_cast<size_t>(offsets_for_target.ptr2);
      return result_set_->getVarlenOrderEntry(i1, str_len);
      type_info.is_fp() ? i1 : int_resize_cast(i1, type_info.get_logical_size()));
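
// initializeOffsetsForStorage (columnar accessor): the column-wise counterpart;
// here the cached offsets are actual column-start pointers into each storage
// buffer, advanced slot by slot (two slots for AVG and real varlen targets).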
  const auto key_width = result_set_->query_mem_desc_.getEffectiveKeyWidth();
  for (size_t storage_idx = 0; storage_idx < result_set_->appended_storage_.size() + 1;
    offsets_for_storage_.emplace_back();
    const int8_t* buff = storage_idx == 0
                             ? result_set_->storage_->buff_
                             : result_set_->appended_storage_[storage_idx - 1]->buff_;
    const auto& crt_query_mem_desc =
            ? result_set_->storage_->query_mem_desc_
            : result_set_->appended_storage_[storage_idx - 1]->query_mem_desc_;
    const int8_t* crt_col_ptr = get_cols_ptr(buff, crt_query_mem_desc);
    size_t agg_col_idx = 0;
    for (size_t target_idx = 0; target_idx < result_set_->storage_->targets_.size();
      const auto& agg_info = result_set_->storage_->targets_[target_idx];
      const auto compact_sz1 =
          crt_query_mem_desc.getPaddedSlotWidthBytes(agg_col_idx)
              ? crt_query_mem_desc.getPaddedSlotWidthBytes(agg_col_idx)
          crt_col_ptr, crt_query_mem_desc, agg_col_idx);
      const bool uses_two_slots = (agg_info.is_agg && agg_info.agg_kind == kAVG) ||
      const auto col2_ptr = uses_two_slots ? next_col_ptr : nullptr;
      const auto compact_sz2 =
              ? crt_query_mem_desc.getPaddedSlotWidthBytes(agg_col_idx + 1)
      offsets_for_storage_[storage_idx].push_back(
                         static_cast<size_t>(compact_sz1),
                         static_cast<size_t>(compact_sz2)});
      crt_col_ptr = next_col_ptr;
      if (uses_two_slots) {
            crt_col_ptr, crt_query_mem_desc, agg_col_idx + 1);
          agg_col_idx, agg_info, result_set_->separate_varlen_storage_valid_);
    CHECK_EQ(offsets_for_storage_[storage_idx].size(),
             result_set_->storage_->targets_.size());

                                        const size_t entry_idx,
                                        const size_t target_logical_idx,
  const size_t storage_idx = storage_lookup_result.storage_idx;
  CHECK_LT(storage_idx, offsets_for_storage_.size());
  CHECK_LT(target_logical_idx, offsets_for_storage_[storage_idx].size());
  const auto& offsets_for_target = offsets_for_storage_[storage_idx][target_logical_idx];
  const auto& agg_info = result_set_->storage_->targets_[target_logical_idx];
  const auto& type_info = agg_info.sql_type;
  auto ptr1 = offsets_for_target.ptr1;
  if (result_set_->query_mem_desc_.targetGroupbyIndicesSize() > 0) {
    if (result_set_->query_mem_desc_.getTargetGroupbyIndex(target_logical_idx) >= 0) {
          buff + result_set_->query_mem_desc_.getTargetGroupbyIndex(target_logical_idx) *
                     result_set_->query_mem_desc_.getEffectiveKeyWidth() *
                     result_set_->query_mem_desc_.entry_count_;
  const auto i1 = result_set_->lazyReadInt(
                          offsets_for_target.compact_sz1),
      storage_lookup_result);
  if (agg_info.is_agg && agg_info.agg_kind == kAVG) {
    CHECK(offsets_for_target.ptr2);
            entry_idx, offsets_for_target.ptr2, offsets_for_target.compact_sz2),
        offsets_for_target.compact_sz2);
  if (type_info.is_string() && type_info.get_compression() == kENCODING_NONE) {
    CHECK(!agg_info.is_agg);
    if (!result_set_->lazy_fetch_info_.empty()) {
      CHECK_LT(target_logical_idx, result_set_->lazy_fetch_info_.size());
      const auto& col_lazy_fetch = result_set_->lazy_fetch_info_[target_logical_idx];
      if (col_lazy_fetch.is_lazily_fetched) {
    if (result_set_->separate_varlen_storage_valid_) {
               result_set_->serialized_varlen_buffer_.size());
      const auto& varlen_buffer_for_fragment =
          result_set_->serialized_varlen_buffer_[storage_lookup_result.storage_idx];
      CHECK_LT(static_cast<size_t>(i1), varlen_buffer_for_fragment.size());
      CHECK(offsets_for_target.ptr2);
              entry_idx, offsets_for_target.ptr2, offsets_for_target.compact_sz2),
          offsets_for_target.compact_sz2);
      return result_set_->getVarlenOrderEntry(i1, i2);
      type_info.is_fp() ? i1 : int_resize_cast(i1, type_info.get_logical_size()));
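
// getVarlenOrderEntry: if the payload still resides on the GPU it is first
// copied into a temporary CPU buffer; otherwise the pointer is already
// host-addressable and used directly.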
                                                     const size_t str_len) const {
  char* host_str_ptr{nullptr};
  std::vector<int8_t> cpu_buffer;
    cpu_buffer.resize(str_len);
    auto& data_mgr = executor->catalog_->getDataMgr();
                  static_cast<CUdeviceptr>(str_ptr),
    host_str_ptr = reinterpret_cast<char*>(&cpu_buffer[0]);
    host_str_ptr = reinterpret_cast<char*>(str_ptr);
  std::string str(host_str_ptr, str_len);

                                 const size_t target_logical_idx,
  if (col_lazy_fetch.is_lazily_fetched) {
    int64_t ival_copy = ival;
    auto& frag_col_buffers =
    auto& frag_col_buffer = frag_col_buffers[col_lazy_fetch.local_col_id];
      CHECK(!target_info.is_agg);
      if (target_info.sql_type.is_string() &&
            reinterpret_cast<ChunkIter*>(const_cast<int8_t*>(frag_col_buffer)),
        std::string fetched_str(reinterpret_cast<char*>(vd.pointer), vd.length);
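
// Both cursor-advance loops below share one pattern: resolve the storage that
// owns the candidate entry, then stop at the first non-empty entry.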
    const auto storage_lookup_result = findStorage(entry_idx);
    const auto storage = storage_lookup_result.storage_ptr;
    const auto fixedup_entry_idx = storage_lookup_result.fixedup_entry_idx;
    if (!storage->isEmptyEntry(fixedup_entry_idx)) {
  const auto entry_idx =
  const auto storage_lookup_result = findStorage(entry_idx);
  const auto storage = storage_lookup_result.storage_ptr;
  const auto fixedup_entry_idx = storage_lookup_result.fixedup_entry_idx;
  if (!storage->isEmptyEntry(fixedup_entry_idx)) {

  return storage_->query_mem_desc_.getBufferSizeBytes(device_type);
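
// build_array_target_value<T>: reinterprets a raw array payload as contiguous
// values of T and wraps each element in a ScalarTargetValue.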
                                    const size_t buff_sz,
                                    std::shared_ptr<RowSetMemoryOwner> row_set_mem_owner) {
  std::vector<ScalarTargetValue> values;
  auto buff_elems = reinterpret_cast<const T*>(buff);
  CHECK_EQ(size_t(0), buff_sz % sizeof(T));
  const size_t num_elems = buff_sz / sizeof(T);
  for (size_t i = 0; i < num_elems; ++i) {
    values.push_back(make_scalar_tv<T>(buff_elems[i]));
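
// build_string_array_target_value: array elements are 4-byte dictionary ids;
// with translate_strings set they are resolved through the string dictionary
// proxy, otherwise the raw ids are returned as int64_t.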
    const size_t buff_sz,
    const bool translate_strings,
    std::shared_ptr<RowSetMemoryOwner> row_set_mem_owner,
  std::vector<ScalarTargetValue> values;
  CHECK_EQ(size_t(0), buff_sz % sizeof(int32_t));
  const size_t num_elems = buff_sz / sizeof(int32_t);
  if (translate_strings) {
    for (size_t i = 0; i < num_elems; ++i) {
      const auto string_id = buff[i];
        values.emplace_back(sdp->getString(string_id));
                                 ->getOrAddStringDictProxy(dict_id, false, catalog)
                                 ->getString(string_id)));
    for (size_t i = 0; i < num_elems; i++) {
      values.emplace_back(static_cast<int64_t>(buff[i]));
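
// The untemplated build_array_target_value dispatches on element size and
// floating-point-ness: 4 bytes may be float or int32_t, 8 bytes double or
// int64_t; string arrays take the dictionary path above.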
                                     const size_t buff_sz,
                                     const bool translate_strings,
                                     std::shared_ptr<RowSetMemoryOwner> row_set_mem_owner,
  if (elem_ti.is_string()) {
                                           elem_ti.get_comp_param(),
  switch (elem_ti.get_size()) {
      return build_array_target_value<int8_t>(buff, buff_sz, row_set_mem_owner);
      return build_array_target_value<int16_t>(buff, buff_sz, row_set_mem_owner);
      if (elem_ti.is_fp()) {
        return build_array_target_value<float>(buff, buff_sz, row_set_mem_owner);
      return build_array_target_value<int32_t>(buff, buff_sz, row_set_mem_owner);
      if (elem_ti.is_fp()) {
        return build_array_target_value<double>(buff, buff_sz, row_set_mem_owner);
      return build_array_target_value<int64_t>(buff, buff_sz, row_set_mem_owner);
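
// make_vals_vector: folds a flat (ptr, len, ptr, len, ...) tuple into a vector
// of pointer/length pairs via an index sequence.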
template <class Tuple, size_t... indices>
                 std::index_sequence<indices...>,
                 const Tuple& tuple) {
  return std::vector<std::pair<const int8_t*, const int64_t>>{
      std::make_pair(std::get<2 * indices>(tuple), std::get<2 * indices + 1>(tuple))...};

                                             const int64_t varlen_ptr) {
  auto ad = std::make_unique<ArrayDatum>();
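
// GeoLazyFetchHandler::fetch: lazily fetches each geo component chunk and marks
// the datum null when the payload is empty, missing, or a null point.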
template <typename... T>
  constexpr int num_vals = sizeof...(vals);
                "Must have consistent pointer/size pairs for lazy fetch of geo target values.");
  const auto vals_vector = make_vals_vector(std::make_index_sequence<num_vals / 2>{},
                                            std::make_tuple(vals...));
  for (const auto& col_pair : vals_vector) {
    ad_arr[ctr]->is_null = false;
    if (ad_arr[ctr]->length == 0 || ad_arr[ctr]->pointer == NULL ||
         is_null_point(geo_ti, ad_arr[ctr]->pointer, ad_arr[ctr]->length))) {
      ad_arr[ctr]->is_null = true;

                                                const int64_t length,
                                                const int device_id) {
  auto cpu_buf = std::shared_ptr<int8_t>(new int8_t[length], FreeDeleter());
      data_mgr, cpu_buf.get(), static_cast<CUdeviceptr>(varlen_ptr), length, device_id);
  return std::make_unique<ArrayDatum>(length, cpu_buf, false);
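
// The yield*Fetcher helpers return uniform (ptr, length) -> VarlenDatumPtr
// lambdas, so the generator below stays agnostic about whether the payload is a
// raw GPU pointer, needs a GPU copy, or is already on the CPU.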
  return [](const int64_t ptr, const int64_t length) -> VarlenDatumPtr {
    return std::make_unique<VarlenDatum>(length, reinterpret_cast<int8_t*>(ptr), false);
                                 const int device_id) {
  return [data_mgr_ptr, device_id](const int64_t ptr,
  return [](const int64_t ptr, const int64_t length) -> VarlenDatumPtr {
    return std::make_unique<VarlenDatum>(length, reinterpret_cast<int8_t*>(ptr), false);
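
// GeoQueryOutputFetchHandler::fetch: builds the ArrayDatum array through the
// generator above, choosing the fetcher by device; a 4 * sizeof(double) payload
// is additionally tested against the fixed-length null-array sentinel.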
template <typename... T>
  auto ad_arr_generator = [&](auto datum_fetcher) {
    constexpr int num_vals = sizeof...(vals);
                  "Must have consistent pointer/size pairs for lazy fetch of geo target values.");
    const auto vals_vector = std::vector<int64_t>{vals...};
    for (size_t i = 0; i < vals_vector.size(); i += 2) {
      ad_arr[ctr] = datum_fetcher(vals_vector[i], vals_vector[i + 1]);
      if (ad_arr[ctr]->length == 0 || ad_arr[ctr]->pointer == NULL) {
          is_null_point(geo_ti, ad_arr[ctr]->pointer, ad_arr[ctr]->length)) {
      } else if (ad_arr[ctr]->length == 4 * sizeof(double)) {
        is_null = dti.is_null_fixlen_array(ad_arr[ctr]->pointer, ad_arr[ctr]->length);
      ad_arr[ctr]->is_null = is_null;
  if (fetch_data_from_gpu) {
      return ad_arr_generator(yieldGpuPtrFetcher());
    return ad_arr_generator(yieldGpuDatumFetcher(data_mgr, device_id));
  return ad_arr_generator(yieldCpuDatumFetcher());
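
// GeoTargetValueBuilder<GEO_SOURCE_TYPE, GeoTargetFetcher>::build: fetches the
// component datums and serializes them into the requested GeoReturnType; each
// case first short-circuits to a null value for nullable null geo columns.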
template <SQLTypes GEO_SOURCE_TYPE, typename GeoTargetFetcher>
template <typename... T>
    auto ad_arr = GeoTargetFetcher::fetch(geo_ti, return_type, std::forward<T>(vals)...);
    static_assert(std::tuple_size<decltype(ad_arr)>::value > 0,
                  "ArrayDatum array for Geo Target must contain at least one value.");
    switch (return_type) {
        if (!geo_ti.get_notnull() && ad_arr[0]->is_null) {
                    GEO_SOURCE_TYPE>::GeoSerializerType::serialize(geo_ti,
        if (!geo_ti.get_notnull() && ad_arr[0]->is_null) {
                    GEO_SOURCE_TYPE>::GeoSerializerType::serialize(geo_ti,
        if (!geo_ti.get_notnull() && ad_arr[0]->is_null) {
                    GEO_SOURCE_TYPE>::GeoSerializerType::serialize(geo_ti,
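
// get_frag_id_and_local_idx: scans fragment offsets from the back to find the
// fragment containing global_idx and rebases the index into that fragment.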
template <typename T>
    const std::vector<std::vector<T>>& frag_offsets,
    const size_t tab_or_col_idx,
    const int64_t global_idx) {
  for (int64_t frag_id = frag_offsets.size() - 1; frag_id > 0; --frag_id) {
    CHECK_LT(tab_or_col_idx, frag_offsets[frag_id].size());
    const auto frag_off = static_cast<int64_t>(frag_offsets[frag_id][tab_or_col_idx]);
    if (frag_off < global_idx) {
      return {frag_id, global_idx - frag_off};

                                  const size_t col_logical_idx,
                                  int64_t& global_idx) const {
  int64_t frag_id = 0;
  int64_t local_idx = global_idx;
    global_idx = local_idx;
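
// copyColumnIntoBuffer: memcpy-based direct columnar export; copies the column
// from the primary storage first, then appends the same column from each
// appended storage while tracking out_buff_offset.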
                                     int8_t* output_buffer,
                                     const size_t output_buffer_size) const {
  CHECK(output_buffer_size > 0);
  CHECK(output_buffer);
  size_t out_buff_offset = 0;
  const size_t crt_storage_row_count = storage_->query_mem_desc_.getEntryCount();
  const size_t crt_buffer_size = crt_storage_row_count * column_width_size;
  const size_t column_offset = storage_->query_mem_desc_.getColOffInBytes(column_idx);
  const int8_t* storage_buffer = storage_->getUnderlyingBuffer() + column_offset;
  CHECK(crt_buffer_size <= output_buffer_size);
  std::memcpy(output_buffer, storage_buffer, crt_buffer_size);
  out_buff_offset += crt_buffer_size;
    const size_t crt_storage_row_count =
    if (crt_storage_row_count == 0) {
    CHECK_LT(out_buff_offset, output_buffer_size);
    const size_t crt_buffer_size = crt_storage_row_count * column_width_size;
    const size_t column_offset =
    const int8_t* storage_buffer =
    CHECK(out_buff_offset + crt_buffer_size <= output_buffer_size);
    std::memcpy(output_buffer + out_buff_offset, storage_buffer, crt_buffer_size);
    out_buff_offset += crt_buffer_size;
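
// getEntryAt<ENTRY_TYPE, QUERY_TYPE, COLUMNAR_FORMAT>: compile-time dispatch to
// the four perfect-hash/baseline x columnar/row-wise accessors below. A
// hypothetical call site (names assumed for illustration) might read:
//   rs->getEntryAt<int64_t, QueryDescriptionType::GroupByPerfectHash, true>(
//       row_idx, target_idx, slot_idx);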
template <typename ENTRY_TYPE, QueryDescriptionType QUERY_TYPE, bool COLUMNAR_FORMAT>
                                  const size_t target_idx,
                                  const size_t slot_idx) const {
    if constexpr (COLUMNAR_FORMAT) {
      return getColumnarPerfectHashEntryAt<ENTRY_TYPE>(row_idx, target_idx, slot_idx);
      return getRowWisePerfectHashEntryAt<ENTRY_TYPE>(row_idx, target_idx, slot_idx);
    if constexpr (COLUMNAR_FORMAT) {
      return getColumnarBaselineEntryAt<ENTRY_TYPE>(row_idx, target_idx, slot_idx);
      return getRowWiseBaselineEntryAt<ENTRY_TYPE>(row_idx, target_idx, slot_idx);
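
// Explicit instantiations of getEntryAt for every supported DATA_T follow.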
#define DEF_GET_ENTRY_AT(query_type, columnar_output)                          \
  template DATA_T ResultSet::getEntryAt<DATA_T, query_type, columnar_output>( \
      const size_t row_idx, const size_t target_idx, const size_t slot_idx) const;

#define DATA_T int64_t
#define DATA_T int32_t
#define DATA_T int16_t
#define DATA_T int8_t
#define DATA_T float
#define DATA_T double
#undef DEF_GET_ENTRY_AT
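
// The four accessors differ only in addressing: perfect-hash layouts address
// slots directly, baseline layouts may read a target out of the group-by key
// (getTargetGroupbyIndex), and columnar layouts stride by entry count.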
template <typename ENTRY_TYPE>
                                                  const size_t target_idx,
                                                  const size_t slot_idx) const {
  const size_t column_offset = storage_->query_mem_desc_.getColOffInBytes(slot_idx);
  const int8_t* storage_buffer = storage_->getUnderlyingBuffer() + column_offset;
  return reinterpret_cast<const ENTRY_TYPE*>(storage_buffer)[row_idx];

template <typename ENTRY_TYPE>
                                                 const size_t target_idx,
                                                 const size_t slot_idx) const {
  const size_t row_offset = storage_->query_mem_desc_.getRowSize() * row_idx;
  const size_t column_offset = storage_->query_mem_desc_.getColOffInBytes(slot_idx);
  const int8_t* storage_buffer =
      storage_->getUnderlyingBuffer() + row_offset + column_offset;
  return *reinterpret_cast<const ENTRY_TYPE*>(storage_buffer);

template <typename ENTRY_TYPE>
                                              const size_t target_idx,
                                              const size_t slot_idx) const {
  const auto key_width = storage_->query_mem_desc_.getEffectiveKeyWidth();
  const auto column_offset =
      (storage_->query_mem_desc_.getTargetGroupbyIndex(target_idx) < 0)
          ? storage_->query_mem_desc_.getColOffInBytes(slot_idx)
          : storage_->query_mem_desc_.getTargetGroupbyIndex(target_idx) * key_width;
  const auto storage_buffer = keys_ptr + column_offset;
  return *reinterpret_cast<const ENTRY_TYPE*>(storage_buffer);

template <typename ENTRY_TYPE>
                                               const size_t target_idx,
                                               const size_t slot_idx) const {
  const auto key_width = storage_->query_mem_desc_.getEffectiveKeyWidth();
  const auto column_offset =
      (storage_->query_mem_desc_.getTargetGroupbyIndex(target_idx) < 0)
          ? storage_->query_mem_desc_.getColOffInBytes(slot_idx)
          : storage_->query_mem_desc_.getTargetGroupbyIndex(target_idx) * key_width *
                storage_->query_mem_desc_.getEntryCount();
  const auto column_buffer = storage_->getUnderlyingBuffer() + column_offset;
  return reinterpret_cast<const ENTRY_TYPE*>(column_buffer)[row_idx];
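
// makeVarlenTargetValue: resolves strings/arrays from whichever source applies:
// the separate serialized varlen buffer, a lazily fetched fragment chunk, or an
// inline pointer that may first require a GPU-to-CPU copy.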
                                           const int8_t compact_sz1,
                                           const int8_t compact_sz2,
                                           const size_t target_logical_idx,
                                           const bool translate_strings,
                                           const size_t entry_buff_idx) const {
  if (varlen_ptr < 0) {
    const auto& varlen_buffer_for_storage =
    CHECK_LT(static_cast<size_t>(varlen_ptr), varlen_buffer_for_storage.size());
    return varlen_buffer_for_storage[varlen_ptr];
    CHECK_LT(static_cast<size_t>(varlen_ptr), varlen_buffer.size());
        reinterpret_cast<const int8_t*>(varlen_buffer[varlen_ptr].data()),
        varlen_buffer[varlen_ptr].size(),
  if (col_lazy_fetch.is_lazily_fetched) {
    auto& frag_col_buffers =
        getColumnFrag(storage_idx.first, target_logical_idx, varlen_ptr);
              frag_col_buffers[col_lazy_fetch.local_col_id])),
        std::string fetched_str(reinterpret_cast<char*>(vd.pointer), vd.length);
              frag_col_buffers[col_lazy_fetch.local_col_id])),
    if (ad.length > 0) {
    std::vector<int8_t> cpu_buffer;
    cpu_buffer.resize(length);
    auto& data_mgr = executor->catalog_->getDataMgr();
                  static_cast<CUdeviceptr>(varlen_ptr),
    varlen_ptr = reinterpret_cast<int64_t>(&cpu_buffer[0]);
      reinterpret_cast<const int8_t*>(varlen_ptr),
  return std::string(reinterpret_cast<char*>(varlen_ptr), length);

    throw std::runtime_error("Column target at index " + std::to_string(col_idx) +
                             " is not a geo column. It is of type " +
                             targets_[col_idx].sql_type.get_type_name() + ".");
  const auto& target_info = targets_[col_idx];
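
// makeGeoTargetValue: assembles a geo value from one or more physical slots.
// The lambdas below abstract slot addressing (row-wise vs. columnar), lazy
// fragment access, and the separate varlen storage; the switch then builds
// kPOINT/kLINESTRING/kPOLYGON/kMULTIPOLYGON values, which consume one, one,
// two, and three varlen components respectively.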
                                        const size_t slot_idx,
                                        const size_t target_logical_idx,
                                        const size_t entry_buff_idx) const {
  auto getNextTargetBufferRowWise = [&](const size_t slot_idx, const size_t range) {
  auto getNextTargetBufferColWise = [&](const size_t slot_idx, const size_t range) {
    const auto storage_info = findStorage(entry_buff_idx);
    auto crt_geo_col_ptr = geo_target_ptr;
    for (size_t i = slot_idx; i < slot_idx + range; i++) {
          crt_geo_col_ptr, storage_info.storage_ptr->query_mem_desc_, i);
    return crt_geo_col_ptr +
           storage_info.fixedup_entry_idx *
               storage_info.storage_ptr->query_mem_desc_.getPaddedSlotWidthBytes(
  auto getNextTargetBuffer = [&](const size_t slot_idx, const size_t range) {
               ? getNextTargetBufferColWise(slot_idx, range)
               : getNextTargetBufferRowWise(slot_idx, range);
  auto getCoordsDataPtr = [&](const int8_t* geo_target_ptr) {
  auto getCoordsLength = [&](const int8_t* geo_target_ptr) {
  auto getRingSizesPtr = [&](const int8_t* geo_target_ptr) {
  auto getRingSizesLength = [&](const int8_t* geo_target_ptr) {
  auto getPolyRingsPtr = [&](const int8_t* geo_target_ptr) {
  auto getPolyRingsLength = [&](const int8_t* geo_target_ptr) {
  auto getFragColBuffers = [&]() -> decltype(auto) {
    auto global_idx = getCoordsDataPtr(geo_target_ptr);
    return getColumnFrag(storage_idx.first, target_logical_idx, global_idx);
  auto getDataMgr = [&]() {
    auto& data_mgr = executor->catalog_->getDataMgr();
  auto getSeparateVarlenStorage = [&]() -> decltype(auto) {
    return varlen_buffer;
    CHECK_EQ(-1, getCoordsDataPtr(geo_target_ptr));
        const auto& varlen_buffer = getSeparateVarlenStorage();
        CHECK_LT(static_cast<size_t>(getCoordsDataPtr(geo_target_ptr)),
                 varlen_buffer.size());
        return GeoTargetValueBuilder<kPOINT, GeoQueryOutputFetchHandler>::build(
            reinterpret_cast<int64_t>(
                varlen_buffer[getCoordsDataPtr(geo_target_ptr)].data()),
            static_cast<int64_t>(varlen_buffer[getCoordsDataPtr(geo_target_ptr)].size()));
        const auto& frag_col_buffers = getFragColBuffers();
        return GeoTargetValueBuilder<kPOINT, GeoLazyFetchHandler>::build(
            getCoordsDataPtr(geo_target_ptr));
        return GeoTargetValueBuilder<kPOINT, GeoQueryOutputFetchHandler>::build(
            is_gpu_fetch ? getDataMgr() : nullptr,
            getCoordsDataPtr(geo_target_ptr),
            getCoordsLength(geo_target_ptr));
        const auto& varlen_buffer = getSeparateVarlenStorage();
        CHECK_LT(static_cast<size_t>(getCoordsDataPtr(geo_target_ptr)),
                 varlen_buffer.size());
        return GeoTargetValueBuilder<kLINESTRING, GeoQueryOutputFetchHandler>::build(
            reinterpret_cast<int64_t>(
                varlen_buffer[getCoordsDataPtr(geo_target_ptr)].data()),
            static_cast<int64_t>(varlen_buffer[getCoordsDataPtr(geo_target_ptr)].size()));
        const auto& frag_col_buffers = getFragColBuffers();
        return GeoTargetValueBuilder<kLINESTRING, GeoLazyFetchHandler>::build(
            getCoordsDataPtr(geo_target_ptr));
        return GeoTargetValueBuilder<kLINESTRING, GeoQueryOutputFetchHandler>::build(
            is_gpu_fetch ? getDataMgr() : nullptr,
            getCoordsDataPtr(geo_target_ptr),
            getCoordsLength(geo_target_ptr));
        const auto& varlen_buffer = getSeparateVarlenStorage();
        CHECK_LT(static_cast<size_t>(getCoordsDataPtr(geo_target_ptr) + 1),
                 varlen_buffer.size());
        return GeoTargetValueBuilder<kPOLYGON, GeoQueryOutputFetchHandler>::build(
            reinterpret_cast<int64_t>(
                varlen_buffer[getCoordsDataPtr(geo_target_ptr)].data()),
            static_cast<int64_t>(varlen_buffer[getCoordsDataPtr(geo_target_ptr)].size()),
            reinterpret_cast<int64_t>(
                varlen_buffer[getCoordsDataPtr(geo_target_ptr) + 1].data()),
            static_cast<int64_t>(
                varlen_buffer[getCoordsDataPtr(geo_target_ptr) + 1].size()));
        const auto& frag_col_buffers = getFragColBuffers();
        return GeoTargetValueBuilder<kPOLYGON, GeoLazyFetchHandler>::build(
            getCoordsDataPtr(geo_target_ptr),
            getCoordsDataPtr(geo_target_ptr));
        return GeoTargetValueBuilder<kPOLYGON, GeoQueryOutputFetchHandler>::build(
            is_gpu_fetch ? getDataMgr() : nullptr,
            getCoordsDataPtr(geo_target_ptr),
            getCoordsLength(geo_target_ptr),
            getRingSizesPtr(geo_target_ptr),
            getRingSizesLength(geo_target_ptr) * 4);
        const auto& varlen_buffer = getSeparateVarlenStorage();
        CHECK_LT(static_cast<size_t>(getCoordsDataPtr(geo_target_ptr) + 2),
                 varlen_buffer.size());
        return GeoTargetValueBuilder<kMULTIPOLYGON, GeoQueryOutputFetchHandler>::build(
            reinterpret_cast<int64_t>(
                varlen_buffer[getCoordsDataPtr(geo_target_ptr)].data()),
            static_cast<int64_t>(varlen_buffer[getCoordsDataPtr(geo_target_ptr)].size()),
            reinterpret_cast<int64_t>(
                varlen_buffer[getCoordsDataPtr(geo_target_ptr) + 1].data()),
            static_cast<int64_t>(
                varlen_buffer[getCoordsDataPtr(geo_target_ptr) + 1].size()),
            reinterpret_cast<int64_t>(
                varlen_buffer[getCoordsDataPtr(geo_target_ptr) + 2].data()),
            static_cast<int64_t>(
                varlen_buffer[getCoordsDataPtr(geo_target_ptr) + 2].size()));
        const auto& frag_col_buffers = getFragColBuffers();
        return GeoTargetValueBuilder<kMULTIPOLYGON, GeoLazyFetchHandler>::build(
            getCoordsDataPtr(geo_target_ptr),
            getCoordsDataPtr(geo_target_ptr),
            getCoordsDataPtr(geo_target_ptr));
        return GeoTargetValueBuilder<kMULTIPOLYGON, GeoQueryOutputFetchHandler>::build(
            is_gpu_fetch ? getDataMgr() : nullptr,
            getCoordsDataPtr(geo_target_ptr),
            getCoordsLength(geo_target_ptr),
            getRingSizesPtr(geo_target_ptr),
            getRingSizesLength(geo_target_ptr) * 4,
            getPolyRingsPtr(geo_target_ptr),
            getPolyRingsLength(geo_target_ptr) * 4);
      throw std::runtime_error("Unknown Geometry type encountered: " +
                                       const int8_t compact_sz,
                                       const size_t target_logical_idx,
                                       const bool translate_strings,
                                       const bool decimal_to_double,
                                       const size_t entry_buff_idx) const {
  auto actual_compact_sz = compact_sz;
  const auto& type_info = target_info.sql_type;
      actual_compact_sz = sizeof(float);
      actual_compact_sz = sizeof(double);
  if (target_info.is_agg &&
      actual_compact_sz = sizeof(float);
    actual_compact_sz = sizeof(int64_t);
  if (type_info.is_string() && type_info.get_compression() == kENCODING_DICT &&
      type_info.get_comp_param()) {
    actual_compact_sz = sizeof(int32_t);
  if (col_lazy_fetch.is_lazily_fetched) {
    auto& frag_col_buffers = getColumnFrag(storage_idx.first, target_logical_idx, ival);
    CHECK_LT(size_t(col_lazy_fetch.local_col_id), frag_col_buffers.size());
        col_lazy_fetch, frag_col_buffers[col_lazy_fetch.local_col_id], ival);
    if (chosen_type.is_fp()) {
      const auto dval = *reinterpret_cast<const double*>(may_alias_ptr(&ival));
      if (chosen_type.get_type() == kFLOAT) {
  if (chosen_type.is_fp()) {
      switch (actual_compact_sz) {
          const auto dval = *reinterpret_cast<const double*>(ptr);
          return chosen_type.get_type() == kFLOAT
          return *reinterpret_cast<const float*>(ptr);
  if (chosen_type.is_integer() || chosen_type.is_boolean() || chosen_type.is_time() ||
      chosen_type.is_timeinterval()) {
  if (chosen_type.is_string() && chosen_type.get_compression() == kENCODING_DICT) {
    if (translate_strings) {
      if (static_cast<int32_t>(ival) ==
      if (!chosen_type.get_comp_param()) {
                                     chosen_type.get_comp_param(), false, catalog_)
                  chosen_type.get_comp_param());
    return static_cast<int64_t>(static_cast<int32_t>(ival));
  if (chosen_type.is_decimal()) {
    if (decimal_to_double) {
      if (target_info.is_agg &&
      return static_cast<double>(ival) / exp_to_scale(chosen_type.get_scale());

    const int8_t* col_ptr,
    const int8_t* keys_ptr,
    const size_t local_entry_idx,
    const size_t global_entry_idx,
    const size_t target_logical_idx,
    const size_t slot_idx,
    const bool translate_strings,
    const bool decimal_to_double) const {
  const auto col1_ptr = col_ptr;
  const auto next_col_ptr =
        col1_ptr, slot_idx, target_info, target_logical_idx, global_entry_idx);
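
// getTargetValueFromBufferRowwise: when fixup_count_distinct_pointers is set
// (apparently for results received from a remote node), serialized
// count-distinct handles are rewritten to locally allocated buffers sized per
// the CountDistinctDescriptor before normal decoding proceeds.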
    int8_t* rowwise_target_ptr,
    const size_t entry_buff_idx,
    const size_t target_logical_idx,
    const size_t slot_idx,
    const bool translate_strings,
    const bool decimal_to_double,
    const bool fixup_count_distinct_pointers) const {
  if (UNLIKELY(fixup_count_distinct_pointers)) {
      auto count_distinct_ptr_ptr = reinterpret_cast<int64_t*>(rowwise_target_ptr);
      const auto remote_ptr = *count_distinct_ptr_ptr;
        const auto ptr = storage_->mappedPtr(remote_ptr);
          *count_distinct_ptr_ptr = ptr;
          const auto& count_distinct_desc =
          const auto bitmap_byte_sz = count_distinct_desc.sub_bitmap_count == 1
                                          ? count_distinct_desc.bitmapSizeBytes()
                                          : count_distinct_desc.bitmapPaddedSizeBytes();
          *count_distinct_ptr_ptr = reinterpret_cast<int64_t>(count_distinct_buffer);
        rowwise_target_ptr, slot_idx, target_info, target_logical_idx, entry_buff_idx);
  auto ptr1 = rowwise_target_ptr;
  int8_t compact_sz2 = 0;
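
// ResultSetStorage::isEmptyEntry: an entry is empty when its key slot holds the
// EMPTY_KEY_32/EMPTY_KEY_64 sentinel for the effective key width; keyless
// layouts compare a target slot against target_init_vals_ instead.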
    return isEmptyEntryColumnar(entry_idx, buff);
             target_init_vals_.size());
    return *reinterpret_cast<const int32_t*>(keys_ptr) == EMPTY_KEY_32;
    return *reinterpret_cast<const int64_t*>(keys_ptr) == EMPTY_KEY_64;

                                            const int8_t* buff) const {
             target_init_vals_.size());
  const auto entry_buff =
    return reinterpret_cast<const int64_t*>(buff)[entry_idx] == EMPTY_KEY_64;
      return reinterpret_cast<const int64_t*>(target_buff)[entry_idx] == EMPTY_KEY_64;
      return reinterpret_cast<const int32_t*>(target_buff)[entry_idx] == EMPTY_KEY_32;
      return reinterpret_cast<const int16_t*>(target_buff)[entry_idx] == EMPTY_KEY_16;
      return reinterpret_cast<const int8_t*>(target_buff)[entry_idx] == EMPTY_KEY_8;
template <typename T>
  if (!is_empty_fn(r - 1)) {
    size_t c = (l + r) / 2;
    if (is_empty_fn(c)) {

    return reinterpret_cast<const int64_t*>(buff_)[idx] == EMPTY_KEY_64;
    return *reinterpret_cast<const int64_t*>(keys_ptr) == EMPTY_KEY_64;
  return isEmptyEntry(entry_idx, buff_);

                              const bool float_argument_input) {