ResultSetIteration.cpp
/*
 * Copyright 2022 HEAVY.AI, Inc.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

/**
 * @file    ResultSetIteration.cpp
 * @brief   Iteration part of the row set interface.
 *
 */

#include "Execute.h"
#include "Geospatial/Compression.h"
#include "Geospatial/Types.h"
#include "ParserNode.h"
#include "QueryEngine/QueryEngine.h"
#include "QueryEngine/TargetValue.h"
#include "QueryEngine/Utils/FlatBuffer.h"
#include "ResultSet.h"
#include "ResultSetGeoSerialization.h"
#include "RuntimeFunctions.h"
#include "Shared/SqlTypesLayout.h"
#include "Shared/likely.h"
#include "Shared/sqltypes.h"
#include "TypePunning.h"

#include <boost/math/special_functions/fpclassify.hpp>

#include <memory>
#include <utility>

namespace {

// Interprets ptr1, ptr2 as the sum and count pair used for AVG.
TargetValue make_avg_target_value(const int8_t* ptr1,
                                  const int8_t compact_sz1,
                                  const int8_t* ptr2,
                                  const int8_t compact_sz2,
                                  const TargetInfo& target_info) {
  int64_t sum{0};
  CHECK(target_info.agg_kind == kAVG);
  const bool float_argument_input = takes_float_argument(target_info);
  const auto actual_compact_sz1 = float_argument_input ? sizeof(float) : compact_sz1;
  const auto& agg_ti = target_info.agg_arg_type;
  if (agg_ti.is_integer() || agg_ti.is_decimal()) {
    sum = read_int_from_buff(ptr1, actual_compact_sz1);
  } else if (agg_ti.is_fp()) {
    switch (actual_compact_sz1) {
      case 8: {
        double d = *reinterpret_cast<const double*>(ptr1);
        sum = *reinterpret_cast<const int64_t*>(may_alias_ptr(&d));
        break;
      }
      case 4: {
        double d = *reinterpret_cast<const float*>(ptr1);
        sum = *reinterpret_cast<const int64_t*>(may_alias_ptr(&d));
        break;
      }
      default:
        CHECK(false);
    }
  } else {
    CHECK(false);
  }
  const auto count = read_int_from_buff(ptr2, compact_sz2);
  return pair_to_double({sum, count}, target_info.sql_type, false);
}
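
// Illustrative sketch (not part of the original file): for an AVG target whose
// sum slot holds the bit pattern of the double 7.5 and whose count slot holds
// 3, the helper above reads both slots and defers to pair_to_double:
//   const auto avg = make_avg_target_value(sum_ptr, 8, count_ptr, 8, avg_info);
//   // yields 7.5 / 3 = 2.5 as a double TargetValue
// The pointer and info names here are hypothetical; the slot widths come from
// the QueryMemoryDescriptor's compact sizes.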

// Given the entire buffer for the result set, buff, finds the beginning of the
// column for slot_idx. Only makes sense for column-wise representation.
const int8_t* advance_col_buff_to_slot(const int8_t* buff,
                                       const QueryMemoryDescriptor& query_mem_desc,
                                       const std::vector<TargetInfo>& targets,
                                       const size_t slot_idx,
                                       const bool separate_varlen_storage) {
  auto crt_col_ptr = get_cols_ptr(buff, query_mem_desc);
  const auto buffer_col_count = query_mem_desc.getBufferColSlotCount();
  size_t agg_col_idx{0};
  for (size_t target_idx = 0; target_idx < targets.size(); ++target_idx) {
    if (agg_col_idx == slot_idx) {
      return crt_col_ptr;
    }
    CHECK_LT(agg_col_idx, buffer_col_count);
    const auto& agg_info = targets[target_idx];
    crt_col_ptr =
        advance_to_next_columnar_target_buff(crt_col_ptr, query_mem_desc, agg_col_idx);
    if (agg_info.is_agg && agg_info.agg_kind == kAVG) {
      if (agg_col_idx + 1 == slot_idx) {
        return crt_col_ptr;
      }
      crt_col_ptr = advance_to_next_columnar_target_buff(
          crt_col_ptr, query_mem_desc, agg_col_idx + 1);
    }
    agg_col_idx = advance_slot(agg_col_idx, agg_info, separate_varlen_storage);
  }
  CHECK(false);
  return nullptr;
}
}  // namespace

// Gets the byte offset, starting from the beginning of the row targets buffer, of
// the value in position slot_idx (only makes sense for row-wise representation).
size_t result_set::get_byteoff_of_slot(const size_t slot_idx,
                                       const QueryMemoryDescriptor& query_mem_desc) {
  return query_mem_desc.getPaddedColWidthForRange(0, slot_idx);
}
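
// For example (a hedged sketch with hypothetical widths): if the padded slot
// widths are {8, 4, 4}, then get_byteoff_of_slot(2, query_mem_desc) returns
// 8 + 4 = 12, the sum of the padded widths of the slots preceding slot 2.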

std::vector<TargetValue> ResultSet::getRowAt(
    const size_t global_entry_idx,
    const bool translate_strings,
    const bool decimal_to_double,
    const bool fixup_count_distinct_pointers,
    const std::vector<bool>& targets_to_skip /* = {}*/) const {
  const auto storage_lookup_result =
      fixup_count_distinct_pointers
          ? StorageLookupResult{storage_.get(), global_entry_idx, 0}
          : findStorage(global_entry_idx);
  const auto storage = storage_lookup_result.storage_ptr;
  const auto local_entry_idx = storage_lookup_result.fixedup_entry_idx;
  if (!fixup_count_distinct_pointers && storage->isEmptyEntry(local_entry_idx)) {
    return {};
  }
  const auto buff = storage->buff_;
  CHECK(buff);
  std::vector<TargetValue> row;
  size_t agg_col_idx = 0;
  int8_t* rowwise_target_ptr{nullptr};
  int8_t* keys_ptr{nullptr};
  const int8_t* crt_col_ptr{nullptr};
  if (query_mem_desc_.didOutputColumnar()) {
    keys_ptr = buff;
    crt_col_ptr = get_cols_ptr(buff, storage->query_mem_desc_);
  } else {
    keys_ptr = row_ptr_rowwise(buff, query_mem_desc_, local_entry_idx);
    const auto key_bytes_with_padding =
        align_to_int64(get_key_bytes_rowwise(query_mem_desc_));
    rowwise_target_ptr = keys_ptr + key_bytes_with_padding;
  }
  for (size_t target_idx = 0; target_idx < storage->targets_.size(); ++target_idx) {
    const auto& agg_info = storage->targets_[target_idx];
    if (query_mem_desc_.didOutputColumnar()) {
      if (UNLIKELY(!targets_to_skip.empty())) {
        row.push_back(!targets_to_skip[target_idx]
                          ? getTargetValueFromBufferColwise(crt_col_ptr,
                                                            keys_ptr,
                                                            storage->query_mem_desc_,
                                                            local_entry_idx,
                                                            global_entry_idx,
                                                            agg_info,
                                                            target_idx,
                                                            agg_col_idx,
                                                            translate_strings,
                                                            decimal_to_double)
                          : nullptr);
      } else {
        row.push_back(getTargetValueFromBufferColwise(crt_col_ptr,
                                                      keys_ptr,
                                                      storage->query_mem_desc_,
                                                      local_entry_idx,
                                                      global_entry_idx,
                                                      agg_info,
                                                      target_idx,
                                                      agg_col_idx,
                                                      translate_strings,
                                                      decimal_to_double));
      }
      crt_col_ptr = advance_target_ptr_col_wise(crt_col_ptr,
                                                agg_info,
                                                agg_col_idx,
                                                storage->query_mem_desc_,
                                                separate_varlen_storage_valid_);
    } else {
      if (UNLIKELY(!targets_to_skip.empty())) {
        row.push_back(!targets_to_skip[target_idx]
                          ? getTargetValueFromBufferRowwise(rowwise_target_ptr,
                                                            keys_ptr,
                                                            global_entry_idx,
                                                            agg_info,
                                                            target_idx,
                                                            agg_col_idx,
                                                            translate_strings,
                                                            decimal_to_double,
                                                            fixup_count_distinct_pointers)
                          : nullptr);
      } else {
        row.push_back(getTargetValueFromBufferRowwise(rowwise_target_ptr,
                                                      keys_ptr,
                                                      global_entry_idx,
                                                      agg_info,
                                                      target_idx,
                                                      agg_col_idx,
                                                      translate_strings,
                                                      decimal_to_double,
                                                      fixup_count_distinct_pointers));
      }
      rowwise_target_ptr = advance_target_ptr_row_wise(rowwise_target_ptr,
                                                       agg_info,
                                                       agg_col_idx,
                                                       query_mem_desc_,
                                                       separate_varlen_storage_valid_);
    }
    agg_col_idx = advance_slot(agg_col_idx, agg_info, separate_varlen_storage_valid_);
  }

  return row;
}

TargetValue ResultSet::getRowAt(const size_t row_idx,
                                const size_t col_idx,
                                const bool translate_strings,
                                const bool decimal_to_double /* = true */) const {
  std::lock_guard<std::mutex> lock(row_iteration_mutex_);
  moveToBegin();
  for (size_t i = 0; i < row_idx; ++i) {
    auto crt_row = getNextRowUnlocked(translate_strings, decimal_to_double);
    CHECK(!crt_row.empty());
  }
  auto crt_row = getNextRowUnlocked(translate_strings, decimal_to_double);
  CHECK(!crt_row.empty());
  return crt_row[col_idx];
}

OneIntegerColumnRow ResultSet::getOneColRow(const size_t global_entry_idx) const {
  const auto storage_lookup_result = findStorage(global_entry_idx);
  const auto storage = storage_lookup_result.storage_ptr;
  const auto local_entry_idx = storage_lookup_result.fixedup_entry_idx;
  if (storage->isEmptyEntry(local_entry_idx)) {
    return {0, false};
  }
  const auto buff = storage->buff_;
  CHECK(buff);
  CHECK(!query_mem_desc_.didOutputColumnar());
  const auto keys_ptr = row_ptr_rowwise(buff, query_mem_desc_, local_entry_idx);
  const auto key_bytes_with_padding =
      align_to_int64(get_key_bytes_rowwise(query_mem_desc_));
  const auto rowwise_target_ptr = keys_ptr + key_bytes_with_padding;
  const auto tv = getTargetValueFromBufferRowwise(rowwise_target_ptr,
                                                  keys_ptr,
                                                  global_entry_idx,
                                                  targets_.front(),
                                                  0,
                                                  0,
                                                  false,
                                                  false,
                                                  false);
  const auto scalar_tv = boost::get<ScalarTargetValue>(&tv);
  CHECK(scalar_tv);
  const auto ival_ptr = boost::get<int64_t>(scalar_tv);
  CHECK(ival_ptr);
  return {*ival_ptr, true};
}

std::vector<TargetValue> ResultSet::getRowAt(const size_t logical_index) const {
  if (logical_index >= entryCount()) {
    return {};
  }
  const auto entry_idx =
      permutation_.empty() ? logical_index : permutation_[logical_index];
  return getRowAt(entry_idx, true, false, false);
}

std::vector<TargetValue> ResultSet::getRowAtNoTranslations(
    const size_t logical_index,
    const std::vector<bool>& targets_to_skip /* = {}*/) const {
  if (logical_index >= entryCount()) {
    return {};
  }
  const auto entry_idx =
      permutation_.empty() ? logical_index : permutation_[logical_index];
  return getRowAt(entry_idx, false, false, false, targets_to_skip);
}

bool ResultSet::isRowAtEmpty(const size_t logical_index) const {
  if (logical_index >= entryCount()) {
    return true;
  }
  const auto entry_idx =
      permutation_.empty() ? logical_index : permutation_[logical_index];
  const auto storage_lookup_result = findStorage(entry_idx);
  const auto storage = storage_lookup_result.storage_ptr;
  const auto local_entry_idx = storage_lookup_result.fixedup_entry_idx;
  return storage->isEmptyEntry(local_entry_idx);
}

std::vector<TargetValue> ResultSet::getNextRow(const bool translate_strings,
                                               const bool decimal_to_double) const {
  std::lock_guard<std::mutex> lock(row_iteration_mutex_);
  if (!storage_ && !just_explain_) {
    return {};
  }
  return getNextRowUnlocked(translate_strings, decimal_to_double);
}

std::vector<TargetValue> ResultSet::getNextRowUnlocked(
    const bool translate_strings,
    const bool decimal_to_double) const {
  if (just_explain_) {
    if (fetched_so_far_) {
      return {};
    }
    fetched_so_far_ = 1;
    return {explanation_};
  }
  return getNextRowImpl(translate_strings, decimal_to_double);
}

std::vector<TargetValue> ResultSet::getNextRowImpl(const bool translate_strings,
                                                   const bool decimal_to_double) const {
  size_t entry_buff_idx = 0;
  do {
    if (keep_first_ && fetched_so_far_ >= drop_first_ + keep_first_) {
      return {};
    }

    entry_buff_idx = advanceCursorToNextEntry();

    if (crt_row_buff_idx_ >= entryCount()) {
      CHECK_EQ(entryCount(), crt_row_buff_idx_);
      return {};
    }
    ++crt_row_buff_idx_;
    ++fetched_so_far_;

  } while (drop_first_ && fetched_so_far_ <= drop_first_);

  auto row = getRowAt(entry_buff_idx, translate_strings, decimal_to_double, false);
  CHECK(!row.empty());

  return row;
}

namespace {

const int8_t* columnar_elem_ptr(const size_t entry_idx,
                                const int8_t* col1_ptr,
                                const int8_t compact_sz1) {
  return col1_ptr + compact_sz1 * entry_idx;
}

int64_t int_resize_cast(const int64_t ival, const size_t sz) {
  switch (sz) {
    case 8:
      return ival;
    case 4:
      return static_cast<int32_t>(ival);
    case 2:
      return static_cast<int16_t>(ival);
    case 1:
      return static_cast<int8_t>(ival);
    default:
      UNREACHABLE();
  }
  UNREACHABLE();
  return 0;
}
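
// Illustrative sketch (not in the original source): int_resize_cast narrows
// and sign-extends a value read from a compact slot, so a 2-byte slot holding
// the bit pattern 0xFFFF comes back as -1 rather than 65535:
//   const int64_t v = int_resize_cast(0xFFFF, 2);  // v == -1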

}  // namespace

void ResultSet::RowWiseTargetAccessor::initializeOffsetsForStorage() {
  // Compute offsets for base storage and all appended storage
  for (size_t storage_idx = 0; storage_idx < result_set_->appended_storage_.size() + 1;
       ++storage_idx) {
    offsets_for_storage_.emplace_back();

    const int8_t* rowwise_target_ptr{0};

    size_t agg_col_idx = 0;
    for (size_t target_idx = 0; target_idx < result_set_->storage_->targets_.size();
         ++target_idx) {
      const auto& agg_info = result_set_->storage_->targets_[target_idx];

      auto ptr1 = rowwise_target_ptr;
      const auto compact_sz1 =
          result_set_->query_mem_desc_.getPaddedSlotWidthBytes(agg_col_idx)
              ? result_set_->query_mem_desc_.getPaddedSlotWidthBytes(agg_col_idx)
              : key_width_;

      const int8_t* ptr2{nullptr};
      int8_t compact_sz2{0};
      if ((agg_info.is_agg && agg_info.agg_kind == kAVG)) {
        ptr2 = ptr1 + compact_sz1;
        compact_sz2 =
            result_set_->query_mem_desc_.getPaddedSlotWidthBytes(agg_col_idx + 1);
      } else if (is_real_str_or_array(agg_info)) {
        ptr2 = ptr1 + compact_sz1;
        if (!result_set_->separate_varlen_storage_valid_) {
          // None encoded strings explicitly attached to ResultSetStorage do not have a
          // second slot in the QueryMemoryDescriptor col width vector
          compact_sz2 =
              result_set_->query_mem_desc_.getPaddedSlotWidthBytes(agg_col_idx + 1);
        }
      }
      offsets_for_storage_[storage_idx].push_back(
          TargetOffsets{ptr1,
                        static_cast<size_t>(compact_sz1),
                        ptr2,
                        static_cast<size_t>(compact_sz2)});
      rowwise_target_ptr =
          advance_target_ptr_row_wise(rowwise_target_ptr,
                                      agg_info,
                                      agg_col_idx,
                                      result_set_->query_mem_desc_,
                                      result_set_->separate_varlen_storage_valid_);

      agg_col_idx = advance_slot(
          agg_col_idx, agg_info, result_set_->separate_varlen_storage_valid_);
    }
    CHECK_EQ(offsets_for_storage_[storage_idx].size(),
             result_set_->storage_->targets_.size());
  }
}

InternalTargetValue ResultSet::RowWiseTargetAccessor::getColumnInternal(
    const int8_t* buff,
    const size_t entry_idx,
    const size_t target_logical_idx,
    const StorageLookupResult& storage_lookup_result) const {
  CHECK(buff);
  const int8_t* rowwise_target_ptr{nullptr};
  const int8_t* keys_ptr{nullptr};

  const size_t storage_idx = storage_lookup_result.storage_idx;

  CHECK_LT(storage_idx, offsets_for_storage_.size());
  CHECK_LT(target_logical_idx, offsets_for_storage_[storage_idx].size());

  const auto& offsets_for_target = offsets_for_storage_[storage_idx][target_logical_idx];
  const auto& agg_info = result_set_->storage_->targets_[target_logical_idx];
  const auto& type_info = agg_info.sql_type;

  keys_ptr = get_rowwise_ptr(buff, entry_idx);
  rowwise_target_ptr = keys_ptr + key_bytes_with_padding_;
  auto ptr1 = rowwise_target_ptr + reinterpret_cast<size_t>(offsets_for_target.ptr1);
  if (result_set_->query_mem_desc_.targetGroupbyIndicesSize() > 0) {
    if (result_set_->query_mem_desc_.getTargetGroupbyIndex(target_logical_idx) >= 0) {
      ptr1 = keys_ptr +
             result_set_->query_mem_desc_.getTargetGroupbyIndex(target_logical_idx) *
                 key_width_;
    }
  }
  const auto i1 =
      result_set_->lazyReadInt(read_int_from_buff(ptr1, offsets_for_target.compact_sz1),
                               target_logical_idx,
                               storage_lookup_result);
  if (agg_info.is_agg && agg_info.agg_kind == kAVG) {
    CHECK(offsets_for_target.ptr2);
    const auto ptr2 =
        rowwise_target_ptr + reinterpret_cast<size_t>(offsets_for_target.ptr2);
    const auto i2 = read_int_from_buff(ptr2, offsets_for_target.compact_sz2);
    return InternalTargetValue(i1, i2);
  } else {
    if (type_info.is_string() && type_info.get_compression() == kENCODING_NONE) {
      CHECK(!agg_info.is_agg);
      if (!result_set_->lazy_fetch_info_.empty()) {
        CHECK_LT(target_logical_idx, result_set_->lazy_fetch_info_.size());
        const auto& col_lazy_fetch = result_set_->lazy_fetch_info_[target_logical_idx];
        if (col_lazy_fetch.is_lazily_fetched) {
          return InternalTargetValue(reinterpret_cast<const std::string*>(i1));
        }
      }
      if (result_set_->separate_varlen_storage_valid_) {
        if (i1 < 0) {
          CHECK_EQ(-1, i1);
          return InternalTargetValue(static_cast<const std::string*>(nullptr));
        }
        CHECK_LT(storage_lookup_result.storage_idx,
                 result_set_->serialized_varlen_buffer_.size());
        const auto& varlen_buffer_for_fragment =
            result_set_->serialized_varlen_buffer_[storage_lookup_result.storage_idx];
        CHECK_LT(static_cast<size_t>(i1), varlen_buffer_for_fragment.size());
        return InternalTargetValue(&varlen_buffer_for_fragment[i1]);
      }
      CHECK(offsets_for_target.ptr2);
      const auto ptr2 =
          rowwise_target_ptr + reinterpret_cast<size_t>(offsets_for_target.ptr2);
      const auto str_len = read_int_from_buff(ptr2, offsets_for_target.compact_sz2);
      CHECK_GE(str_len, 0);
      return result_set_->getVarlenOrderEntry(i1, str_len);
    }
    return InternalTargetValue(
        type_info.is_fp() ? i1 : int_resize_cast(i1, type_info.get_logical_size()));
  }
}

void ResultSet::ColumnWiseTargetAccessor::initializeOffsetsForStorage() {
  // Compute offsets for base storage and all appended storage
  const auto key_width = result_set_->query_mem_desc_.getEffectiveKeyWidth();
  for (size_t storage_idx = 0; storage_idx < result_set_->appended_storage_.size() + 1;
       ++storage_idx) {
    offsets_for_storage_.emplace_back();

    const int8_t* buff = storage_idx == 0
                             ? result_set_->storage_->buff_
                             : result_set_->appended_storage_[storage_idx - 1]->buff_;
    CHECK(buff);

    const auto& crt_query_mem_desc =
        storage_idx == 0
            ? result_set_->storage_->query_mem_desc_
            : result_set_->appended_storage_[storage_idx - 1]->query_mem_desc_;
    const int8_t* crt_col_ptr = get_cols_ptr(buff, crt_query_mem_desc);

    size_t agg_col_idx = 0;
    for (size_t target_idx = 0; target_idx < result_set_->storage_->targets_.size();
         ++target_idx) {
      const auto& agg_info = result_set_->storage_->targets_[target_idx];

      const auto compact_sz1 =
          crt_query_mem_desc.getPaddedSlotWidthBytes(agg_col_idx)
              ? crt_query_mem_desc.getPaddedSlotWidthBytes(agg_col_idx)
              : key_width;

      const auto next_col_ptr = advance_to_next_columnar_target_buff(
          crt_col_ptr, crt_query_mem_desc, agg_col_idx);
      const bool uses_two_slots = (agg_info.is_agg && agg_info.agg_kind == kAVG) ||
                                  is_real_str_or_array(agg_info);
      const auto col2_ptr = uses_two_slots ? next_col_ptr : nullptr;
      const auto compact_sz2 =
          (agg_info.is_agg && agg_info.agg_kind == kAVG) || is_real_str_or_array(agg_info)
              ? crt_query_mem_desc.getPaddedSlotWidthBytes(agg_col_idx + 1)
              : 0;

      offsets_for_storage_[storage_idx].push_back(
          TargetOffsets{crt_col_ptr,
                        static_cast<size_t>(compact_sz1),
                        col2_ptr,
                        static_cast<size_t>(compact_sz2)});

      crt_col_ptr = next_col_ptr;
      if (uses_two_slots) {
        crt_col_ptr = advance_to_next_columnar_target_buff(
            crt_col_ptr, crt_query_mem_desc, agg_col_idx + 1);
      }
      agg_col_idx = advance_slot(
          agg_col_idx, agg_info, result_set_->separate_varlen_storage_valid_);
    }
    CHECK_EQ(offsets_for_storage_[storage_idx].size(),
             result_set_->storage_->targets_.size());
  }
}

InternalTargetValue ResultSet::ColumnWiseTargetAccessor::getColumnInternal(
    const int8_t* buff,
    const size_t entry_idx,
    const size_t target_logical_idx,
    const StorageLookupResult& storage_lookup_result) const {
  const size_t storage_idx = storage_lookup_result.storage_idx;

  CHECK_LT(storage_idx, offsets_for_storage_.size());
  CHECK_LT(target_logical_idx, offsets_for_storage_[storage_idx].size());

  const auto& offsets_for_target = offsets_for_storage_[storage_idx][target_logical_idx];
  const auto& agg_info = result_set_->storage_->targets_[target_logical_idx];
  const auto& type_info = agg_info.sql_type;
  auto ptr1 = offsets_for_target.ptr1;
  if (result_set_->query_mem_desc_.targetGroupbyIndicesSize() > 0) {
    if (result_set_->query_mem_desc_.getTargetGroupbyIndex(target_logical_idx) >= 0) {
      ptr1 =
          buff + result_set_->query_mem_desc_.getTargetGroupbyIndex(target_logical_idx) *
                     result_set_->query_mem_desc_.getEffectiveKeyWidth() *
                     result_set_->query_mem_desc_.entry_count_;
    }
  }

  const auto i1 = result_set_->lazyReadInt(
      read_int_from_buff(
          columnar_elem_ptr(entry_idx, ptr1, offsets_for_target.compact_sz1),
          offsets_for_target.compact_sz1),
      target_logical_idx,
      storage_lookup_result);
  if (agg_info.is_agg && agg_info.agg_kind == kAVG) {
    CHECK(offsets_for_target.ptr2);
    const auto i2 = read_int_from_buff(
        columnar_elem_ptr(
            entry_idx, offsets_for_target.ptr2, offsets_for_target.compact_sz2),
        offsets_for_target.compact_sz2);
    return InternalTargetValue(i1, i2);
  } else {
    // for TEXT ENCODING NONE:
    if (type_info.is_string() && type_info.get_compression() == kENCODING_NONE) {
      CHECK(!agg_info.is_agg);
      if (!result_set_->lazy_fetch_info_.empty()) {
        CHECK_LT(target_logical_idx, result_set_->lazy_fetch_info_.size());
        const auto& col_lazy_fetch = result_set_->lazy_fetch_info_[target_logical_idx];
        if (col_lazy_fetch.is_lazily_fetched) {
          return InternalTargetValue(reinterpret_cast<const std::string*>(i1));
        }
      }
      if (result_set_->separate_varlen_storage_valid_) {
        if (i1 < 0) {
          CHECK_EQ(-1, i1);
          return InternalTargetValue(static_cast<const std::string*>(nullptr));
        }
        CHECK_LT(storage_lookup_result.storage_idx,
                 result_set_->serialized_varlen_buffer_.size());
        const auto& varlen_buffer_for_fragment =
            result_set_->serialized_varlen_buffer_[storage_lookup_result.storage_idx];
        CHECK_LT(static_cast<size_t>(i1), varlen_buffer_for_fragment.size());
        return InternalTargetValue(&varlen_buffer_for_fragment[i1]);
      }
      CHECK(offsets_for_target.ptr2);
      const auto i2 = read_int_from_buff(
          columnar_elem_ptr(
              entry_idx, offsets_for_target.ptr2, offsets_for_target.compact_sz2),
          offsets_for_target.compact_sz2);
      CHECK_GE(i2, 0);
      return result_set_->getVarlenOrderEntry(i1, i2);
    }
    return InternalTargetValue(
        type_info.is_fp() ? i1 : int_resize_cast(i1, type_info.get_logical_size()));
  }
}

InternalTargetValue ResultSet::getVarlenOrderEntry(const int64_t str_ptr,
                                                   const size_t str_len) const {
  char* host_str_ptr{nullptr};
  std::vector<int8_t> cpu_buffer;
  if (device_type_ == ExecutorDeviceType::GPU) {
    cpu_buffer.resize(str_len);
    const auto executor = query_mem_desc_.getExecutor();
    CHECK(executor);
    auto data_mgr = executor->getDataMgr();
    auto allocator = std::make_unique<CudaAllocator>(
        data_mgr, device_id_, getQueryEngineCudaStreamForDevice(device_id_));
    allocator->copyFromDevice(
        &cpu_buffer[0], reinterpret_cast<int8_t*>(str_ptr), str_len);
    host_str_ptr = reinterpret_cast<char*>(&cpu_buffer[0]);
  } else {
    CHECK(device_type_ == ExecutorDeviceType::CPU);
    host_str_ptr = reinterpret_cast<char*>(str_ptr);
  }
  std::string str(host_str_ptr, str_len);
  return InternalTargetValue(row_set_mem_owner_->addString(str));
}

int64_t ResultSet::lazyReadInt(const int64_t ival,
                               const size_t target_logical_idx,
                               const StorageLookupResult& storage_lookup_result) const {
  if (!lazy_fetch_info_.empty()) {
    CHECK_LT(target_logical_idx, lazy_fetch_info_.size());
    const auto& col_lazy_fetch = lazy_fetch_info_[target_logical_idx];
    if (col_lazy_fetch.is_lazily_fetched) {
      CHECK_LT(static_cast<size_t>(storage_lookup_result.storage_idx),
               col_buffers_.size());
      int64_t ival_copy = ival;
      auto& frag_col_buffers =
          getColumnFrag(static_cast<size_t>(storage_lookup_result.storage_idx),
                        target_logical_idx,
                        ival_copy);
      auto& frag_col_buffer = frag_col_buffers[col_lazy_fetch.local_col_id];
      CHECK_LT(target_logical_idx, targets_.size());
      const TargetInfo& target_info = targets_[target_logical_idx];
      CHECK(!target_info.is_agg);
      if (target_info.sql_type.is_string() &&
          target_info.sql_type.get_compression() == kENCODING_NONE) {
        VarlenDatum vd;
        bool is_end{false};
        ChunkIter_get_nth(
            reinterpret_cast<ChunkIter*>(const_cast<int8_t*>(frag_col_buffer)),
            storage_lookup_result.fixedup_entry_idx,
            false,
            &vd,
            &is_end);
        CHECK(!is_end);
        if (vd.is_null) {
          return 0;
        }
        std::string fetched_str(reinterpret_cast<char*>(vd.pointer), vd.length);
        return reinterpret_cast<int64_t>(row_set_mem_owner_->addString(fetched_str));
      }
      return result_set::lazy_decode(col_lazy_fetch, frag_col_buffer, ival_copy);
    }
  }
  return ival;
}

// Not all entries in the buffer represent a valid row. Advance the internal cursor
// used for the getNextRow method to the next row which is valid.
void ResultSet::advanceCursorToNextEntry(ResultSetRowIterator& iter) const {
  if (keep_first_ && iter.fetched_so_far_ >= drop_first_ + keep_first_) {
    iter.global_entry_idx_valid_ = false;
    return;
  }

  while (iter.crt_row_buff_idx_ < entryCount()) {
    const auto entry_idx = permutation_.empty() ? iter.crt_row_buff_idx_
                                                : permutation_[iter.crt_row_buff_idx_];
    const auto storage_lookup_result = findStorage(entry_idx);
    const auto storage = storage_lookup_result.storage_ptr;
    const auto fixedup_entry_idx = storage_lookup_result.fixedup_entry_idx;
    if (!storage->isEmptyEntry(fixedup_entry_idx)) {
      if (iter.fetched_so_far_ < drop_first_) {
        ++iter.fetched_so_far_;
      } else {
        break;
      }
    }
    ++iter.crt_row_buff_idx_;
  }
  if (permutation_.empty()) {
    iter.global_entry_idx_ = iter.crt_row_buff_idx_;
  } else {
    CHECK_LE(iter.crt_row_buff_idx_, permutation_.size());
    iter.global_entry_idx_ = iter.crt_row_buff_idx_ == permutation_.size()
                                 ? iter.crt_row_buff_idx_
                                 : permutation_[iter.crt_row_buff_idx_];
  }

  iter.global_entry_idx_valid_ = iter.crt_row_buff_idx_ < entryCount();

  if (iter.global_entry_idx_valid_) {
    ++iter.crt_row_buff_idx_;
    ++iter.fetched_so_far_;
  }
}

// Not all entries in the buffer represent a valid row. Advance the internal cursor
// used for the getNextRow method to the next row which is valid.
size_t ResultSet::advanceCursorToNextEntry() const {
  while (crt_row_buff_idx_ < entryCount()) {
    const auto entry_idx =
        permutation_.empty() ? crt_row_buff_idx_ : permutation_[crt_row_buff_idx_];
    const auto storage_lookup_result = findStorage(entry_idx);
    const auto storage = storage_lookup_result.storage_ptr;
    const auto fixedup_entry_idx = storage_lookup_result.fixedup_entry_idx;
    if (!storage->isEmptyEntry(fixedup_entry_idx)) {
      break;
    }
    ++crt_row_buff_idx_;
  }
  if (permutation_.empty()) {
    return crt_row_buff_idx_;
  }
  CHECK_LE(crt_row_buff_idx_, permutation_.size());
  return crt_row_buff_idx_ == permutation_.size() ? crt_row_buff_idx_
                                                  : permutation_[crt_row_buff_idx_];
}

size_t ResultSet::entryCount() const {
  return permutation_.empty() ? query_mem_desc_.getEntryCount() : permutation_.size();
}

size_t ResultSet::getBufferSizeBytes(const ExecutorDeviceType device_type) const {
  CHECK(storage_);
  return storage_->query_mem_desc_.getBufferSizeBytes(device_type);
}

namespace {

template <class T>
ScalarTargetValue make_scalar_tv(const T val) {
  return ScalarTargetValue(static_cast<int64_t>(val));
}

template <>
ScalarTargetValue make_scalar_tv(const float val) {
  return ScalarTargetValue(val);
}

template <>
ScalarTargetValue make_scalar_tv(const double val) {
  return ScalarTargetValue(val);
}

template <class T>
TargetValue build_array_target_value(
    const int8_t* buff,
    const size_t buff_sz,
    std::shared_ptr<RowSetMemoryOwner> row_set_mem_owner) {
  std::vector<ScalarTargetValue> values;
  auto buff_elems = reinterpret_cast<const T*>(buff);
  CHECK_EQ(size_t(0), buff_sz % sizeof(T));
  const size_t num_elems = buff_sz / sizeof(T);
  for (size_t i = 0; i < num_elems; ++i) {
    values.push_back(make_scalar_tv<T>(buff_elems[i]));
  }
  return ArrayTargetValue(values);
}

TargetValue build_string_array_target_value(
    const int32_t* buff,
    const size_t buff_sz,
    const int dict_id,
    const bool translate_strings,
    std::shared_ptr<RowSetMemoryOwner> row_set_mem_owner,
    const Catalog_Namespace::Catalog* catalog) {
  std::vector<ScalarTargetValue> values;
  CHECK_EQ(size_t(0), buff_sz % sizeof(int32_t));
  const size_t num_elems = buff_sz / sizeof(int32_t);
  if (translate_strings) {
    for (size_t i = 0; i < num_elems; ++i) {
      const auto string_id = buff[i];

      if (string_id == NULL_INT) {
        values.emplace_back(NullableString(nullptr));
      } else {
        if (dict_id == 0) {
          StringDictionaryProxy* sdp = row_set_mem_owner->getLiteralStringDictProxy();
          values.emplace_back(sdp->getString(string_id));
        } else {
          values.emplace_back(NullableString(
              row_set_mem_owner
                  ->getOrAddStringDictProxy(dict_id, /*with_generation=*/false, catalog)
                  ->getString(string_id)));
        }
      }
    }
  } else {
    for (size_t i = 0; i < num_elems; i++) {
      values.emplace_back(static_cast<int64_t>(buff[i]));
    }
  }
  return ArrayTargetValue(values);
}

TargetValue build_array_target_value(const SQLTypeInfo& array_ti,
                                     const int8_t* buff,
                                     const size_t buff_sz,
                                     const bool translate_strings,
                                     std::shared_ptr<RowSetMemoryOwner> row_set_mem_owner,
                                     const Catalog_Namespace::Catalog* catalog) {
  CHECK(array_ti.is_array());
  const auto& elem_ti = array_ti.get_elem_type();
  if (elem_ti.is_string()) {
    return build_string_array_target_value(reinterpret_cast<const int32_t*>(buff),
                                           buff_sz,
                                           elem_ti.get_comp_param(),
                                           translate_strings,
                                           row_set_mem_owner,
                                           catalog);
  }
  switch (elem_ti.get_size()) {
    case 1:
      return build_array_target_value<int8_t>(buff, buff_sz, row_set_mem_owner);
    case 2:
      return build_array_target_value<int16_t>(buff, buff_sz, row_set_mem_owner);
    case 4:
      if (elem_ti.is_fp()) {
        return build_array_target_value<float>(buff, buff_sz, row_set_mem_owner);
      } else {
        return build_array_target_value<int32_t>(buff, buff_sz, row_set_mem_owner);
      }
    case 8:
      if (elem_ti.is_fp()) {
        return build_array_target_value<double>(buff, buff_sz, row_set_mem_owner);
      } else {
        return build_array_target_value<int64_t>(buff, buff_sz, row_set_mem_owner);
      }
    default:
      CHECK(false);
  }
  CHECK(false);
  return TargetValue(nullptr);
}

template <class Tuple, size_t... indices>
inline std::vector<std::pair<const int8_t*, const int64_t>> make_vals_vector(
    std::index_sequence<indices...>,
    const Tuple& tuple) {
  return std::vector<std::pair<const int8_t*, const int64_t>>{
      std::make_pair(std::get<2 * indices>(tuple), std::get<2 * indices + 1>(tuple))...};
}

inline std::unique_ptr<ArrayDatum> lazy_fetch_chunk(const int8_t* ptr,
                                                    const int64_t varlen_ptr) {
  auto ad = std::make_unique<ArrayDatum>();
  bool is_end;
  ChunkIter_get_nth(reinterpret_cast<ChunkIter*>(const_cast<int8_t*>(ptr)),
                    varlen_ptr,
                    ad.get(),
                    &is_end);
  CHECK(!is_end);
  return ad;
}

struct GeoLazyFetchHandler {
  template <typename... T>
  static inline auto fetch(const SQLTypeInfo& geo_ti,
                           const ResultSet::GeoReturnType return_type,
                           T&&... vals) {
    constexpr int num_vals = sizeof...(vals);
    static_assert(
        num_vals % 2 == 0,
        "Must have consistent pointer/size pairs for lazy fetch of geo target values.");
    const auto vals_vector = make_vals_vector(std::make_index_sequence<num_vals / 2>{},
                                              std::make_tuple(vals...));
    std::array<VarlenDatumPtr, num_vals / 2> ad_arr;
    size_t ctr = 0;
    for (const auto& col_pair : vals_vector) {
      ad_arr[ctr] = lazy_fetch_chunk(col_pair.first, col_pair.second);
      // Regular chunk iterator used to fetch this datum sets the right nullness.
      // That includes the fixlen bounds array.
      // However it may incorrectly set it for the POINT coord array datum
      // if 1st byte happened to hold NULL_ARRAY_TINYINT. One should either use
      // the specialized iterator for POINT coords or rely on regular iterator +
      // reset + recheck, which is what is done below.
      auto is_point = (geo_ti.get_type() == kPOINT && ctr == 0);
      if (is_point) {
        // Resetting POINT coords array nullness here
        ad_arr[ctr]->is_null = false;
      }
      if (!geo_ti.get_notnull()) {
        // Recheck and set nullness
        if (ad_arr[ctr]->length == 0 || ad_arr[ctr]->pointer == NULL ||
            (is_point &&
             is_null_point(geo_ti, ad_arr[ctr]->pointer, ad_arr[ctr]->length))) {
          ad_arr[ctr]->is_null = true;
        }
      }
      ctr++;
    }
    return ad_arr;
  }
};

inline std::unique_ptr<ArrayDatum> fetch_data_from_gpu(int64_t varlen_ptr,
                                                       const int64_t length,
                                                       Data_Namespace::DataMgr* data_mgr,
                                                       const int device_id) {
  auto cpu_buf =
      std::shared_ptr<int8_t>(new int8_t[length], std::default_delete<int8_t[]>());
  auto allocator = std::make_unique<CudaAllocator>(
      data_mgr, device_id, getQueryEngineCudaStreamForDevice(device_id));
  allocator->copyFromDevice(cpu_buf.get(), reinterpret_cast<int8_t*>(varlen_ptr), length);
  // Just fetching the data from gpu, not checking geo nullness
  return std::make_unique<ArrayDatum>(length, cpu_buf, false);
}
939 
941  static inline auto yieldGpuPtrFetcher() {
942  return [](const int64_t ptr, const int64_t length) -> VarlenDatumPtr {
943  // Just fetching the data from gpu, not checking geo nullness
944  return std::make_unique<VarlenDatum>(length, reinterpret_cast<int8_t*>(ptr), false);
945  };
946  }
947 
948  static inline auto yieldGpuDatumFetcher(Data_Namespace::DataMgr* data_mgr_ptr,
949  const int device_id) {
950  return [data_mgr_ptr, device_id](const int64_t ptr,
951  const int64_t length) -> VarlenDatumPtr {
952  return fetch_data_from_gpu(ptr, length, data_mgr_ptr, device_id);
953  };
954  }
955 
956  static inline auto yieldCpuDatumFetcher() {
957  return [](const int64_t ptr, const int64_t length) -> VarlenDatumPtr {
958  // Just fetching the data from gpu, not checking geo nullness
959  return std::make_unique<VarlenDatum>(length, reinterpret_cast<int8_t*>(ptr), false);
960  };
961  }
962 
963  template <typename... T>
964  static inline auto fetch(const SQLTypeInfo& geo_ti,
965  const ResultSet::GeoReturnType return_type,
966  Data_Namespace::DataMgr* data_mgr,
967  const bool fetch_data_from_gpu,
968  const int device_id,
969  T&&... vals) {
970  auto ad_arr_generator = [&](auto datum_fetcher) {
971  constexpr int num_vals = sizeof...(vals);
972  static_assert(
973  num_vals % 2 == 0,
974  "Must have consistent pointer/size pairs for lazy fetch of geo target values.");
975  const auto vals_vector = std::vector<int64_t>{vals...};
976 
977  std::array<VarlenDatumPtr, num_vals / 2> ad_arr;
978  size_t ctr = 0;
979  for (size_t i = 0; i < vals_vector.size(); i += 2, ctr++) {
980  if (vals_vector[i] == 0) {
981  // projected null
982  CHECK(!geo_ti.get_notnull());
983  ad_arr[ctr] = std::make_unique<ArrayDatum>(0, nullptr, true);
984  continue;
985  }
986  ad_arr[ctr] = datum_fetcher(vals_vector[i], vals_vector[i + 1]);
987  // All fetched datums come in with is_null set to false
988  if (!geo_ti.get_notnull()) {
989  bool is_null = false;
990  // Now need to set the nullness
991  if (ad_arr[ctr]->length == 0 || ad_arr[ctr]->pointer == NULL) {
992  is_null = true;
993  } else if (geo_ti.get_type() == kPOINT && ctr == 0 &&
994  is_null_point(geo_ti, ad_arr[ctr]->pointer, ad_arr[ctr]->length)) {
995  is_null = true; // recognizes compressed and uncompressed points
996  } else if (ad_arr[ctr]->length == 4 * sizeof(double)) {
997  // Bounds
998  auto dti = SQLTypeInfo(kARRAY, 0, 0, false, kENCODING_NONE, 0, kDOUBLE);
999  is_null = dti.is_null_fixlen_array(ad_arr[ctr]->pointer, ad_arr[ctr]->length);
1000  }
1001  ad_arr[ctr]->is_null = is_null;
1002  }
1003  }
1004  return ad_arr;
1005  };
1006 
1007  if (fetch_data_from_gpu) {
1009  return ad_arr_generator(yieldGpuPtrFetcher());
1010  } else {
1011  return ad_arr_generator(yieldGpuDatumFetcher(data_mgr, device_id));
1012  }
1013  } else {
1014  return ad_arr_generator(yieldCpuDatumFetcher());
1015  }
1016  }
1017 };

template <SQLTypes GEO_SOURCE_TYPE, typename GeoTargetFetcher>
struct GeoTargetValueBuilder {
  template <typename... T>
  static inline TargetValue build(const SQLTypeInfo& geo_ti,
                                  const ResultSet::GeoReturnType return_type,
                                  T&&... vals) {
    auto ad_arr = GeoTargetFetcher::fetch(geo_ti, return_type, std::forward<T>(vals)...);
    static_assert(std::tuple_size<decltype(ad_arr)>::value > 0,
                  "ArrayDatum array for Geo Target must contain at least one value.");

    // Fetcher sets the geo nullness based on geo typeinfo's notnull, type and
    // compression. Serializers will generate appropriate NULL geo where necessary.
    switch (return_type) {
      case ResultSet::GeoReturnType::GeoTargetValue: {
        if (!geo_ti.get_notnull() && ad_arr[0]->is_null) {
          return GeoTargetValue();
        }
        return GeoReturnTypeTraits<ResultSet::GeoReturnType::GeoTargetValue,
                                   GEO_SOURCE_TYPE>::GeoSerializerType::serialize(geo_ti,
                                                                                  ad_arr);
      }
      case ResultSet::GeoReturnType::WktString: {
        if (!geo_ti.get_notnull() && ad_arr[0]->is_null) {
          // Generating NULL wkt string to represent NULL geo
          return NullableString(nullptr);
        }
        return GeoReturnTypeTraits<ResultSet::GeoReturnType::WktString,
                                   GEO_SOURCE_TYPE>::GeoSerializerType::serialize(geo_ti,
                                                                                  ad_arr);
      }
      case ResultSet::GeoReturnType::GeoTargetValuePtr:
      case ResultSet::GeoReturnType::GeoTargetValueGpuPtr: {
        if (!geo_ti.get_notnull() && ad_arr[0]->is_null) {
          // NULL geo
          // Pass along null datum, instead of an empty/null GeoTargetValuePtr
          // return GeoTargetValuePtr();
        }
        return GeoReturnTypeTraits<ResultSet::GeoReturnType::GeoTargetValuePtr,
                                   GEO_SOURCE_TYPE>::GeoSerializerType::serialize(geo_ti,
                                                                                  ad_arr);
      }
      default: {
        UNREACHABLE();
        return TargetValue(nullptr);
      }
    }
  }
};

template <typename T>
inline std::pair<int64_t, int64_t> get_frag_id_and_local_idx(
    const std::vector<std::vector<T>>& frag_offsets,
    const size_t tab_or_col_idx,
    const int64_t global_idx) {
  CHECK_GE(global_idx, int64_t(0));
  for (int64_t frag_id = frag_offsets.size() - 1; frag_id > 0; --frag_id) {
    CHECK_LT(tab_or_col_idx, frag_offsets[frag_id].size());
    const auto frag_off = static_cast<int64_t>(frag_offsets[frag_id][tab_or_col_idx]);
    if (frag_off < global_idx) {
      return {frag_id, global_idx - frag_off};
    }
  }
  return {-1, -1};
}
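
// Worked example (a sketch, not from the source): with per-fragment offsets of
// {0}, {100}, {200} for one column and global_idx == 150, the backward scan
// finds frag_offsets[1][0] == 100 < 150 first and returns {1, 50}: fragment 1,
// local row 50. If no later fragment starts before global_idx, the {-1, -1}
// sentinel is returned.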

}  // namespace

const std::vector<const int8_t*>& ResultSet::getColumnFrag(const size_t storage_idx,
                                                           const size_t col_logical_idx,
                                                           int64_t& global_idx) const {
  CHECK_LT(static_cast<size_t>(storage_idx), col_buffers_.size());
  if (col_buffers_[storage_idx].size() > 1) {
    int64_t frag_id = 0;
    int64_t local_idx = global_idx;
    if (consistent_frag_sizes_[storage_idx][col_logical_idx] != -1) {
      frag_id = global_idx / consistent_frag_sizes_[storage_idx][col_logical_idx];
      local_idx = global_idx % consistent_frag_sizes_[storage_idx][col_logical_idx];
    } else {
      std::tie(frag_id, local_idx) = get_frag_id_and_local_idx(
          frag_offsets_[storage_idx], col_logical_idx, global_idx);
      CHECK_LE(local_idx, global_idx);
    }
    CHECK_GE(frag_id, int64_t(0));
    CHECK_LT(static_cast<size_t>(frag_id), col_buffers_[storage_idx].size());
    global_idx = local_idx;
    return col_buffers_[storage_idx][frag_id];
  } else {
    CHECK_EQ(size_t(1), col_buffers_[storage_idx].size());
    return col_buffers_[storage_idx][0];
  }
}

const VarlenOutputInfo* ResultSet::getVarlenOutputInfo(const size_t entry_idx) const {
  auto storage_lookup_result = findStorage(entry_idx);
  CHECK(storage_lookup_result.storage_ptr);
  return storage_lookup_result.storage_ptr->getVarlenOutputInfo();
}

void ResultSet::copyColumnIntoBuffer(const size_t column_idx,
                                     int8_t* output_buffer,
                                     const size_t output_buffer_size) const {
  CHECK(isDirectColumnarConversionPossible());
  CHECK_LT(column_idx, query_mem_desc_.getSlotCount());
  CHECK(output_buffer_size > 0);
  CHECK(output_buffer);
  const auto column_width_size = query_mem_desc_.getPaddedSlotWidthBytes(column_idx);
  size_t out_buff_offset = 0;

  // the main storage:
  const size_t crt_storage_row_count = storage_->query_mem_desc_.getEntryCount();
  const size_t crt_buffer_size = crt_storage_row_count * column_width_size;
  const size_t column_offset = storage_->query_mem_desc_.getColOffInBytes(column_idx);
  const int8_t* storage_buffer = storage_->getUnderlyingBuffer() + column_offset;
  CHECK(crt_buffer_size <= output_buffer_size);
  std::memcpy(output_buffer, storage_buffer, crt_buffer_size);

  out_buff_offset += crt_buffer_size;

  // the appended storages:
  for (size_t i = 0; i < appended_storage_.size(); i++) {
    const size_t crt_storage_row_count =
        appended_storage_[i]->query_mem_desc_.getEntryCount();
    if (crt_storage_row_count == 0) {
      // skip an empty appended storage
      continue;
    }
    CHECK_LT(out_buff_offset, output_buffer_size);
    const size_t crt_buffer_size = crt_storage_row_count * column_width_size;
    const size_t column_offset =
        appended_storage_[i]->query_mem_desc_.getColOffInBytes(column_idx);
    const int8_t* storage_buffer =
        appended_storage_[i]->getUnderlyingBuffer() + column_offset;
    CHECK(out_buff_offset + crt_buffer_size <= output_buffer_size);
    std::memcpy(output_buffer + out_buff_offset, storage_buffer, crt_buffer_size);

    out_buff_offset += crt_buffer_size;
  }
}
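
// Usage sketch (hypothetical names; assumes a result set for which direct
// columnar conversion is possible): the output buffer is conceptually sized as
// the total entry count times the column's padded slot width, e.g.
//   std::vector<int8_t> out(total_entry_count * padded_width_of(column_idx));
//   result_set->copyColumnIntoBuffer(column_idx, out.data(), out.size());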

template <typename ENTRY_TYPE, QueryDescriptionType QUERY_TYPE, bool COLUMNAR_FORMAT>
ENTRY_TYPE ResultSet::getEntryAt(const size_t row_idx,
                                 const size_t target_idx,
                                 const size_t slot_idx) const {
  if constexpr (QUERY_TYPE == QueryDescriptionType::GroupByPerfectHash) {  // NOLINT
    if constexpr (COLUMNAR_FORMAT) {  // NOLINT
      return getColumnarPerfectHashEntryAt<ENTRY_TYPE>(row_idx, target_idx, slot_idx);
    } else {
      return getRowWisePerfectHashEntryAt<ENTRY_TYPE>(row_idx, target_idx, slot_idx);
    }
  } else if constexpr (QUERY_TYPE == QueryDescriptionType::GroupByBaselineHash) {
    if constexpr (COLUMNAR_FORMAT) {  // NOLINT
      return getColumnarBaselineEntryAt<ENTRY_TYPE>(row_idx, target_idx, slot_idx);
    } else {
      return getRowWiseBaselineEntryAt<ENTRY_TYPE>(row_idx, target_idx, slot_idx);
    }
  } else {
    UNREACHABLE() << "Invalid query type is used";
    return 0;
  }
}
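
// Minimal usage sketch (hypothetical indices): reading a 64-bit slot from a
// columnar perfect-hash layout dispatches to the columnar specialization:
//   const auto v =
//       rs->getEntryAt<int64_t, QueryDescriptionType::GroupByPerfectHash, true>(
//           /*row_idx=*/0, /*target_idx=*/0, /*slot_idx=*/0);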

#define DEF_GET_ENTRY_AT(query_type, columnar_output)                         \
  template DATA_T ResultSet::getEntryAt<DATA_T, query_type, columnar_output>( \
      const size_t row_idx, const size_t target_idx, const size_t slot_idx) const;

#define DATA_T int64_t
DEF_GET_ENTRY_AT(QueryDescriptionType::GroupByPerfectHash, true)
DEF_GET_ENTRY_AT(QueryDescriptionType::GroupByPerfectHash, false)
DEF_GET_ENTRY_AT(QueryDescriptionType::GroupByBaselineHash, true)
DEF_GET_ENTRY_AT(QueryDescriptionType::GroupByBaselineHash, false)
#undef DATA_T

#define DATA_T int32_t
DEF_GET_ENTRY_AT(QueryDescriptionType::GroupByPerfectHash, true)
DEF_GET_ENTRY_AT(QueryDescriptionType::GroupByPerfectHash, false)
DEF_GET_ENTRY_AT(QueryDescriptionType::GroupByBaselineHash, true)
DEF_GET_ENTRY_AT(QueryDescriptionType::GroupByBaselineHash, false)
#undef DATA_T

#define DATA_T int16_t
DEF_GET_ENTRY_AT(QueryDescriptionType::GroupByPerfectHash, true)
DEF_GET_ENTRY_AT(QueryDescriptionType::GroupByPerfectHash, false)
DEF_GET_ENTRY_AT(QueryDescriptionType::GroupByBaselineHash, true)
DEF_GET_ENTRY_AT(QueryDescriptionType::GroupByBaselineHash, false)
#undef DATA_T

#define DATA_T int8_t
DEF_GET_ENTRY_AT(QueryDescriptionType::GroupByPerfectHash, true)
DEF_GET_ENTRY_AT(QueryDescriptionType::GroupByPerfectHash, false)
DEF_GET_ENTRY_AT(QueryDescriptionType::GroupByBaselineHash, true)
DEF_GET_ENTRY_AT(QueryDescriptionType::GroupByBaselineHash, false)
#undef DATA_T

#define DATA_T float
DEF_GET_ENTRY_AT(QueryDescriptionType::GroupByPerfectHash, true)
DEF_GET_ENTRY_AT(QueryDescriptionType::GroupByPerfectHash, false)
DEF_GET_ENTRY_AT(QueryDescriptionType::GroupByBaselineHash, true)
DEF_GET_ENTRY_AT(QueryDescriptionType::GroupByBaselineHash, false)
#undef DATA_T

#define DATA_T double
DEF_GET_ENTRY_AT(QueryDescriptionType::GroupByPerfectHash, true)
DEF_GET_ENTRY_AT(QueryDescriptionType::GroupByPerfectHash, false)
DEF_GET_ENTRY_AT(QueryDescriptionType::GroupByBaselineHash, true)
DEF_GET_ENTRY_AT(QueryDescriptionType::GroupByBaselineHash, false)
#undef DATA_T

#undef DEF_GET_ENTRY_AT

template <typename ENTRY_TYPE>
ENTRY_TYPE ResultSet::getColumnarPerfectHashEntryAt(const size_t row_idx,
                                                    const size_t target_idx,
                                                    const size_t slot_idx) const {
  const size_t column_offset = storage_->query_mem_desc_.getColOffInBytes(slot_idx);
  const int8_t* storage_buffer = storage_->getUnderlyingBuffer() + column_offset;
  return reinterpret_cast<const ENTRY_TYPE*>(storage_buffer)[row_idx];
}

template <typename ENTRY_TYPE>
ENTRY_TYPE ResultSet::getRowWisePerfectHashEntryAt(const size_t row_idx,
                                                   const size_t target_idx,
                                                   const size_t slot_idx) const {
  const size_t row_offset = storage_->query_mem_desc_.getRowSize() * row_idx;
  const size_t column_offset = storage_->query_mem_desc_.getColOffInBytes(slot_idx);
  const int8_t* storage_buffer =
      storage_->getUnderlyingBuffer() + row_offset + column_offset;
  return *reinterpret_cast<const ENTRY_TYPE*>(storage_buffer);
}

template <typename ENTRY_TYPE>
ENTRY_TYPE ResultSet::getRowWiseBaselineEntryAt(const size_t row_idx,
                                                const size_t target_idx,
                                                const size_t slot_idx) const {
  CHECK_NE(storage_->query_mem_desc_.targetGroupbyIndicesSize(), size_t(0));
  const auto key_width = storage_->query_mem_desc_.getEffectiveKeyWidth();
  auto keys_ptr = row_ptr_rowwise(
      storage_->getUnderlyingBuffer(), storage_->query_mem_desc_, row_idx);
  const auto column_offset =
      (storage_->query_mem_desc_.getTargetGroupbyIndex(target_idx) < 0)
          ? storage_->query_mem_desc_.getColOffInBytes(slot_idx)
          : storage_->query_mem_desc_.getTargetGroupbyIndex(target_idx) * key_width;
  const auto storage_buffer = keys_ptr + column_offset;
  return *reinterpret_cast<const ENTRY_TYPE*>(storage_buffer);
}

template <typename ENTRY_TYPE>
ENTRY_TYPE ResultSet::getColumnarBaselineEntryAt(const size_t row_idx,
                                                 const size_t target_idx,
                                                 const size_t slot_idx) const {
  CHECK_NE(storage_->query_mem_desc_.targetGroupbyIndicesSize(), size_t(0));
  const auto key_width = storage_->query_mem_desc_.getEffectiveKeyWidth();
  const auto column_offset =
      (storage_->query_mem_desc_.getTargetGroupbyIndex(target_idx) < 0)
          ? storage_->query_mem_desc_.getColOffInBytes(slot_idx)
          : storage_->query_mem_desc_.getTargetGroupbyIndex(target_idx) * key_width *
                storage_->query_mem_desc_.getEntryCount();
  const auto column_buffer = storage_->getUnderlyingBuffer() + column_offset;
  return reinterpret_cast<const ENTRY_TYPE*>(column_buffer)[row_idx];
}
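
// Note on the offset math above (an explanatory sketch): in the columnar
// baseline layout each group-by key column is a contiguous run of
// getEntryCount() values of key_width bytes, so a key materialized as a target
// starts at getTargetGroupbyIndex(target_idx) * key_width * getEntryCount(),
// and row_idx then indexes into that run.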

// Interprets ptr1, ptr2 as the ptr and len pair used for variable length data.
TargetValue ResultSet::makeVarlenTargetValue(const int8_t* ptr1,
                                             const int8_t compact_sz1,
                                             const int8_t* ptr2,
                                             const int8_t compact_sz2,
                                             const TargetInfo& target_info,
                                             const size_t target_logical_idx,
                                             const bool translate_strings,
                                             const size_t entry_buff_idx) const {
  auto varlen_ptr = read_int_from_buff(ptr1, compact_sz1);
  if (separate_varlen_storage_valid_ && !target_info.is_agg) {
    if (varlen_ptr < 0) {
      CHECK_EQ(-1, varlen_ptr);
      if (target_info.sql_type.get_type() == kARRAY) {
        return ArrayTargetValue(boost::optional<std::vector<ScalarTargetValue>>{});
      }
      return TargetValue(nullptr);
    }
    const auto storage_idx = getStorageIndex(entry_buff_idx);
    if (target_info.sql_type.is_string()) {
      CHECK(target_info.sql_type.get_compression() == kENCODING_NONE);
      CHECK_LT(storage_idx.first, serialized_varlen_buffer_.size());
      const auto& varlen_buffer_for_storage =
          serialized_varlen_buffer_[storage_idx.first];
      CHECK_LT(static_cast<size_t>(varlen_ptr), varlen_buffer_for_storage.size());
      return varlen_buffer_for_storage[varlen_ptr];
    } else if (target_info.sql_type.get_type() == kARRAY) {
      CHECK_LT(storage_idx.first, serialized_varlen_buffer_.size());
      const auto& varlen_buffer = serialized_varlen_buffer_[storage_idx.first];
      CHECK_LT(static_cast<size_t>(varlen_ptr), varlen_buffer.size());

      return build_array_target_value(
          target_info.sql_type,
          reinterpret_cast<const int8_t*>(varlen_buffer[varlen_ptr].data()),
          varlen_buffer[varlen_ptr].size(),
          translate_strings,
          row_set_mem_owner_,
          catalog_);
    } else {
      CHECK(false);
    }
  }
  if (!lazy_fetch_info_.empty()) {
    CHECK_LT(target_logical_idx, lazy_fetch_info_.size());
    const auto& col_lazy_fetch = lazy_fetch_info_[target_logical_idx];
    if (col_lazy_fetch.is_lazily_fetched) {
      const auto storage_idx = getStorageIndex(entry_buff_idx);
      CHECK_LT(storage_idx.first, col_buffers_.size());
      auto& frag_col_buffers =
          getColumnFrag(storage_idx.first, target_logical_idx, varlen_ptr);
      bool is_end{false};
      auto col_buf = const_cast<int8_t*>(frag_col_buffers[col_lazy_fetch.local_col_id]);
      if (target_info.sql_type.is_string()) {
        VarlenDatum vd;
        ChunkIter_get_nth(
            reinterpret_cast<ChunkIter*>(col_buf), varlen_ptr, false, &vd, &is_end);
        CHECK(!is_end);
        if (vd.is_null) {
          return TargetValue(nullptr);
        }
        CHECK(vd.pointer);
        CHECK_GT(vd.length, 0u);
        std::string fetched_str(reinterpret_cast<char*>(vd.pointer), vd.length);
        return fetched_str;
      } else {
        CHECK(target_info.sql_type.is_array());
        ArrayDatum ad;
        if (FlatBufferManager::isFlatBuffer(col_buf)) {
          FlatBufferManager m{col_buf};
          int64_t length;
          auto status = m.getItem(varlen_ptr, length, ad.pointer, ad.is_null);
          CHECK_EQ(status, FlatBufferManager::Status::Success);
          CHECK_GE(length, 0);
          ad.length = length;
        } else {
          ChunkIter_get_nth(
              reinterpret_cast<ChunkIter*>(col_buf), varlen_ptr, &ad, &is_end);
        }
        CHECK(!is_end);
        if (ad.is_null) {
          return ArrayTargetValue(boost::optional<std::vector<ScalarTargetValue>>{});
        }
        CHECK_GE(ad.length, 0u);
        if (ad.length > 0) {
          CHECK(ad.pointer);
        }
        return build_array_target_value(target_info.sql_type,
                                        ad.pointer,
                                        ad.length,
                                        translate_strings,
                                        row_set_mem_owner_,
                                        catalog_);
      }
    }
  }
  if (!varlen_ptr) {
    if (target_info.sql_type.is_array()) {
      return ArrayTargetValue(boost::optional<std::vector<ScalarTargetValue>>{});
    }
    return TargetValue(nullptr);
  }
  auto length = read_int_from_buff(ptr2, compact_sz2);
  if (target_info.sql_type.is_array()) {
    const auto& elem_ti = target_info.sql_type.get_elem_type();
    length *= elem_ti.get_array_context_logical_size();
  }
  std::vector<int8_t> cpu_buffer;
  if (varlen_ptr && device_type_ == ExecutorDeviceType::GPU) {
    cpu_buffer.resize(length);
    const auto executor = query_mem_desc_.getExecutor();
    CHECK(executor);
    auto data_mgr = executor->getDataMgr();
    auto allocator = std::make_unique<CudaAllocator>(
        data_mgr, device_id_, getQueryEngineCudaStreamForDevice(device_id_));

    allocator->copyFromDevice(
        &cpu_buffer[0], reinterpret_cast<int8_t*>(varlen_ptr), length);
    varlen_ptr = reinterpret_cast<int64_t>(&cpu_buffer[0]);
  }
  if (target_info.sql_type.is_array()) {
    return build_array_target_value(target_info.sql_type,
                                    reinterpret_cast<const int8_t*>(varlen_ptr),
                                    length,
                                    translate_strings,
                                    row_set_mem_owner_,
                                    catalog_);
  }
  return std::string(reinterpret_cast<char*>(varlen_ptr), length);
}

bool ResultSet::isGeoColOnGpu(const size_t col_idx) const {
  // This should match the logic in makeGeoTargetValue which ultimately calls
  // fetch_data_from_gpu when the geo column is on the device.
  // TODO(croot): somehow find a way to refactor this and makeGeoTargetValue to use a
  // utility function that handles this logic in one place
  CHECK_LT(col_idx, targets_.size());
  if (!IS_GEO(targets_[col_idx].sql_type.get_type())) {
    throw std::runtime_error("Column target at index " + std::to_string(col_idx) +
                             " is not a geo column. It is of type " +
                             targets_[col_idx].sql_type.get_type_name() + ".");
  }

  const auto& target_info = targets_[col_idx];
  if (separate_varlen_storage_valid_ && !target_info.is_agg) {
    return false;
  }

  if (!lazy_fetch_info_.empty()) {
    CHECK_LT(col_idx, lazy_fetch_info_.size());
    if (lazy_fetch_info_[col_idx].is_lazily_fetched) {
      return false;
    }
  }

  return device_type_ == ExecutorDeviceType::GPU;
}
1463 
1464 // Reads a geo value from a series of ptrs to var len types
1465 // In Columnar format, geo_target_ptr is the geo column ptr (a pointer to the beginning
1466 // of that specific geo column) and should be appropriately adjusted with the
1467 // entry_buff_idx
1468 TargetValue ResultSet::makeGeoTargetValue(const int8_t* geo_target_ptr,
1469  const size_t slot_idx,
1470  const TargetInfo& target_info,
1471  const size_t target_logical_idx,
1472  const size_t entry_buff_idx) const {
1473  CHECK(target_info.sql_type.is_geometry());
1474 
1475  auto getNextTargetBufferRowWise = [&](const size_t slot_idx, const size_t range) {
1476  return geo_target_ptr + query_mem_desc_.getPaddedColWidthForRange(slot_idx, range);
1477  };
1478 
1479  auto getNextTargetBufferColWise = [&](const size_t slot_idx, const size_t range) {
1480  const auto storage_info = findStorage(entry_buff_idx);
1481  auto crt_geo_col_ptr = geo_target_ptr;
1482  for (size_t i = slot_idx; i < slot_idx + range; i++) {
1483  crt_geo_col_ptr = advance_to_next_columnar_target_buff(
1484  crt_geo_col_ptr, storage_info.storage_ptr->query_mem_desc_, i);
1485  }
1486  // adjusting the column pointer to represent a pointer to the geo target value
1487  return crt_geo_col_ptr +
1488  storage_info.fixedup_entry_idx *
1489  storage_info.storage_ptr->query_mem_desc_.getPaddedSlotWidthBytes(
1490  slot_idx + range);
1491  };
1492 
1493  auto getNextTargetBuffer = [&](const size_t slot_idx, const size_t range) {
1495  ? getNextTargetBufferColWise(slot_idx, range)
1496  : getNextTargetBufferRowWise(slot_idx, range);
1497  };
1498 
1499  auto getCoordsDataPtr = [&](const int8_t* geo_target_ptr) {
1500  return read_int_from_buff(getNextTargetBuffer(slot_idx, 0),
1502  };
1503 
1504  auto getCoordsLength = [&](const int8_t* geo_target_ptr) {
1505  return read_int_from_buff(getNextTargetBuffer(slot_idx, 1),
1507  };
1508 
1509  auto getRingSizesPtr = [&](const int8_t* geo_target_ptr) {
1510  return read_int_from_buff(getNextTargetBuffer(slot_idx, 2),
1512  };
1513 
1514  auto getRingSizesLength = [&](const int8_t* geo_target_ptr) {
1515  return read_int_from_buff(getNextTargetBuffer(slot_idx, 3),
1517  };
1518 
1519  auto getPolyRingsPtr = [&](const int8_t* geo_target_ptr) {
1520  return read_int_from_buff(getNextTargetBuffer(slot_idx, 4),
1522  };
1523 
1524  auto getPolyRingsLength = [&](const int8_t* geo_target_ptr) {
1525  return read_int_from_buff(getNextTargetBuffer(slot_idx, 5),
1527  };
1528 
1529  auto getFragColBuffers = [&]() -> decltype(auto) {
1530  const auto storage_idx = getStorageIndex(entry_buff_idx);
1531  CHECK_LT(storage_idx.first, col_buffers_.size());
1532  auto global_idx = getCoordsDataPtr(geo_target_ptr);
1533  return getColumnFrag(storage_idx.first, target_logical_idx, global_idx);
1534  };
1535 
1536  const bool is_gpu_fetch = device_type_ == ExecutorDeviceType::GPU;
1537 
1538  auto getDataMgr = [&]() {
1539  auto executor = query_mem_desc_.getExecutor();
1540  CHECK(executor);
1541  return executor->getDataMgr();
1542  };
1543 
1544  auto getSeparateVarlenStorage = [&]() -> decltype(auto) {
1545  const auto storage_idx = getStorageIndex(entry_buff_idx);
1546  CHECK_LT(storage_idx.first, serialized_varlen_buffer_.size());
1547  const auto& varlen_buffer = serialized_varlen_buffer_[storage_idx.first];
1548  return varlen_buffer;
1549  };
1550 
1551  if (separate_varlen_storage_valid_ && getCoordsDataPtr(geo_target_ptr) < 0) {
1552  CHECK_EQ(-1, getCoordsDataPtr(geo_target_ptr));
1553  return TargetValue(nullptr);
1554  }
1555 
1556  const ColumnLazyFetchInfo* col_lazy_fetch = nullptr;
1557  if (!lazy_fetch_info_.empty()) {
1558  CHECK_LT(target_logical_idx, lazy_fetch_info_.size());
1559  col_lazy_fetch = &lazy_fetch_info_[target_logical_idx];
1560  }
1561 
1562  switch (target_info.sql_type.get_type()) {
1563  case kPOINT: {
1564  if (query_mem_desc_.slotIsVarlenOutput(slot_idx)) {
1565  auto varlen_output_info = getVarlenOutputInfo(entry_buff_idx);
1566  CHECK(varlen_output_info);
1567  auto geo_data_ptr = read_int_from_buff(
1568  geo_target_ptr, query_mem_desc_.getPaddedSlotWidthBytes(slot_idx));
1569  auto cpu_data_ptr =
1570  reinterpret_cast<int64_t>(varlen_output_info->computeCpuOffset(geo_data_ptr));
1571  return GeoTargetValueBuilder<kPOINT, GeoQueryOutputFetchHandler>::build(
1572  target_info.sql_type,
1574  /*data_mgr=*/nullptr,
1575  /*is_gpu_fetch=*/false,
1576  device_id_,
1577  cpu_data_ptr,
1578  target_info.sql_type.get_compression() == kENCODING_GEOINT ? 8 : 16);
1579  } else if (separate_varlen_storage_valid_ && !target_info.is_agg) {
1580  const auto& varlen_buffer = getSeparateVarlenStorage();
1581  CHECK_LT(static_cast<size_t>(getCoordsDataPtr(geo_target_ptr)),
1582  varlen_buffer.size());
1583 
1584  return GeoTargetValueBuilder<kPOINT, GeoQueryOutputFetchHandler>::build(
1585  target_info.sql_type,
1586  geo_return_type_,
1587  nullptr,
1588  false,
1589  device_id_,
1590  reinterpret_cast<int64_t>(
1591  varlen_buffer[getCoordsDataPtr(geo_target_ptr)].data()),
1592  static_cast<int64_t>(varlen_buffer[getCoordsDataPtr(geo_target_ptr)].size()));
1593  } else if (col_lazy_fetch && col_lazy_fetch->is_lazily_fetched) {
1594  const auto& frag_col_buffers = getFragColBuffers();
1595  return GeoTargetValueBuilder<kPOINT, GeoLazyFetchHandler>::build(
1596  target_info.sql_type,
1597  geo_return_type_,
1598  frag_col_buffers[col_lazy_fetch->local_col_id],
1599  getCoordsDataPtr(geo_target_ptr));
1600  } else {
1601  return GeoTargetValueBuilder<kPOINT, GeoQueryOutputFetchHandler>::build(
1602  target_info.sql_type,
1603  geo_return_type_,
1604  is_gpu_fetch ? getDataMgr() : nullptr,
1605  is_gpu_fetch,
1606  device_id_,
1607  getCoordsDataPtr(geo_target_ptr),
1608  getCoordsLength(geo_target_ptr));
1609  }
1610  break;
1611  }
1612  case kMULTIPOINT: {
1613  if (separate_varlen_storage_valid_ && !target_info.is_agg) {
1614  const auto& varlen_buffer = getSeparateVarlenStorage();
1615  CHECK_LT(static_cast<size_t>(getCoordsDataPtr(geo_target_ptr)),
1616  varlen_buffer.size());
1617 
1618  return GeoTargetValueBuilder<kMULTIPOINT, GeoQueryOutputFetchHandler>::build(
1619  target_info.sql_type,
1620  geo_return_type_,
1621  nullptr,
1622  false,
1623  device_id_,
1624  reinterpret_cast<int64_t>(
1625  varlen_buffer[getCoordsDataPtr(geo_target_ptr)].data()),
1626  static_cast<int64_t>(varlen_buffer[getCoordsDataPtr(geo_target_ptr)].size()));
1627  } else if (col_lazy_fetch && col_lazy_fetch->is_lazily_fetched) {
1628  const auto& frag_col_buffers = getFragColBuffers();
1629  return GeoTargetValueBuilder<kMULTIPOINT, GeoLazyFetchHandler>::build(
1630  target_info.sql_type,
1631  geo_return_type_,
1632  frag_col_buffers[col_lazy_fetch->local_col_id],
1633  getCoordsDataPtr(geo_target_ptr));
1634  } else {
1635  return GeoTargetValueBuilder<kMULTIPOINT, GeoQueryOutputFetchHandler>::build(
1636  target_info.sql_type,
1637  geo_return_type_,
1638  is_gpu_fetch ? getDataMgr() : nullptr,
1639  is_gpu_fetch,
1640  device_id_,
1641  getCoordsDataPtr(geo_target_ptr),
1642  getCoordsLength(geo_target_ptr));
1643  }
1644  break;
1645  }
1646  case kLINESTRING: {
1647  if (separate_varlen_storage_valid_ && !target_info.is_agg) {
1648  const auto& varlen_buffer = getSeparateVarlenStorage();
1649  CHECK_LT(static_cast<size_t>(getCoordsDataPtr(geo_target_ptr)),
1650  varlen_buffer.size());
1651 
1652  return GeoTargetValueBuilder<kLINESTRING, GeoQueryOutputFetchHandler>::build(
1653  target_info.sql_type,
1654  geo_return_type_,
1655  nullptr,
1656  false,
1657  device_id_,
1658  reinterpret_cast<int64_t>(
1659  varlen_buffer[getCoordsDataPtr(geo_target_ptr)].data()),
1660  static_cast<int64_t>(varlen_buffer[getCoordsDataPtr(geo_target_ptr)].size()));
1661  } else if (col_lazy_fetch && col_lazy_fetch->is_lazily_fetched) {
1662  const auto& frag_col_buffers = getFragColBuffers();
1663  return GeoTargetValueBuilder<kLINESTRING, GeoLazyFetchHandler>::build(
1664  target_info.sql_type,
1665  geo_return_type_,
1666  frag_col_buffers[col_lazy_fetch->local_col_id],
1667  getCoordsDataPtr(geo_target_ptr));
1668  } else {
1669  return GeoTargetValueBuilder<kLINESTRING, GeoQueryOutputFetchHandler>::build(
1670  target_info.sql_type,
1671  geo_return_type_,
1672  is_gpu_fetch ? getDataMgr() : nullptr,
1673  is_gpu_fetch,
1674  device_id_,
1675  getCoordsDataPtr(geo_target_ptr),
1676  getCoordsLength(geo_target_ptr));
1677  }
1678  break;
1679  }
1680  case kMULTILINESTRING: {
1681  if (separate_varlen_storage_valid_ && !target_info.is_agg) {
1682  const auto& varlen_buffer = getSeparateVarlenStorage();
1683  CHECK_LT(static_cast<size_t>(getCoordsDataPtr(geo_target_ptr) + 1),
1684  varlen_buffer.size());
1685 
1686  return GeoTargetValueBuilder<kMULTILINESTRING, GeoQueryOutputFetchHandler>::build(
1687  target_info.sql_type,
1688  geo_return_type_,
1689  nullptr,
1690  false,
1691  device_id_,
1692  reinterpret_cast<int64_t>(
1693  varlen_buffer[getCoordsDataPtr(geo_target_ptr)].data()),
1694  static_cast<int64_t>(varlen_buffer[getCoordsDataPtr(geo_target_ptr)].size()),
1695  reinterpret_cast<int64_t>(
1696  varlen_buffer[getCoordsDataPtr(geo_target_ptr) + 1].data()),
1697  static_cast<int64_t>(
1698  varlen_buffer[getCoordsDataPtr(geo_target_ptr) + 1].size()));
1699  } else if (col_lazy_fetch && col_lazy_fetch->is_lazily_fetched) {
1700  const auto& frag_col_buffers = getFragColBuffers();
1701 
1702  return GeoTargetValueBuilder<kMULTILINESTRING, GeoLazyFetchHandler>::build(
1703  target_info.sql_type,
1704  geo_return_type_,
1705  frag_col_buffers[col_lazy_fetch->local_col_id],
1706  getCoordsDataPtr(geo_target_ptr),
1707  frag_col_buffers[col_lazy_fetch->local_col_id + 1],
1708  getCoordsDataPtr(geo_target_ptr));
1709  } else {
1710  return GeoTargetValueBuilder<kMULTILINESTRING, GeoQueryOutputFetchHandler>::build(
1711  target_info.sql_type,
1712  geo_return_type_,
1713  is_gpu_fetch ? getDataMgr() : nullptr,
1714  is_gpu_fetch,
1715  device_id_,
1716  getCoordsDataPtr(geo_target_ptr),
1717  getCoordsLength(geo_target_ptr),
1718  getRingSizesPtr(geo_target_ptr),
1719  getRingSizesLength(geo_target_ptr) * 4);
1720  }
1721  break;
1722  }
1723  case kPOLYGON: {
1724  if (separate_varlen_storage_valid_ && !target_info.is_agg) {
1725  const auto& varlen_buffer = getSeparateVarlenStorage();
1726  CHECK_LT(static_cast<size_t>(getCoordsDataPtr(geo_target_ptr) + 1),
1727  varlen_buffer.size());
1728 
1729  return GeoTargetValueBuilder<kPOLYGON, GeoQueryOutputFetchHandler>::build(
1730  target_info.sql_type,
1731  geo_return_type_,
1732  nullptr,
1733  false,
1734  device_id_,
1735  reinterpret_cast<int64_t>(
1736  varlen_buffer[getCoordsDataPtr(geo_target_ptr)].data()),
1737  static_cast<int64_t>(varlen_buffer[getCoordsDataPtr(geo_target_ptr)].size()),
1738  reinterpret_cast<int64_t>(
1739  varlen_buffer[getCoordsDataPtr(geo_target_ptr) + 1].data()),
1740  static_cast<int64_t>(
1741  varlen_buffer[getCoordsDataPtr(geo_target_ptr) + 1].size()));
1742  } else if (col_lazy_fetch && col_lazy_fetch->is_lazily_fetched) {
1743  const auto& frag_col_buffers = getFragColBuffers();
1744 
1745  return GeoTargetValueBuilder<kPOLYGON, GeoLazyFetchHandler>::build(
1746  target_info.sql_type,
1747  geo_return_type_,
1748  frag_col_buffers[col_lazy_fetch->local_col_id],
1749  getCoordsDataPtr(geo_target_ptr),
1750  frag_col_buffers[col_lazy_fetch->local_col_id + 1],
1751  getCoordsDataPtr(geo_target_ptr));
1752  } else {
1753  return GeoTargetValueBuilder<kPOLYGON, GeoQueryOutputFetchHandler>::build(
1754  target_info.sql_type,
1755  geo_return_type_,
1756  is_gpu_fetch ? getDataMgr() : nullptr,
1757  is_gpu_fetch,
1758  device_id_,
1759  getCoordsDataPtr(geo_target_ptr),
1760  getCoordsLength(geo_target_ptr),
1761  getRingSizesPtr(geo_target_ptr),
1762  getRingSizesLength(geo_target_ptr) * 4);
1763  }
1764  break;
1765  }
1766  case kMULTIPOLYGON: {
1767  if (separate_varlen_storage_valid_ && !target_info.is_agg) {
1768  const auto& varlen_buffer = getSeparateVarlenStorage();
1769  CHECK_LT(static_cast<size_t>(getCoordsDataPtr(geo_target_ptr) + 2),
1770  varlen_buffer.size());
1771 
1772  return GeoTargetValueBuilder<kMULTIPOLYGON, GeoQueryOutputFetchHandler>::build(
1773  target_info.sql_type,
1774  geo_return_type_,
1775  nullptr,
1776  false,
1777  device_id_,
1778  reinterpret_cast<int64_t>(
1779  varlen_buffer[getCoordsDataPtr(geo_target_ptr)].data()),
1780  static_cast<int64_t>(varlen_buffer[getCoordsDataPtr(geo_target_ptr)].size()),
1781  reinterpret_cast<int64_t>(
1782  varlen_buffer[getCoordsDataPtr(geo_target_ptr) + 1].data()),
1783  static_cast<int64_t>(
1784  varlen_buffer[getCoordsDataPtr(geo_target_ptr) + 1].size()),
1785  reinterpret_cast<int64_t>(
1786  varlen_buffer[getCoordsDataPtr(geo_target_ptr) + 2].data()),
1787  static_cast<int64_t>(
1788  varlen_buffer[getCoordsDataPtr(geo_target_ptr) + 2].size()));
1789  } else if (col_lazy_fetch && col_lazy_fetch->is_lazily_fetched) {
1790  const auto& frag_col_buffers = getFragColBuffers();
1791 
1792  return GeoTargetValueBuilder<kMULTIPOLYGON, GeoLazyFetchHandler>::build(
1793  target_info.sql_type,
1794  geo_return_type_,
1795  frag_col_buffers[col_lazy_fetch->local_col_id],
1796  getCoordsDataPtr(geo_target_ptr),
1797  frag_col_buffers[col_lazy_fetch->local_col_id + 1],
1798  getCoordsDataPtr(geo_target_ptr),
1799  frag_col_buffers[col_lazy_fetch->local_col_id + 2],
1800  getCoordsDataPtr(geo_target_ptr));
1801  } else {
1802  return GeoTargetValueBuilder<kMULTIPOLYGON, GeoQueryOutputFetchHandler>::build(
1803  target_info.sql_type,
1804  geo_return_type_,
1805  is_gpu_fetch ? getDataMgr() : nullptr,
1806  is_gpu_fetch,
1807  device_id_,
1808  getCoordsDataPtr(geo_target_ptr),
1809  getCoordsLength(geo_target_ptr),
1810  getRingSizesPtr(geo_target_ptr),
1811  getRingSizesLength(geo_target_ptr) * 4,
1812  getPolyRingsPtr(geo_target_ptr),
1813  getPolyRingsLength(geo_target_ptr) * 4);
1814  }
1815  break;
1816  }
1817  default:
1818  throw std::runtime_error("Unknown Geometry type encountered: " +
1819  target_info.sql_type.get_type_name());
1820  }
1821  UNREACHABLE();
1822  return TargetValue(nullptr);
1823 }
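// The lambdas in makeGeoTargetValue above resolve a geo target's physical slots at
// fixed offsets from slot_idx: coords ptr/len in slots 0/1, ring sizes ptr/len in
// slots 2/3, poly rings ptr/len in slots 4/5. An illustrative sketch of that
// addressing for a row-wise buffer, assuming (hypothetically) a uniform 8-byte
// padded slot width:
//
//   const int8_t* base = geo_target_ptr;                     // slot 0 of the target
//   auto coords_ptr = read_int_from_buff(base, 8);           // slot 0: coords ptr
//   auto coords_len = read_int_from_buff(base + 8, 8);       // slot 1: coords length
//   auto ring_sizes_ptr = read_int_from_buff(base + 16, 8);  // slot 2: ring sizes ptr
//   auto ring_sizes_len = read_int_from_buff(base + 24, 8);  // slot 3: ring sizes length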
1824 
1825 // Reads an integer or a float from ptr based on the type and the byte width.
1826 TargetValue ResultSet::makeTargetValue(const int8_t* ptr,
1827  const int8_t compact_sz,
1828  const TargetInfo& target_info,
1829  const size_t target_logical_idx,
1830  const bool translate_strings,
1831  const bool decimal_to_double,
1832  const size_t entry_buff_idx) const {
1833  auto actual_compact_sz = compact_sz;
1834  const auto& type_info = target_info.sql_type;
1835  if (type_info.get_type() == kFLOAT && !query_mem_desc_.forceFourByteFloat()) {
1836  if (query_mem_desc_.isLogicalSizedColumnsAllowed()) {
1837  actual_compact_sz = sizeof(float);
1838  } else {
1839  actual_compact_sz = sizeof(double);
1840  }
1841  if (target_info.is_agg &&
1842  (target_info.agg_kind == kAVG || target_info.agg_kind == kSUM ||
1843  target_info.agg_kind == kMIN || target_info.agg_kind == kMAX ||
1844  target_info.agg_kind == kSINGLE_VALUE)) {
1845  // The above listed aggregates use two floats in a single 8-byte slot. Set the
1846  // padded size to 4 bytes to properly read each value.
1847  actual_compact_sz = sizeof(float);
1848  }
1849  }
1850  if (get_compact_type(target_info).is_date_in_days()) {
1851  // Dates encoded in days are converted to 8 byte values on read.
1852  actual_compact_sz = sizeof(int64_t);
1853  }
1854 
1855  // String dictionary keys are read as 32-bit values regardless of encoding
1856  if (type_info.is_string() && type_info.get_compression() == kENCODING_DICT &&
1857  type_info.get_comp_param()) {
1858  actual_compact_sz = sizeof(int32_t);
1859  }
1860 
1861  auto ival = read_int_from_buff(ptr, actual_compact_sz);
1862  const auto& chosen_type = get_compact_type(target_info);
1863  if (!lazy_fetch_info_.empty()) {
1864  CHECK_LT(target_logical_idx, lazy_fetch_info_.size());
1865  const auto& col_lazy_fetch = lazy_fetch_info_[target_logical_idx];
1866  if (col_lazy_fetch.is_lazily_fetched) {
1867  CHECK_GE(ival, 0);
1868  const auto storage_idx = getStorageIndex(entry_buff_idx);
1869  CHECK_LT(storage_idx.first, col_buffers_.size());
1870  auto& frag_col_buffers = getColumnFrag(storage_idx.first, target_logical_idx, ival);
1871  CHECK_LT(size_t(col_lazy_fetch.local_col_id), frag_col_buffers.size());
1872  ival = result_set::lazy_decode(
1873  col_lazy_fetch, frag_col_buffers[col_lazy_fetch.local_col_id], ival);
1874  if (chosen_type.is_fp()) {
1875  const auto dval = *reinterpret_cast<const double*>(may_alias_ptr(&ival));
1876  if (chosen_type.get_type() == kFLOAT) {
1877  return ScalarTargetValue(static_cast<float>(dval));
1878  } else {
1879  return ScalarTargetValue(dval);
1880  }
1881  }
1882  }
1883  }
1884  if (chosen_type.is_fp()) {
1885  if (target_info.agg_kind == kAPPROX_QUANTILE) {
1886  return *reinterpret_cast<double const*>(ptr) == NULL_DOUBLE
1887  ? NULL_DOUBLE // sql_validate / just_validate
1888  : calculateQuantile(*reinterpret_cast<quantile::TDigest* const*>(ptr));
1889  }
1890  switch (actual_compact_sz) {
1891  case 8: {
1892  const auto dval = *reinterpret_cast<const double*>(ptr);
1893  return chosen_type.get_type() == kFLOAT
1894  ? ScalarTargetValue(static_cast<const float>(dval))
1895  : ScalarTargetValue(dval);
1896  }
1897  case 4: {
1898  CHECK_EQ(kFLOAT, chosen_type.get_type());
1899  return *reinterpret_cast<const float*>(ptr);
1900  }
1901  default:
1902  CHECK(false);
1903  }
1904  }
1905  if (chosen_type.is_integer() || chosen_type.is_boolean() || chosen_type.is_time() ||
1906  chosen_type.is_timeinterval()) {
1907  if (is_distinct_target(target_info)) {
1908  return TargetValue(count_distinct_set_size(
1909  ival, query_mem_desc_.getCountDistinctDescriptor(target_logical_idx)));
1910  }
1911  // TODO(alex): remove int_resize_cast, make read_int_from_buff return the
1912  // right type instead
1913  if (inline_int_null_val(chosen_type) ==
1914  int_resize_cast(ival, chosen_type.get_logical_size())) {
1915  return inline_int_null_val(type_info);
1916  }
1917  return ival;
1918  }
1919  if (chosen_type.is_string() && chosen_type.get_compression() == kENCODING_DICT) {
1920  if (translate_strings) {
1921  if (static_cast<int32_t>(ival) ==
1922  NULL_INT) { // TODO(alex): this isn't nice, fix it
1923  return NullableString(nullptr);
1924  }
1925  StringDictionaryProxy* sdp{nullptr};
1926  if (!chosen_type.get_comp_param()) {
1927  sdp = row_set_mem_owner_->getLiteralStringDictProxy();
1928  } else {
1929  sdp = catalog_
1930  ? row_set_mem_owner_->getOrAddStringDictProxy(
1931  chosen_type.get_comp_param(), /*with_generation=*/false, catalog_)
1932  : row_set_mem_owner_->getStringDictProxy(
1933  chosen_type.get_comp_param()); // unit tests bypass the catalog
1934  }
1935  return NullableString(sdp->getString(ival));
1936  } else {
1937  return static_cast<int64_t>(static_cast<int32_t>(ival));
1938  }
1939  }
1940  if (chosen_type.is_decimal()) {
1941  if (decimal_to_double) {
1942  if (target_info.is_agg &&
1943  (target_info.agg_kind == kAVG || target_info.agg_kind == kSUM ||
1944  target_info.agg_kind == kMIN || target_info.agg_kind == kMAX) &&
1945  ival == inline_int_null_val(SQLTypeInfo(kBIGINT, false))) {
1946  return NULL_DOUBLE;
1947  }
1948  if (!chosen_type.get_notnull() &&
1949  ival ==
1950  inline_int_null_val(SQLTypeInfo(decimal_to_int_type(chosen_type), false))) {
1951  return NULL_DOUBLE;
1952  }
1953  return static_cast<double>(ival) / exp_to_scale(chosen_type.get_scale());
1954  }
1955  return ival;
1956  }
1957  CHECK(false);
1958  return TargetValue(int64_t(0));
1959 }
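// The decimal branch above stores values as integers scaled by 10^scale and divides
// on read. A minimal compilable sketch of that conversion (hypothetical helper, not
// part of the original file):
namespace {
inline double decimal_slot_to_double_sketch(const int64_t ival, const unsigned scale) {
  // e.g. a DECIMAL(10, 2) slot holding 12345 represents 123.45
  return static_cast<double>(ival) / exp_to_scale(scale);
}
}  // namespace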
1960 
1961 // Gets the TargetValue stored at position local_entry_idx in the col1_ptr and col2_ptr
1962 // column buffers. The second column is used for AVG and for variable-length targets
1963 // (pointer/length pairs). The global_entry_idx is passed to makeTargetValue for the
1964 // final lazy fetch (if there is any).
1965 TargetValue ResultSet::getTargetValueFromBufferColwise(
1966  const int8_t* col_ptr,
1967  const int8_t* keys_ptr,
1968  const QueryMemoryDescriptor& query_mem_desc,
1969  const size_t local_entry_idx,
1970  const size_t global_entry_idx,
1971  const TargetInfo& target_info,
1972  const size_t target_logical_idx,
1973  const size_t slot_idx,
1974  const bool translate_strings,
1975  const bool decimal_to_double) const {
1976  CHECK(query_mem_desc_.didOutputColumnar());
1977  const auto col1_ptr = col_ptr;
1978  const auto compact_sz1 = query_mem_desc.getPaddedSlotWidthBytes(slot_idx);
1979  const auto next_col_ptr =
1980  advance_to_next_columnar_target_buff(col1_ptr, query_mem_desc, slot_idx);
1981  const auto col2_ptr = ((target_info.is_agg && target_info.agg_kind == kAVG) ||
1982  is_real_str_or_array(target_info))
1983  ? next_col_ptr
1984  : nullptr;
1985  const auto compact_sz2 = ((target_info.is_agg && target_info.agg_kind == kAVG) ||
1986  is_real_str_or_array(target_info))
1987  ? query_mem_desc.getPaddedSlotWidthBytes(slot_idx + 1)
1988  : 0;
1989 
1990  // TODO(Saman): add required logic for count distinct
1991  // geospatial target values:
1992  if (target_info.sql_type.is_geometry()) {
1993  return makeGeoTargetValue(
1994  col1_ptr, slot_idx, target_info, target_logical_idx, global_entry_idx);
1995  }
1996 
1997  const auto ptr1 = columnar_elem_ptr(local_entry_idx, col1_ptr, compact_sz1);
1998  if (target_info.agg_kind == kAVG || is_real_str_or_array(target_info)) {
1999  CHECK(col2_ptr);
2000  CHECK(compact_sz2);
2001  const auto ptr2 = columnar_elem_ptr(local_entry_idx, col2_ptr, compact_sz2);
2002  return target_info.agg_kind == kAVG
2003  ? make_avg_target_value(ptr1, compact_sz1, ptr2, compact_sz2, target_info)
2004  : makeVarlenTargetValue(ptr1,
2005  compact_sz1,
2006  ptr2,
2007  compact_sz2,
2008  target_info,
2009  target_logical_idx,
2010  translate_strings,
2011  global_entry_idx);
2012  }
2013  if (query_mem_desc_.targetGroupbyIndicesSize() == 0 ||
2014  query_mem_desc_.getTargetGroupbyIndex(target_logical_idx) < 0) {
2015  return makeTargetValue(ptr1,
2016  compact_sz1,
2017  target_info,
2018  target_logical_idx,
2019  translate_strings,
2020  decimal_to_double,
2021  global_entry_idx);
2022  }
2023  const auto key_width = query_mem_desc_.getEffectiveKeyWidth();
2024  const auto key_idx = query_mem_desc_.getTargetGroupbyIndex(target_logical_idx);
2025  CHECK_GE(key_idx, 0);
2026  auto key_col_ptr = keys_ptr + key_idx * query_mem_desc_.getEntryCount() * key_width;
2027  return makeTargetValue(columnar_elem_ptr(local_entry_idx, key_col_ptr, key_width),
2028  key_width,
2029  target_info,
2030  target_logical_idx,
2031  translate_strings,
2032  decimal_to_double,
2033  global_entry_idx);
2034 }
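// The columnar path above computes element addresses as col_ptr + entry_idx * width,
// since each slot is stored as a contiguous array of compact values. A sketch of
// columnar_elem_ptr under that assumption (hypothetical helper name):
namespace {
inline const int8_t* columnar_elem_ptr_sketch(const size_t entry_idx,
                                              const int8_t* col_ptr,
                                              const int8_t compact_sz) {
  return col_ptr + entry_idx * compact_sz;
}
}  // namespace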
2035 
2036 // Gets the TargetValue stored in slot_idx (and slot_idx + 1 for AVG) of
2037 // rowwise_target_ptr.
2038 TargetValue ResultSet::getTargetValueFromBufferRowwise(
2039  int8_t* rowwise_target_ptr,
2040  int8_t* keys_ptr,
2041  const size_t entry_buff_idx,
2042  const TargetInfo& target_info,
2043  const size_t target_logical_idx,
2044  const size_t slot_idx,
2045  const bool translate_strings,
2046  const bool decimal_to_double,
2047  const bool fixup_count_distinct_pointers) const {
2048  if (UNLIKELY(fixup_count_distinct_pointers)) {
2049  if (is_distinct_target(target_info)) {
2050  auto count_distinct_ptr_ptr = reinterpret_cast<int64_t*>(rowwise_target_ptr);
2051  const auto remote_ptr = *count_distinct_ptr_ptr;
2052  if (remote_ptr) {
2053  const auto ptr = storage_->mappedPtr(remote_ptr);
2054  if (ptr) {
2055  *count_distinct_ptr_ptr = ptr;
2056  } else {
2057  // need to create a zero filled buffer for this remote_ptr
2058  const auto& count_distinct_desc =
2059  query_mem_desc_.count_distinct_descriptors_[target_logical_idx];
2060  const auto bitmap_byte_sz = count_distinct_desc.sub_bitmap_count == 1
2061  ? count_distinct_desc.bitmapSizeBytes()
2062  : count_distinct_desc.bitmapPaddedSizeBytes();
2063  auto count_distinct_buffer = row_set_mem_owner_->allocateCountDistinctBuffer(
2064  bitmap_byte_sz, /*thread_idx=*/0);
2065  *count_distinct_ptr_ptr = reinterpret_cast<int64_t>(count_distinct_buffer);
2066  }
2067  }
2068  }
2069  return int64_t(0);
2070  }
2071  if (target_info.sql_type.is_geometry()) {
2072  return makeGeoTargetValue(
2073  rowwise_target_ptr, slot_idx, target_info, target_logical_idx, entry_buff_idx);
2074  }
2075 
2076  auto ptr1 = rowwise_target_ptr;
2077  int8_t compact_sz1 = query_mem_desc_.getPaddedSlotWidthBytes(slot_idx);
2078  if (query_mem_desc_.isSingleColumnGroupByWithPerfectHash() &&
2079  !query_mem_desc_.hasKeylessHash() && !target_info.is_agg) {
2080  // Single column perfect hash group by can utilize one slot for both the key and the
2081  // target value if both values fit in 8 bytes. Use the target value actual size for
2082  // this case. If they don't, the target value should be 8 bytes, so we can still use
2083  // the actual size rather than the compact size.
2084  compact_sz1 = query_mem_desc_.getLogicalSlotWidthBytes(slot_idx);
2085  }
2086 
2087  // logic for deciding width of column
2088  if (target_info.agg_kind == kAVG || is_real_str_or_array(target_info)) {
2089  const auto ptr2 =
2090  rowwise_target_ptr + query_mem_desc_.getPaddedSlotWidthBytes(slot_idx);
2091  int8_t compact_sz2 = 0;
2092  // Skip reading the second slot if we have a none-encoded string and are using
2093  // the none-encoded strings buffer attached to ResultSetStorage
2094  if (!(separate_varlen_storage_valid_ &&
2095  (target_info.sql_type.is_array() ||
2096  (target_info.sql_type.is_string() &&
2097  target_info.sql_type.get_compression() == kENCODING_NONE)))) {
2098  compact_sz2 = query_mem_desc_.getPaddedSlotWidthBytes(slot_idx + 1);
2099  }
2100  if (separate_varlen_storage_valid_ && target_info.is_agg) {
2101  compact_sz2 = 8; // TODO(adb): is there a better way to do this?
2102  }
2103  CHECK(ptr2);
2104  return target_info.agg_kind == kAVG
2105  ? make_avg_target_value(ptr1, compact_sz1, ptr2, compact_sz2, target_info)
2106  : makeVarlenTargetValue(ptr1,
2107  compact_sz1,
2108  ptr2,
2109  compact_sz2,
2110  target_info,
2111  target_logical_idx,
2112  translate_strings,
2113  entry_buff_idx);
2114  }
2115  if (query_mem_desc_.targetGroupbyIndicesSize() == 0 ||
2116  query_mem_desc_.getTargetGroupbyIndex(target_logical_idx) < 0) {
2117  return makeTargetValue(ptr1,
2118  compact_sz1,
2119  target_info,
2120  target_logical_idx,
2121  translate_strings,
2122  decimal_to_double,
2123  entry_buff_idx);
2124  }
2125  const auto key_width = query_mem_desc_.getEffectiveKeyWidth();
2126  ptr1 = keys_ptr + query_mem_desc_.getTargetGroupbyIndex(target_logical_idx) * key_width;
2127  return makeTargetValue(ptr1,
2128  key_width,
2129  target_info,
2130  target_logical_idx,
2131  translate_strings,
2132  decimal_to_double,
2133  entry_buff_idx);
2134 }
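// Note on the group-key fallback above: when a target aliases a group-by key
// (getTargetGroupbyIndex >= 0), its value is read from the key area rather than a
// slot. Keys are packed at the effective key width, so key k of an entry lives at
//   keys_ptr + k * query_mem_desc_.getEffectiveKeyWidth()
// which is exactly how ptr1 is recomputed before the final makeTargetValue call.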
2135 
2136 // Returns true iff the entry at position entry_idx in buff is empty (does not
2136 // contain a valid row).
2137 bool ResultSetStorage::isEmptyEntry(const size_t entry_idx, const int8_t* buff) const {
2138  if (QueryDescriptionType::NonGroupedAggregate ==
2139  query_mem_desc_.getQueryDescriptionType()) {
2140  return false;
2141  }
2142  if (query_mem_desc_.didOutputColumnar()) {
2143  return isEmptyEntryColumnar(entry_idx, buff);
2144  }
2145  if (query_mem_desc_.hasKeylessHash()) {
2146  CHECK(query_mem_desc_.getQueryDescriptionType() ==
2147  QueryDescriptionType::GroupByPerfectHash);
2148  CHECK_GE(query_mem_desc_.getTargetIdxForKey(), 0);
2149  CHECK_LT(static_cast<size_t>(query_mem_desc_.getTargetIdxForKey()),
2150  target_init_vals_.size());
2151  const auto rowwise_target_ptr = row_ptr_rowwise(buff, query_mem_desc_, entry_idx);
2152  const auto target_slot_off = result_set::get_byteoff_of_slot(
2153  query_mem_desc_.getTargetIdxForKey(), query_mem_desc_);
2154  return read_int_from_buff(rowwise_target_ptr + target_slot_off,
2155  query_mem_desc_.getPaddedSlotWidthBytes(
2156  query_mem_desc_.getTargetIdxForKey())) ==
2157  target_init_vals_[query_mem_desc_.getTargetIdxForKey()];
2158  } else {
2159  const auto keys_ptr = row_ptr_rowwise(buff, query_mem_desc_, entry_idx);
2160  switch (query_mem_desc_.getEffectiveKeyWidth()) {
2161  case 4:
2162  CHECK(QueryDescriptionType::GroupByPerfectHash !=
2163  query_mem_desc_.getQueryDescriptionType());
2164  return *reinterpret_cast<const int32_t*>(keys_ptr) == EMPTY_KEY_32;
2165  case 8:
2166  return *reinterpret_cast<const int64_t*>(keys_ptr) == EMPTY_KEY_64;
2167  default:
2168  CHECK(false);
2169  return true;
2170  }
2171  }
2172 }
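// The row-wise emptiness test relies on key sentinels: an entry whose first group
// key still holds EMPTY_KEY_64 (EMPTY_KEY_32 for 4-byte keys) was never written by
// the generated code. Minimal sketch (hypothetical helper) of the 8-byte case:
namespace {
inline bool first_key_is_empty_sketch(const int8_t* keys_ptr) {
  return *reinterpret_cast<const int64_t*>(keys_ptr) == EMPTY_KEY_64;
}
}  // namespace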
2173 
2174 /*
2175  * Returns true if the entry contains empty keys.
2176  * This function should only be used with columnar format.
2177  */
2178 bool ResultSetStorage::isEmptyEntryColumnar(const size_t entry_idx,
2179  const int8_t* buff) const {
2180  if (QueryDescriptionType::NonGroupedAggregate ==
2181  query_mem_desc_.getQueryDescriptionType()) {
2183  return false;
2184  }
2185  if (query_mem_desc_.getQueryDescriptionType() == QueryDescriptionType::TableFunction) {
2186  // For table functions the entry count should always be set to the actual output size
2187  // (i.e. there are no empty entries), so just assume the value is non-empty
2188  CHECK_LT(entry_idx, getEntryCount());
2189  return false;
2190  }
2191  if (query_mem_desc_.hasKeylessHash()) {
2192  CHECK(query_mem_desc_.getQueryDescriptionType() ==
2193  QueryDescriptionType::GroupByPerfectHash);
2194  CHECK_GE(query_mem_desc_.getTargetIdxForKey(), 0);
2195  CHECK_LT(static_cast<size_t>(query_mem_desc_.getTargetIdxForKey()),
2196  target_init_vals_.size());
2197  const auto col_buff = advance_col_buff_to_slot(
2198  buff, query_mem_desc_, targets_, query_mem_desc_.getTargetIdxForKey(), false);
2199  const auto entry_buff =
2200  col_buff + entry_idx * query_mem_desc_.getPaddedSlotWidthBytes(
2201  query_mem_desc_.getTargetIdxForKey());
2202  return read_int_from_buff(entry_buff,
2203  query_mem_desc_.getPaddedSlotWidthBytes(
2204  query_mem_desc_.getTargetIdxForKey())) ==
2205  target_init_vals_[query_mem_desc_.getTargetIdxForKey()];
2206  } else {
2207  // It's enough to find the first group key that is empty
2208  if (query_mem_desc_.getQueryDescriptionType() == QueryDescriptionType::Projection) {
2209  return reinterpret_cast<const int64_t*>(buff)[entry_idx] == EMPTY_KEY_64;
2210  } else {
2211  CHECK(query_mem_desc_.getGroupbyColCount() > 0);
2212  const auto target_buff = buff + query_mem_desc_.getPrependedGroupColOffInBytes(0);
2213  switch (query_mem_desc_.groupColWidth(0)) {
2214  case 8:
2215  return reinterpret_cast<const int64_t*>(target_buff)[entry_idx] == EMPTY_KEY_64;
2216  case 4:
2217  return reinterpret_cast<const int32_t*>(target_buff)[entry_idx] == EMPTY_KEY_32;
2218  case 2:
2219  return reinterpret_cast<const int16_t*>(target_buff)[entry_idx] == EMPTY_KEY_16;
2220  case 1:
2221  return reinterpret_cast<const int8_t*>(target_buff)[entry_idx] == EMPTY_KEY_8;
2222  default:
2223  CHECK(false);
2224  }
2225  }
2226  return false;
2227  }
2228  return false;
2229 }
2230 
2231 namespace {
2232 
2233 template <typename T>
2234 inline size_t make_bin_search(size_t l, size_t r, T&& is_empty_fn) {
2235  // Avoid search if there are no empty keys.
2236  if (!is_empty_fn(r - 1)) {
2237  return r;
2238  }
2239 
2240  --r;
2241  while (l != r) {
2242  size_t c = (l + r) / 2;
2243  if (is_empty_fn(c)) {
2244  r = c;
2245  } else {
2246  l = c + 1;
2247  }
2248  }
2249 
2250  return r;
2251 }
2252 
2253 } // namespace
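// Example use of make_bin_search (illustrative): when all empty entries are
// trailing, the index of the first empty entry equals the row count.
//
//   const size_t row_count = make_bin_search(0, n, [buf](const size_t i) {
//     return reinterpret_cast<const int64_t*>(buf)[i] == EMPTY_KEY_64;
//   });
//
// If no entry is empty, the search is skipped and r (here n) is returned.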
2254 
2255 size_t ResultSetStorage::binSearchRowCount() const {
2256  // Note that table function result sets should never use this path as the row count
2257  // can be known statically (as the output buffers do not contain empty entries)
2258  CHECK(query_mem_desc_.getQueryDescriptionType() == QueryDescriptionType::Projection);
2259  CHECK_EQ(size_t(1), query_mem_desc_.getGroupbyColCount());
2260 
2261  if (!query_mem_desc_.getEntryCount()) {
2262  return 0;
2263  }
2264 
2265  if (query_mem_desc_.didOutputColumnar()) {
2266  return make_bin_search(0, query_mem_desc_.getEntryCount(), [this](size_t idx) {
2267  return reinterpret_cast<const int64_t*>(buff_)[idx] == EMPTY_KEY_64;
2268  });
2269  } else {
2270  return make_bin_search(0, query_mem_desc_.getEntryCount(), [this](size_t idx) {
2271  const auto keys_ptr = row_ptr_rowwise(buff_, query_mem_desc_, idx);
2272  return *reinterpret_cast<const int64_t*>(keys_ptr) == EMPTY_KEY_64;
2273  });
2274  }
2275 }
2276 
2277 bool ResultSetStorage::isEmptyEntry(const size_t entry_idx) const {
2278  return isEmptyEntry(entry_idx, buff_);
2279 }
2280 
2281 bool ResultSet::isNull(const SQLTypeInfo& ti,
2282  const InternalTargetValue& val,
2283  const bool float_argument_input) {
2284  if (ti.get_notnull()) {
2285  return false;
2286  }
2287  if (val.isInt()) {
2288  return val.i1 == null_val_bit_pattern(ti, float_argument_input);
2289  }
2290  if (val.isPair()) {
2291  return !val.i2;
2292  }
2293  if (val.isStr()) {
2294  return !val.i1;
2295  }
2296  CHECK(val.isNull());
2297  return true;
2298 }
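// Example of the integer branch above (illustrative): for a nullable BIGINT,
// null_val_bit_pattern(ti, false) is inline_int_null_val(ti), i.e. the bit pattern
// std::numeric_limits<int64_t>::min(), so an InternalTargetValue is null exactly
// when val.i1 holds that sentinel.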