OmniSciDB  c07336695a
ResultSetIteration.cpp
Go to the documentation of this file.
1 /*
2  * Copyright 2017 MapD Technologies, Inc.
3  *
4  * Licensed under the Apache License, Version 2.0 (the "License");
5  * you may not use this file except in compliance with the License.
6  * You may obtain a copy of the License at
7  *
8  * http://www.apache.org/licenses/LICENSE-2.0
9  *
10  * Unless required by applicable law or agreed to in writing, software
11  * distributed under the License is distributed on an "AS IS" BASIS,
12  * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13  * See the License for the specific language governing permissions and
14  * limitations under the License.
15  */
16 
25 #include "../Shared/geo_types.h"
26 #include "../Shared/likely.h"
27 #include "Execute.h"
28 #include "ParserNode.h"
30 #include "ResultSet.h"
32 #include "RuntimeFunctions.h"
33 #include "Shared/SqlTypesLayout.h"
34 #include "Shared/sqltypes.h"
35 #include "TypePunning.h"
36 
37 #include <memory>
38 #include <utility>
39 
40 namespace {
41 
42 // Interprets ptr1, ptr2 as the sum and count pair used for AVG.
44  const int8_t compact_sz1,
45  const int8_t* ptr2,
46  const int8_t compact_sz2,
47  const TargetInfo& target_info) {
48  int64_t sum{0};
49  CHECK(target_info.agg_kind == kAVG);
50  const bool float_argument_input = takes_float_argument(target_info);
51  const auto actual_compact_sz1 = float_argument_input ? sizeof(float) : compact_sz1;
52  if (target_info.agg_arg_type.is_integer() || target_info.agg_arg_type.is_decimal()) {
53  sum = read_int_from_buff(ptr1, actual_compact_sz1);
54  } else if (target_info.agg_arg_type.is_fp()) {
55  switch (actual_compact_sz1) {
56  case 8: {
57  double d = *reinterpret_cast<const double*>(ptr1);
58  sum = *reinterpret_cast<const int64_t*>(may_alias_ptr(&d));
59  break;
60  }
61  case 4: {
62  double d = *reinterpret_cast<const float*>(ptr1);
63  sum = *reinterpret_cast<const int64_t*>(may_alias_ptr(&d));
64  break;
65  }
66  default:
67  CHECK(false);
68  }
69  } else {
70  CHECK(false);
71  }
72  const auto count = read_int_from_buff(ptr2, compact_sz2);
73  return pair_to_double({sum, count}, target_info.sql_type, false);
74 }
75 
76 // Given the entire buffer for the result set, buff, finds the beginning of the
77 // column for slot_idx. Only makes sense for column-wise representation.
78 const int8_t* advance_col_buff_to_slot(const int8_t* buff,
79  const QueryMemoryDescriptor& query_mem_desc,
80  const std::vector<TargetInfo>& targets,
81  const size_t slot_idx,
82  const bool separate_varlen_storage) {
83  auto crt_col_ptr = get_cols_ptr(buff, query_mem_desc);
84  const auto buffer_col_count = query_mem_desc.getBufferColSlotCount();
85  size_t agg_col_idx{0};
86  for (size_t target_idx = 0; target_idx < targets.size(); ++target_idx) {
87  if (agg_col_idx == slot_idx) {
88  return crt_col_ptr;
89  }
90  CHECK_LT(agg_col_idx, buffer_col_count);
91  const auto& agg_info = targets[target_idx];
92  crt_col_ptr =
93  advance_to_next_columnar_target_buff(crt_col_ptr, query_mem_desc, agg_col_idx);
94  if (agg_info.is_agg && agg_info.agg_kind == kAVG) {
95  if (agg_col_idx + 1 == slot_idx) {
96  return crt_col_ptr;
97  }
99  crt_col_ptr, query_mem_desc, agg_col_idx + 1);
100  }
101  agg_col_idx = advance_slot(agg_col_idx, agg_info, separate_varlen_storage);
102  }
103  CHECK(false);
104  return nullptr;
105 }
106 } // namespace
107 
108 // Gets the byte offset, starting from the beginning of the row targets buffer, of
109 // the value in position slot_idx (only makes sense for row-wise representation).
110 size_t get_byteoff_of_slot(const size_t slot_idx,
111  const QueryMemoryDescriptor& query_mem_desc) {
112  return query_mem_desc.getPaddedColWidthForRange(0, slot_idx);
113 }
114 
115 std::vector<TargetValue> ResultSet::getRowAt(
116  const size_t global_entry_idx,
117  const bool translate_strings,
118  const bool decimal_to_double,
119  const bool fixup_count_distinct_pointers,
120  const std::vector<bool>& targets_to_skip /* = {}*/) const {
121  const auto storage_lookup_result =
122  fixup_count_distinct_pointers
123  ? StorageLookupResult{storage_.get(), global_entry_idx, 0}
124  : findStorage(global_entry_idx);
125  const auto storage = storage_lookup_result.storage_ptr;
126  const auto local_entry_idx = storage_lookup_result.fixedup_entry_idx;
127  if (!fixup_count_distinct_pointers && storage->isEmptyEntry(local_entry_idx)) {
128  return {};
129  }
130 
131  const auto buff = storage->buff_;
132  CHECK(buff);
133  std::vector<TargetValue> row;
134  size_t agg_col_idx = 0;
135  int8_t* rowwise_target_ptr{nullptr};
136  int8_t* keys_ptr{nullptr};
137  const int8_t* crt_col_ptr{nullptr};
138  if (query_mem_desc_.didOutputColumnar()) {
139  keys_ptr = buff;
140  crt_col_ptr = get_cols_ptr(buff, storage->query_mem_desc_);
141  } else {
142  keys_ptr = row_ptr_rowwise(buff, query_mem_desc_, local_entry_idx);
143  const auto key_bytes_with_padding =
144  align_to_int64(get_key_bytes_rowwise(query_mem_desc_));
145  rowwise_target_ptr = keys_ptr + key_bytes_with_padding;
146  }
147  for (size_t target_idx = 0; target_idx < storage_->targets_.size(); ++target_idx) {
148  const auto& agg_info = storage_->targets_[target_idx];
149  if (query_mem_desc_.didOutputColumnar()) {
150  if (!targets_to_skip.empty()) {
151  row.push_back(!targets_to_skip[target_idx]
152  ? getTargetValueFromBufferColwise(crt_col_ptr,
153  keys_ptr,
154  storage->query_mem_desc_,
155  local_entry_idx,
156  global_entry_idx,
157  agg_info,
158  target_idx,
159  agg_col_idx,
160  translate_strings,
162  : nullptr);
163  } else {
164  row.push_back(getTargetValueFromBufferColwise(crt_col_ptr,
165  keys_ptr,
166  storage->query_mem_desc_,
167  local_entry_idx,
168  global_entry_idx,
169  agg_info,
170  target_idx,
171  agg_col_idx,
172  translate_strings,
174  }
175  crt_col_ptr = advance_target_ptr_col_wise(crt_col_ptr,
176  agg_info,
177  agg_col_idx,
178  storage->query_mem_desc_,
179  separate_varlen_storage_valid_);
180  } else {
181  row.push_back(getTargetValueFromBufferRowwise(rowwise_target_ptr,
182  keys_ptr,
183  global_entry_idx,
184  agg_info,
185  target_idx,
186  agg_col_idx,
187  translate_strings,
189  fixup_count_distinct_pointers));
190  rowwise_target_ptr = advance_target_ptr_row_wise(rowwise_target_ptr,
191  agg_info,
192  agg_col_idx,
193  query_mem_desc_,
194  separate_varlen_storage_valid_);
195  }
196  agg_col_idx = advance_slot(agg_col_idx, agg_info, separate_varlen_storage_valid_);
197  }
198 
199  return row;
200 }
201 
202 TargetValue ResultSet::getRowAt(const size_t row_idx,
203  const size_t col_idx,
204  const bool translate_strings,
205  const bool decimal_to_double /* = true */) const {
206  std::lock_guard<std::mutex> lock(row_iteration_mutex_);
207  moveToBegin();
208  for (size_t i = 0; i < row_idx; ++i) {
209  auto crt_row = getNextRowUnlocked(translate_strings, decimal_to_double);
210  CHECK(!crt_row.empty());
211  }
212  auto crt_row = getNextRowUnlocked(translate_strings, decimal_to_double);
213  CHECK(!crt_row.empty());
214  return crt_row[col_idx];
215 }
216 
217 OneIntegerColumnRow ResultSet::getOneColRow(const size_t global_entry_idx) const {
218  const auto storage_lookup_result = findStorage(global_entry_idx);
219  const auto storage = storage_lookup_result.storage_ptr;
220  const auto local_entry_idx = storage_lookup_result.fixedup_entry_idx;
221  if (storage->isEmptyEntry(local_entry_idx)) {
222  return {0, false};
223  }
224  const auto buff = storage->buff_;
225  CHECK(buff);
226  CHECK(!query_mem_desc_.didOutputColumnar());
227  const auto keys_ptr = row_ptr_rowwise(buff, query_mem_desc_, local_entry_idx);
228  const auto key_bytes_with_padding =
229  align_to_int64(get_key_bytes_rowwise(query_mem_desc_));
230  const auto rowwise_target_ptr = keys_ptr + key_bytes_with_padding;
231  const auto tv = getTargetValueFromBufferRowwise(rowwise_target_ptr,
232  keys_ptr,
233  global_entry_idx,
234  targets_.front(),
235  0,
236  0,
237  false,
238  false,
239  false);
240  const auto scalar_tv = boost::get<ScalarTargetValue>(&tv);
241  CHECK(scalar_tv);
242  const auto ival_ptr = boost::get<int64_t>(scalar_tv);
243  CHECK(ival_ptr);
244  return {*ival_ptr, true};
245 }
246 
247 std::vector<TargetValue> ResultSet::getRowAt(const size_t logical_index) const {
248  if (logical_index >= entryCount()) {
249  return {};
250  }
251  const auto entry_idx =
252  permutation_.empty() ? logical_index : permutation_[logical_index];
253  return getRowAt(entry_idx, true, false, false);
254 }
255 
256 std::vector<TargetValue> ResultSet::getRowAtNoTranslations(
257  const size_t logical_index,
258  const std::vector<bool>& targets_to_skip /* = {}*/) const {
259  if (logical_index >= entryCount()) {
260  return {};
261  }
262  const auto entry_idx =
263  permutation_.empty() ? logical_index : permutation_[logical_index];
264  return getRowAt(entry_idx, false, false, false, targets_to_skip);
265 }
266 
267 bool ResultSet::isRowAtEmpty(const size_t logical_index) const {
268  if (logical_index >= entryCount()) {
269  return true;
270  }
271  const auto entry_idx =
272  permutation_.empty() ? logical_index : permutation_[logical_index];
273  const auto storage_lookup_result = findStorage(entry_idx);
274  const auto storage = storage_lookup_result.storage_ptr;
275  const auto local_entry_idx = storage_lookup_result.fixedup_entry_idx;
276  return storage->isEmptyEntry(local_entry_idx);
277 }
278 
279 std::vector<TargetValue> ResultSet::getNextRow(const bool translate_strings,
280  const bool decimal_to_double) const {
281  std::lock_guard<std::mutex> lock(row_iteration_mutex_);
282  if (!storage_ && !just_explain_) {
283  return {};
284  }
285  return getNextRowUnlocked(translate_strings, decimal_to_double);
286 }
287 
288 std::vector<TargetValue> ResultSet::getNextRowUnlocked(
289  const bool translate_strings,
290  const bool decimal_to_double) const {
291  if (just_explain_) {
292  if (fetched_so_far_) {
293  return {};
294  }
295  fetched_so_far_ = 1;
296  return {explanation_};
297  }
298  while (fetched_so_far_ < drop_first_) {
299  const auto row = getNextRowImpl(translate_strings, decimal_to_double);
300  if (row.empty()) {
301  return row;
302  }
303  }
304  return getNextRowImpl(translate_strings, decimal_to_double);
305 }
306 
307 std::vector<TargetValue> ResultSet::getNextRowImpl(const bool translate_strings,
308  const bool decimal_to_double) const {
309  auto entry_buff_idx = advanceCursorToNextEntry();
310  if (keep_first_ && fetched_so_far_ >= drop_first_ + keep_first_) {
311  return {};
312  }
313 
314  if (crt_row_buff_idx_ >= entryCount()) {
315  CHECK_EQ(entryCount(), crt_row_buff_idx_);
316  return {};
317  }
318  auto row = getRowAt(entry_buff_idx, translate_strings, decimal_to_double, false);
319  CHECK(!row.empty());
320  ++crt_row_buff_idx_;
321  ++fetched_so_far_;
322 
323  return row;
324 }
325 
326 namespace {
327 
// Returns the address of entry entry_idx within a columnar slot whose elements
// are densely packed with a stride of compact_sz1 bytes.
const int8_t* columnar_elem_ptr(const size_t entry_idx,
                                const int8_t* col1_ptr,
                                const int8_t compact_sz1) {
  const auto byte_offset = static_cast<size_t>(compact_sz1) * entry_idx;
  return col1_ptr + byte_offset;
}
333 
334 int64_t int_resize_cast(const int64_t ival, const size_t sz) {
335  switch (sz) {
336  case 8:
337  return ival;
338  case 4:
339  return static_cast<int32_t>(ival);
340  case 2:
341  return static_cast<int16_t>(ival);
342  case 1:
343  return static_cast<int8_t>(ival);
344  default:
345  UNREACHABLE();
346  }
347  UNREACHABLE();
348  return 0;
349 }
350 
351 } // namespace
352 
354  // Compute offsets for base storage and all appended storage
355  for (size_t storage_idx = 0; storage_idx < result_set_->appended_storage_.size() + 1;
356  ++storage_idx) {
357  offsets_for_storage_.emplace_back();
358 
359  const int8_t* rowwise_target_ptr{0};
360 
361  size_t agg_col_idx = 0;
362  for (size_t target_idx = 0; target_idx < result_set_->storage_->targets_.size();
363  ++target_idx) {
364  const auto& agg_info = result_set_->storage_->targets_[target_idx];
365 
366  auto ptr1 = rowwise_target_ptr;
367  const auto compact_sz1 =
368  result_set_->query_mem_desc_.getPaddedSlotWidthBytes(agg_col_idx)
369  ? result_set_->query_mem_desc_.getPaddedSlotWidthBytes(agg_col_idx)
370  : key_width_;
371 
372  const int8_t* ptr2{nullptr};
373  int8_t compact_sz2{0};
374  if ((agg_info.is_agg && agg_info.agg_kind == kAVG)) {
375  ptr2 = ptr1 + compact_sz1;
376  compact_sz2 =
377  result_set_->query_mem_desc_.getPaddedSlotWidthBytes(agg_col_idx + 1);
378  } else if (is_real_str_or_array(agg_info)) {
379  ptr2 = ptr1 + compact_sz1;
380  if (!result_set_->separate_varlen_storage_valid_) {
381  // None encoded strings explicitly attached to ResultSetStorage do not have a
382  // second slot in the QueryMemoryDescriptor col width vector
383  compact_sz2 =
384  result_set_->query_mem_desc_.getPaddedSlotWidthBytes(agg_col_idx + 1);
385  }
386  }
387  offsets_for_storage_[storage_idx].push_back(
388  TargetOffsets{ptr1,
389  static_cast<size_t>(compact_sz1),
390  ptr2,
391  static_cast<size_t>(compact_sz2)});
392  rowwise_target_ptr =
393  advance_target_ptr_row_wise(rowwise_target_ptr,
394  agg_info,
395  agg_col_idx,
396  result_set_->query_mem_desc_,
397  result_set_->separate_varlen_storage_valid_);
398 
399  agg_col_idx = advance_slot(
400  agg_col_idx, agg_info, result_set_->separate_varlen_storage_valid_);
401  }
402  CHECK_EQ(offsets_for_storage_[storage_idx].size(),
403  result_set_->storage_->targets_.size());
404  }
405 }
406 
408  const int8_t* buff,
409  const size_t entry_idx,
410  const size_t target_logical_idx,
411  const StorageLookupResult& storage_lookup_result) const {
412  CHECK(buff);
413  const int8_t* rowwise_target_ptr{nullptr};
414  const int8_t* keys_ptr{nullptr};
415 
416  const size_t storage_idx = storage_lookup_result.storage_idx;
417 
418  CHECK_LT(storage_idx, offsets_for_storage_.size());
419  CHECK_LT(target_logical_idx, offsets_for_storage_[storage_idx].size());
420 
421  const auto& offsets_for_target = offsets_for_storage_[storage_idx][target_logical_idx];
422  const auto& agg_info = result_set_->storage_->targets_[target_logical_idx];
423 
424  keys_ptr = get_rowwise_ptr(buff, entry_idx);
425  rowwise_target_ptr = keys_ptr + key_bytes_with_padding_;
426  auto ptr1 = rowwise_target_ptr + reinterpret_cast<size_t>(offsets_for_target.ptr1);
427  if (result_set_->query_mem_desc_.targetGroupbyIndicesSize() > 0) {
428  if (result_set_->query_mem_desc_.getTargetGroupbyIndex(target_logical_idx) >= 0) {
429  ptr1 = keys_ptr +
430  result_set_->query_mem_desc_.getTargetGroupbyIndex(target_logical_idx) *
431  key_width_;
432  }
433  }
434  const auto i1 =
435  result_set_->lazyReadInt(read_int_from_buff(ptr1, offsets_for_target.compact_sz1),
436  target_logical_idx,
437  storage_lookup_result);
438 
439  if (agg_info.is_agg && agg_info.agg_kind == kAVG) {
440  CHECK(offsets_for_target.ptr2);
441  const auto ptr2 =
442  rowwise_target_ptr + reinterpret_cast<size_t>(offsets_for_target.ptr2);
443  const auto i2 = read_int_from_buff(ptr2, offsets_for_target.compact_sz2);
444  return InternalTargetValue(i1, i2);
445  } else {
446  if (agg_info.sql_type.is_string() &&
447  agg_info.sql_type.get_compression() == kENCODING_NONE) {
448  CHECK(!agg_info.is_agg);
449  if (!result_set_->lazy_fetch_info_.empty()) {
450  CHECK_LT(target_logical_idx, result_set_->lazy_fetch_info_.size());
451  const auto& col_lazy_fetch = result_set_->lazy_fetch_info_[target_logical_idx];
452  if (col_lazy_fetch.is_lazily_fetched) {
453  return InternalTargetValue(reinterpret_cast<const std::string*>(i1));
454  }
455  }
456  if (result_set_->separate_varlen_storage_valid_) {
457  if (i1 < 0) {
458  CHECK_EQ(-1, i1);
459  return InternalTargetValue(static_cast<const std::string*>(nullptr));
460  }
461  CHECK_LT(storage_lookup_result.storage_idx,
462  result_set_->serialized_varlen_buffer_.size());
463  const auto& varlen_buffer_for_fragment =
464  result_set_->serialized_varlen_buffer_[storage_lookup_result.storage_idx];
465  CHECK_LT(static_cast<size_t>(i1), varlen_buffer_for_fragment.size());
466  return InternalTargetValue(&varlen_buffer_for_fragment[i1]);
467  }
468  CHECK(offsets_for_target.ptr2);
469  const auto ptr2 =
470  rowwise_target_ptr + reinterpret_cast<size_t>(offsets_for_target.ptr2);
471  const auto str_len = read_int_from_buff(ptr2, offsets_for_target.compact_sz2);
472  CHECK_GE(str_len, 0);
473  return result_set_->getVarlenOrderEntry(i1, str_len);
474  }
475  return InternalTargetValue(
476  agg_info.sql_type.is_fp()
477  ? i1
478  : int_resize_cast(i1, agg_info.sql_type.get_logical_size()));
479  }
480 }
481 
483  // Compute offsets for base storage and all appended storage
484  const auto key_width = result_set_->query_mem_desc_.getEffectiveKeyWidth();
485  for (size_t storage_idx = 0; storage_idx < result_set_->appended_storage_.size() + 1;
486  ++storage_idx) {
487  offsets_for_storage_.emplace_back();
488 
489  const int8_t* buff = storage_idx == 0
490  ? result_set_->storage_->buff_
491  : result_set_->appended_storage_[storage_idx - 1]->buff_;
492  CHECK(buff);
493 
494  const auto& crt_query_mem_desc =
495  storage_idx == 0
496  ? result_set_->storage_->query_mem_desc_
497  : result_set_->appended_storage_[storage_idx - 1]->query_mem_desc_;
498  const int8_t* crt_col_ptr = get_cols_ptr(buff, crt_query_mem_desc);
499 
500  size_t agg_col_idx = 0;
501  for (size_t target_idx = 0; target_idx < result_set_->storage_->targets_.size();
502  ++target_idx) {
503  const auto& agg_info = result_set_->storage_->targets_[target_idx];
504 
505  const auto compact_sz1 =
506  crt_query_mem_desc.getPaddedSlotWidthBytes(agg_col_idx)
507  ? crt_query_mem_desc.getPaddedSlotWidthBytes(agg_col_idx)
508  : key_width;
509 
510  const auto next_col_ptr = advance_to_next_columnar_target_buff(
511  crt_col_ptr, crt_query_mem_desc, agg_col_idx);
512  const bool uses_two_slots = (agg_info.is_agg && agg_info.agg_kind == kAVG) ||
513  is_real_str_or_array(agg_info);
514  const auto col2_ptr = uses_two_slots ? next_col_ptr : nullptr;
515  const auto compact_sz2 =
516  (agg_info.is_agg && agg_info.agg_kind == kAVG) || is_real_str_or_array(agg_info)
517  ? crt_query_mem_desc.getPaddedSlotWidthBytes(agg_col_idx + 1)
518  : 0;
519 
520  offsets_for_storage_[storage_idx].push_back(
521  TargetOffsets{crt_col_ptr,
522  static_cast<size_t>(compact_sz1),
523  col2_ptr,
524  static_cast<size_t>(compact_sz2)});
525 
526  crt_col_ptr = next_col_ptr;
527  if (uses_two_slots) {
529  crt_col_ptr, crt_query_mem_desc, agg_col_idx + 1);
530  }
531  agg_col_idx = advance_slot(
532  agg_col_idx, agg_info, result_set_->separate_varlen_storage_valid_);
533  }
534  CHECK_EQ(offsets_for_storage_[storage_idx].size(),
535  result_set_->storage_->targets_.size());
536  }
537 }
538 
540  const int8_t* buff,
541  const size_t entry_idx,
542  const size_t target_logical_idx,
543  const StorageLookupResult& storage_lookup_result) const {
544  const size_t storage_idx = storage_lookup_result.storage_idx;
545 
546  CHECK_LT(storage_idx, offsets_for_storage_.size());
547  CHECK_LT(target_logical_idx, offsets_for_storage_[storage_idx].size());
548 
549  const auto& offsets_for_target = offsets_for_storage_[storage_idx][target_logical_idx];
550  const auto& agg_info = result_set_->storage_->targets_[target_logical_idx];
551  auto ptr1 = offsets_for_target.ptr1;
552  if (result_set_->query_mem_desc_.targetGroupbyIndicesSize() > 0) {
553  if (result_set_->query_mem_desc_.getTargetGroupbyIndex(target_logical_idx) >= 0) {
554  ptr1 =
555  buff + result_set_->query_mem_desc_.getTargetGroupbyIndex(target_logical_idx) *
556  result_set_->query_mem_desc_.getEffectiveKeyWidth() *
557  result_set_->query_mem_desc_.entry_count_;
558  }
559  }
560 
561  const auto i1 = result_set_->lazyReadInt(
563  columnar_elem_ptr(entry_idx, ptr1, offsets_for_target.compact_sz1),
564  offsets_for_target.compact_sz1),
565  target_logical_idx,
566  storage_lookup_result);
567  if (agg_info.is_agg && agg_info.agg_kind == kAVG) {
568  CHECK(offsets_for_target.ptr2);
569  const auto i2 = read_int_from_buff(
571  entry_idx, offsets_for_target.ptr2, offsets_for_target.compact_sz2),
572  offsets_for_target.compact_sz2);
573  return InternalTargetValue(i1, i2);
574  } else {
575  // for TEXT ENCODING NONE:
576  if (agg_info.sql_type.is_string() &&
577  agg_info.sql_type.get_compression() == kENCODING_NONE) {
578  CHECK(!agg_info.is_agg);
579  if (!result_set_->lazy_fetch_info_.empty()) {
580  CHECK_LT(target_logical_idx, result_set_->lazy_fetch_info_.size());
581  const auto& col_lazy_fetch = result_set_->lazy_fetch_info_[target_logical_idx];
582  if (col_lazy_fetch.is_lazily_fetched) {
583  return InternalTargetValue(reinterpret_cast<const std::string*>(i1));
584  }
585  }
586  if (result_set_->separate_varlen_storage_valid_) {
587  if (i1 < 0) {
588  CHECK_EQ(-1, i1);
589  return InternalTargetValue(static_cast<const std::string*>(nullptr));
590  }
591  CHECK_LT(storage_lookup_result.storage_idx,
592  result_set_->serialized_varlen_buffer_.size());
593  const auto& varlen_buffer_for_fragment =
594  result_set_->serialized_varlen_buffer_[storage_lookup_result.storage_idx];
595  CHECK_LT(static_cast<size_t>(i1), varlen_buffer_for_fragment.size());
596  return InternalTargetValue(&varlen_buffer_for_fragment[i1]);
597  }
598  CHECK(offsets_for_target.ptr2);
599  const auto i2 = read_int_from_buff(
601  entry_idx, offsets_for_target.ptr2, offsets_for_target.compact_sz2),
602  offsets_for_target.compact_sz2);
603  CHECK_GE(i2, 0);
604  return result_set_->getVarlenOrderEntry(i1, i2);
605  }
606  return InternalTargetValue(
607  agg_info.sql_type.is_fp()
608  ? i1
609  : int_resize_cast(i1, agg_info.sql_type.get_logical_size()));
610  }
611 }
612 
614  const size_t str_len) const {
615  char* host_str_ptr{nullptr};
616  std::vector<int8_t> cpu_buffer;
618  cpu_buffer.resize(str_len);
619  const auto executor = query_mem_desc_.getExecutor();
620  CHECK(executor);
621  auto& data_mgr = executor->catalog_->getDataMgr();
622  copy_from_gpu(&data_mgr,
623  &cpu_buffer[0],
624  static_cast<CUdeviceptr>(str_ptr),
625  str_len,
626  device_id_);
627  host_str_ptr = reinterpret_cast<char*>(&cpu_buffer[0]);
628  } else {
630  host_str_ptr = reinterpret_cast<char*>(str_ptr);
631  }
632  std::string str(host_str_ptr, str_len);
633  return InternalTargetValue(row_set_mem_owner_->addString(str));
634 }
635 
636 int64_t ResultSet::lazyReadInt(const int64_t ival,
637  const size_t target_logical_idx,
638  const StorageLookupResult& storage_lookup_result) const {
639  if (!lazy_fetch_info_.empty()) {
640  CHECK_LT(target_logical_idx, lazy_fetch_info_.size());
641  const auto& col_lazy_fetch = lazy_fetch_info_[target_logical_idx];
642  if (col_lazy_fetch.is_lazily_fetched) {
643  CHECK_LT(static_cast<size_t>(storage_lookup_result.storage_idx),
644  col_buffers_.size());
645  int64_t ival_copy = ival;
646  auto& frag_col_buffers =
647  getColumnFrag(static_cast<size_t>(storage_lookup_result.storage_idx),
648  target_logical_idx,
649  ival_copy);
650  auto& frag_col_buffer = frag_col_buffers[col_lazy_fetch.local_col_id];
651  CHECK_LT(target_logical_idx, targets_.size());
652  const TargetInfo& target_info = targets_[target_logical_idx];
653  CHECK(!target_info.is_agg);
654  if (target_info.sql_type.is_string() &&
655  target_info.sql_type.get_compression() == kENCODING_NONE) {
656  VarlenDatum vd;
657  bool is_end{false};
659  reinterpret_cast<ChunkIter*>(const_cast<int8_t*>(frag_col_buffer)),
660  storage_lookup_result.fixedup_entry_idx,
661  false,
662  &vd,
663  &is_end);
664  CHECK(!is_end);
665  if (vd.is_null) {
666  return 0;
667  }
668  std::string fetched_str(reinterpret_cast<char*>(vd.pointer), vd.length);
669  return reinterpret_cast<int64_t>(row_set_mem_owner_->addString(fetched_str));
670  }
671  return lazy_decode(col_lazy_fetch, frag_col_buffer, ival_copy);
672  }
673  }
674  return ival;
675 }
676 
677 // Not all entries in the buffer represent a valid row. Advance the internal cursor
678 // used for the getNextRow method to the next row which is valid.
681  iter.global_entry_idx_valid_ = false;
682  return;
683  }
684 
685  while (iter.crt_row_buff_idx_ < entryCount()) {
686  const auto entry_idx = permutation_.empty() ? iter.crt_row_buff_idx_
688  const auto storage_lookup_result = findStorage(entry_idx);
689  const auto storage = storage_lookup_result.storage_ptr;
690  const auto fixedup_entry_idx = storage_lookup_result.fixedup_entry_idx;
691  if (!storage->isEmptyEntry(fixedup_entry_idx)) {
692  if (iter.fetched_so_far_ < drop_first_) {
693  ++iter.fetched_so_far_;
694  } else {
695  break;
696  }
697  }
698  ++iter.crt_row_buff_idx_;
699  }
700  if (permutation_.empty()) {
702  } else {
704  iter.global_entry_idx_ = iter.crt_row_buff_idx_ == permutation_.size()
705  ? iter.crt_row_buff_idx_
707  }
708 
710 
711  if (iter.global_entry_idx_valid_) {
712  ++iter.crt_row_buff_idx_;
713  ++iter.fetched_so_far_;
714  }
715 }
716 
717 // Not all entries in the buffer represent a valid row. Advance the internal cursor
718 // used for the getNextRow method to the next row which is valid.
720  while (crt_row_buff_idx_ < entryCount()) {
721  const auto entry_idx =
723  const auto storage_lookup_result = findStorage(entry_idx);
724  const auto storage = storage_lookup_result.storage_ptr;
725  const auto fixedup_entry_idx = storage_lookup_result.fixedup_entry_idx;
726  if (!storage->isEmptyEntry(fixedup_entry_idx)) {
727  break;
728  }
730  }
731  if (permutation_.empty()) {
732  return crt_row_buff_idx_;
733  }
737 }
738 
739 size_t ResultSet::entryCount() const {
740  return permutation_.empty() ? query_mem_desc_.getEntryCount() : permutation_.size();
741 }
742 
743 size_t ResultSet::getBufferSizeBytes(const ExecutorDeviceType device_type) const {
744  CHECK(storage_);
745  return storage_->query_mem_desc_.getBufferSizeBytes(device_type);
746 }
747 
748 int64_t lazy_decode(const ColumnLazyFetchInfo& col_lazy_fetch,
749  const int8_t* byte_stream,
750  const int64_t pos) {
751  CHECK(col_lazy_fetch.is_lazily_fetched);
752  const auto& type_info = col_lazy_fetch.type;
753  if (type_info.is_fp()) {
754  if (type_info.get_type() == kFLOAT) {
755  double fval = fixed_width_float_decode_noinline(byte_stream, pos);
756  return *reinterpret_cast<const int64_t*>(may_alias_ptr(&fval));
757  } else {
758  double fval = fixed_width_double_decode_noinline(byte_stream, pos);
759  return *reinterpret_cast<const int64_t*>(may_alias_ptr(&fval));
760  }
761  }
762  CHECK(type_info.is_integer() || type_info.is_decimal() || type_info.is_time() ||
763  type_info.is_timeinterval() || type_info.is_boolean() || type_info.is_string() ||
764  type_info.is_array());
765  size_t type_bitwidth = get_bit_width(type_info);
766  if (type_info.get_compression() == kENCODING_FIXED) {
767  type_bitwidth = type_info.get_comp_param();
768  } else if (type_info.get_compression() == kENCODING_DICT) {
769  type_bitwidth = 8 * type_info.get_size();
770  }
771  CHECK_EQ(size_t(0), type_bitwidth % 8);
772  int64_t val;
773  if (type_info.is_date_in_days()) {
774  val = type_info.get_comp_param() == 16
776  byte_stream, 2, NULL_SMALLINT, NULL_BIGINT, pos)
778  byte_stream, 4, NULL_INT, NULL_BIGINT, pos);
779  } else {
780  val = (type_info.get_compression() == kENCODING_DICT &&
781  type_info.get_size() < type_info.get_logical_size() &&
782  type_info.get_comp_param())
783  ? fixed_width_unsigned_decode_noinline(byte_stream, type_bitwidth / 8, pos)
784  : fixed_width_int_decode_noinline(byte_stream, type_bitwidth / 8, pos);
785  }
786  if (type_info.get_compression() != kENCODING_NONE &&
787  type_info.get_compression() != kENCODING_DATE_IN_DAYS) {
788  CHECK(type_info.get_compression() == kENCODING_FIXED ||
789  type_info.get_compression() == kENCODING_DICT);
790  auto encoding = type_info.get_compression();
791  if (encoding == kENCODING_FIXED) {
792  encoding = kENCODING_NONE;
793  }
794  SQLTypeInfo col_logical_ti(type_info.get_type(),
795  type_info.get_dimension(),
796  type_info.get_scale(),
797  false,
798  encoding,
799  0,
800  type_info.get_subtype());
801  if (val == inline_fixed_encoding_null_val(type_info)) {
802  return inline_int_null_val(col_logical_ti);
803  }
804  }
805  return val;
806 }
807 
808 namespace {
809 
810 template <class T>
812  return ScalarTargetValue(static_cast<int64_t>(val));
813 }
814 
815 template <>
817  return ScalarTargetValue(val);
818 }
819 
820 template <>
822  return ScalarTargetValue(val);
823 }
824 
825 template <class T>
827  const int8_t* buff,
828  const size_t buff_sz,
829  std::shared_ptr<RowSetMemoryOwner> row_set_mem_owner) {
830  std::vector<ScalarTargetValue> values;
831  auto buff_elems = reinterpret_cast<const T*>(buff);
832  CHECK_EQ(size_t(0), buff_sz % sizeof(T));
833  const size_t num_elems = buff_sz / sizeof(T);
834  for (size_t i = 0; i < num_elems; ++i) {
835  values.push_back(make_scalar_tv<T>(buff_elems[i]));
836  }
837  return ArrayTargetValue(values);
838 }
839 
841  const int32_t* buff,
842  const size_t buff_sz,
843  const int dict_id,
844  const bool translate_strings,
845  std::shared_ptr<RowSetMemoryOwner> row_set_mem_owner,
846  const Executor* executor) {
847  std::vector<ScalarTargetValue> values;
848  CHECK_EQ(size_t(0), buff_sz % sizeof(int32_t));
849  const size_t num_elems = buff_sz / sizeof(int32_t);
850  if (translate_strings) {
851  for (size_t i = 0; i < num_elems; ++i) {
852  const auto string_id = buff[i];
853 
854  if (string_id == NULL_INT) {
855  values.emplace_back(NullableString(nullptr));
856  } else {
857  if (dict_id == 0) {
858  StringDictionaryProxy* sdp = row_set_mem_owner->getLiteralStringDictProxy();
859  values.emplace_back(sdp->getString(string_id));
860  } else {
861  values.emplace_back(NullableString(
862  executor->getStringDictionaryProxy(dict_id, row_set_mem_owner, false)
863  ->getString(string_id)));
864  }
865  }
866  }
867  } else {
868  for (size_t i = 0; i < num_elems; i++) {
869  values.emplace_back(static_cast<int64_t>(buff[i]));
870  }
871  }
872  return ArrayTargetValue(values);
873 }
874 
876  const int8_t* buff,
877  const size_t buff_sz,
878  const bool translate_strings,
879  std::shared_ptr<RowSetMemoryOwner> row_set_mem_owner,
880  const Executor* executor) {
881  CHECK(array_ti.is_array());
882  const auto& elem_ti = array_ti.get_elem_type();
883  if (elem_ti.is_string()) {
884  return build_string_array_target_value(reinterpret_cast<const int32_t*>(buff),
885  buff_sz,
886  elem_ti.get_comp_param(),
887  translate_strings,
888  row_set_mem_owner,
889  executor);
890  }
891  switch (elem_ti.get_size()) {
892  case 1:
893  return build_array_target_value<int8_t>(buff, buff_sz, row_set_mem_owner);
894  case 2:
895  return build_array_target_value<int16_t>(buff, buff_sz, row_set_mem_owner);
896  case 4:
897  if (elem_ti.is_fp()) {
898  return build_array_target_value<float>(buff, buff_sz, row_set_mem_owner);
899  } else {
900  return build_array_target_value<int32_t>(buff, buff_sz, row_set_mem_owner);
901  }
902  case 8:
903  if (elem_ti.is_fp()) {
904  return build_array_target_value<double>(buff, buff_sz, row_set_mem_owner);
905  } else {
906  return build_array_target_value<int64_t>(buff, buff_sz, row_set_mem_owner);
907  }
908  default:
909  CHECK(false);
910  }
911  CHECK(false);
912  return TargetValue(nullptr);
913 }
914 
// Regroups a flat (ptr0, len0, ptr1, len1, ...) tuple into a vector of
// (pointer, length) pairs; the index sequence enumerates the pairs.
template <class Tuple, size_t... indices>
inline std::vector<std::pair<const int8_t*, const int64_t>> make_vals_vector(
    std::index_sequence<indices...>,
    const Tuple& tuple) {
  std::vector<std::pair<const int8_t*, const int64_t>> pairs;
  pairs.reserve(sizeof...(indices));
  (pairs.emplace_back(std::get<2 * indices>(tuple), std::get<2 * indices + 1>(tuple)),
   ...);
  return pairs;
}
922 
923 inline std::unique_ptr<ArrayDatum> lazy_fetch_chunk(const int8_t* ptr,
924  const int64_t varlen_ptr) {
925  auto ad = std::make_unique<ArrayDatum>();
926  bool is_end;
927  ChunkIter_get_nth(reinterpret_cast<ChunkIter*>(const_cast<int8_t*>(ptr)),
928  varlen_ptr,
929  ad.get(),
930  &is_end);
931  CHECK(!is_end);
932  return ad;
933 }
934 
936  template <typename... T>
937  static inline auto fetch(const ResultSet::GeoReturnType return_type, T&&... vals) {
938  constexpr int num_vals = sizeof...(vals);
939  static_assert(
940  num_vals % 2 == 0,
941  "Must have consistent pointer/size pairs for lazy fetch of geo target values.");
942  const auto vals_vector = make_vals_vector(std::make_index_sequence<num_vals / 2>{},
943  std::make_tuple(vals...));
944  std::array<VarlenDatumPtr, num_vals / 2> ad_arr;
945  size_t ctr = 0;
946  for (const auto& col_pair : vals_vector) {
947  ad_arr[ctr++] = lazy_fetch_chunk(col_pair.first, col_pair.second);
948  }
949  return ad_arr;
950  }
951 };
952 
953 inline std::unique_ptr<ArrayDatum> fetch_data_from_gpu(int64_t varlen_ptr,
954  const int64_t length,
955  Data_Namespace::DataMgr* data_mgr,
956  const int device_id) {
957  auto cpu_buf = std::shared_ptr<int8_t>(new int8_t[length], FreeDeleter());
959  data_mgr, cpu_buf.get(), static_cast<CUdeviceptr>(varlen_ptr), length, device_id);
960  return std::make_unique<ArrayDatum>(length, cpu_buf, false);
961 }
962 
964  static inline auto yieldGpuPtrFetcher() {
965  return [](const int64_t ptr, const int64_t length) -> VarlenDatumPtr {
966  return std::make_unique<VarlenDatum>(length, reinterpret_cast<int8_t*>(ptr), false);
967  };
968  }
969 
970  static inline auto yieldGpuDatumFetcher(Data_Namespace::DataMgr* data_mgr_ptr,
971  const int device_id) {
972  return [data_mgr_ptr, device_id](const int64_t ptr,
973  const int64_t length) -> VarlenDatumPtr {
974  return fetch_data_from_gpu(ptr, length, data_mgr_ptr, device_id);
975  };
976  }
977 
978  static inline auto yieldCpuDatumFetcher() {
979  return [](const int64_t ptr, const int64_t length) -> VarlenDatumPtr {
980  return std::make_unique<VarlenDatum>(length, reinterpret_cast<int8_t*>(ptr), false);
981  };
982  }
983 
984  template <typename... T>
985  static inline auto fetch(const ResultSet::GeoReturnType return_type,
986  Data_Namespace::DataMgr* data_mgr,
987  const bool fetch_data_from_gpu,
988  const int device_id,
989  T&&... vals) {
990  auto ad_arr_generator = [&](auto datum_fetcher) {
991  constexpr int num_vals = sizeof...(vals);
992  static_assert(
993  num_vals % 2 == 0,
994  "Must have consistent pointer/size pairs for lazy fetch of geo target values.");
995  const auto vals_vector = std::vector<int64_t>{vals...};
996 
997  std::array<VarlenDatumPtr, num_vals / 2> ad_arr;
998  size_t ctr = 0;
999  for (size_t i = 0; i < vals_vector.size(); i += 2) {
1000  ad_arr[ctr++] = datum_fetcher(vals_vector[i], vals_vector[i + 1]);
1001  }
1002  return ad_arr;
1003  };
1004 
1005  if (fetch_data_from_gpu) {
1007  return ad_arr_generator(yieldGpuPtrFetcher());
1008  } else {
1009  return ad_arr_generator(yieldGpuDatumFetcher(data_mgr, device_id));
1010  }
1011  } else {
1012  return ad_arr_generator(yieldCpuDatumFetcher());
1013  }
1014  }
1015 };
1016 
1017 template <SQLTypes GEO_SOURCE_TYPE, typename GeoTargetFetcher>
1019  template <typename... T>
1020  static inline TargetValue build(const SQLTypeInfo& geo_ti,
1021  const ResultSet::GeoReturnType return_type,
1022  T&&... vals) {
1023  auto ad_arr = GeoTargetFetcher::fetch(return_type, std::forward<T>(vals)...);
1024  static_assert(std::tuple_size<decltype(ad_arr)>::value > 0,
1025  "ArrayDatum array for Geo Target must contain at least one value.");
1026 
1027  switch (return_type) {
1029  if (ad_arr[0]->is_null) {
1030  return ArrayTargetValue(boost::optional<std::vector<ScalarTargetValue>>{});
1031  }
1033  GEO_SOURCE_TYPE>::GeoSerializerType::serialize(geo_ti,
1034  ad_arr);
1035  }
1038  GEO_SOURCE_TYPE>::GeoSerializerType::serialize(geo_ti,
1039  ad_arr);
1040  }
1043  if (ad_arr[0]->is_null) {
1044  return GeoTargetValuePtr();
1045  }
1047  GEO_SOURCE_TYPE>::GeoSerializerType::serialize(geo_ti,
1048  ad_arr);
1049  }
1050  default: {
1051  UNREACHABLE();
1052  return TargetValue(nullptr);
1053  }
1054  }
1055  }
1056 };
1057 
1058 template <typename T>
1059 inline std::pair<int64_t, int64_t> get_frag_id_and_local_idx(
1060  const std::vector<std::vector<T>>& frag_offsets,
1061  const size_t tab_or_col_idx,
1062  const int64_t global_idx) {
1063  CHECK_GE(global_idx, int64_t(0));
1064  for (int64_t frag_id = frag_offsets.size() - 1; frag_id > 0; --frag_id) {
1065  CHECK_LT(tab_or_col_idx, frag_offsets[frag_id].size());
1066  const auto frag_off = static_cast<int64_t>(frag_offsets[frag_id][tab_or_col_idx]);
1067  if (frag_off < global_idx) {
1068  return {frag_id, global_idx - frag_off};
1069  }
1070  }
1071  return {-1, -1};
1072 }
1073 
1074 } // namespace
1075 
1076 const std::vector<const int8_t*>& ResultSet::getColumnFrag(const size_t storage_idx,
1077  const size_t col_logical_idx,
1078  int64_t& global_idx) const {
1079  CHECK_LT(static_cast<size_t>(storage_idx), col_buffers_.size());
1080  if (col_buffers_[storage_idx].size() > 1) {
1081  int64_t frag_id = 0;
1082  int64_t local_idx = global_idx;
1083  if (consistent_frag_sizes_[storage_idx][col_logical_idx] != -1) {
1084  frag_id = global_idx / consistent_frag_sizes_[storage_idx][col_logical_idx];
1085  local_idx = global_idx % consistent_frag_sizes_[storage_idx][col_logical_idx];
1086  } else {
1087  std::tie(frag_id, local_idx) = get_frag_id_and_local_idx(
1088  frag_offsets_[storage_idx], col_logical_idx, global_idx);
1089  CHECK_LE(local_idx, global_idx);
1090  }
1091  CHECK_GE(frag_id, int64_t(0));
1092  CHECK_LT(static_cast<size_t>(frag_id), col_buffers_[storage_idx].size());
1093  global_idx = local_idx;
1094  return col_buffers_[storage_idx][frag_id];
1095  } else {
1096  CHECK_EQ(size_t(1), col_buffers_[storage_idx].size());
1097  return col_buffers_[storage_idx][0];
1098  }
1099 }
1100 
1105 void ResultSet::copyColumnIntoBuffer(const size_t column_idx,
1106  int8_t* output_buffer,
1107  const size_t output_buffer_size) const {
1109  CHECK_LT(column_idx, query_mem_desc_.getSlotCount());
1110  CHECK(output_buffer_size > 0);
1111  CHECK(output_buffer);
1112  const auto column_width_size = query_mem_desc_.getPaddedSlotWidthBytes(column_idx);
1113  size_t out_buff_offset = 0;
1114 
1115  // the main storage:
1116  const size_t crt_storage_row_count = storage_->query_mem_desc_.getEntryCount();
1117  const size_t crt_buffer_size = crt_storage_row_count * column_width_size;
1118  const size_t column_offset = storage_->query_mem_desc_.getColOffInBytes(column_idx);
1119  const int8_t* storage_buffer = storage_->getUnderlyingBuffer() + column_offset;
1120  CHECK(crt_buffer_size <= output_buffer_size);
1121  std::memcpy(output_buffer, storage_buffer, crt_buffer_size);
1122 
1123  out_buff_offset += crt_buffer_size;
1124 
1125  // the appended storages:
1126  for (size_t i = 0; i < appended_storage_.size(); i++) {
1127  CHECK_LT(out_buff_offset, output_buffer_size);
1128  const size_t crt_storage_row_count =
1129  appended_storage_[i]->query_mem_desc_.getEntryCount();
1130  const size_t crt_buffer_size = crt_storage_row_count * column_width_size;
1131  const size_t column_offset =
1132  appended_storage_[i]->query_mem_desc_.getColOffInBytes(column_idx);
1133  const int8_t* storage_buffer =
1134  appended_storage_[i]->getUnderlyingBuffer() + column_offset;
1135  CHECK(out_buff_offset + crt_buffer_size <= output_buffer_size);
1136  std::memcpy(output_buffer + out_buff_offset, storage_buffer, crt_buffer_size);
1137 
1138  out_buff_offset += crt_buffer_size;
1139  }
1140 }
1141 
1142 // Interprets ptr1, ptr2 as the ptr and len pair used for variable length data.
1144  const int8_t compact_sz1,
1145  const int8_t* ptr2,
1146  const int8_t compact_sz2,
1147  const TargetInfo& target_info,
1148  const size_t target_logical_idx,
1149  const bool translate_strings,
1150  const size_t entry_buff_idx) const {
1151  auto varlen_ptr = read_int_from_buff(ptr1, compact_sz1);
1152  if (separate_varlen_storage_valid_ && !target_info.is_agg) {
1153  if (varlen_ptr < 0) {
1154  CHECK_EQ(-1, varlen_ptr);
1155  if (target_info.sql_type.get_type() == kARRAY) {
1156  return ArrayTargetValue(boost::optional<std::vector<ScalarTargetValue>>{});
1157  }
1158  return TargetValue(nullptr);
1159  }
1160  const auto storage_idx = getStorageIndex(entry_buff_idx);
1161  if (target_info.sql_type.is_string()) {
1162  CHECK(target_info.sql_type.get_compression() == kENCODING_NONE);
1163  CHECK_LT(static_cast<size_t>(storage_idx.first), serialized_varlen_buffer_.size());
1164  const auto& varlen_buffer_for_storage =
1165  serialized_varlen_buffer_[storage_idx.first];
1166  CHECK_LT(static_cast<size_t>(varlen_ptr), varlen_buffer_for_storage.size());
1167  return varlen_buffer_for_storage[varlen_ptr];
1168  } else if (target_info.sql_type.get_type() == kARRAY) {
1169  CHECK_LT(static_cast<size_t>(storage_idx.first), serialized_varlen_buffer_.size());
1170  const auto& varlen_buffer = serialized_varlen_buffer_[storage_idx.first];
1171  CHECK_LT(static_cast<size_t>(varlen_ptr), varlen_buffer.size());
1172 
1173  return build_array_target_value(
1174  target_info.sql_type,
1175  reinterpret_cast<const int8_t*>(varlen_buffer[varlen_ptr].data()),
1176  varlen_buffer[varlen_ptr].size(),
1177  translate_strings,
1179  executor_);
1180  } else {
1181  CHECK(false);
1182  }
1183  }
1184  if (!lazy_fetch_info_.empty()) {
1185  CHECK_LT(target_logical_idx, lazy_fetch_info_.size());
1186  const auto& col_lazy_fetch = lazy_fetch_info_[target_logical_idx];
1187  if (col_lazy_fetch.is_lazily_fetched) {
1188  const auto storage_idx = getStorageIndex(entry_buff_idx);
1189  CHECK_LT(static_cast<size_t>(storage_idx.first), col_buffers_.size());
1190  auto& frag_col_buffers =
1191  getColumnFrag(storage_idx.first, target_logical_idx, varlen_ptr);
1192  bool is_end{false};
1193  if (target_info.sql_type.is_string()) {
1194  VarlenDatum vd;
1195  ChunkIter_get_nth(reinterpret_cast<ChunkIter*>(const_cast<int8_t*>(
1196  frag_col_buffers[col_lazy_fetch.local_col_id])),
1197  varlen_ptr,
1198  false,
1199  &vd,
1200  &is_end);
1201  CHECK(!is_end);
1202  if (vd.is_null) {
1203  return TargetValue(nullptr);
1204  }
1205  CHECK(vd.pointer);
1206  CHECK_GT(vd.length, 0u);
1207  std::string fetched_str(reinterpret_cast<char*>(vd.pointer), vd.length);
1208  return fetched_str;
1209  } else {
1210  CHECK(target_info.sql_type.is_array());
1211  ArrayDatum ad;
1212  ChunkIter_get_nth(reinterpret_cast<ChunkIter*>(const_cast<int8_t*>(
1213  frag_col_buffers[col_lazy_fetch.local_col_id])),
1214  varlen_ptr,
1215  &ad,
1216  &is_end);
1217  CHECK(!is_end);
1218  if (ad.is_null) {
1219  return ArrayTargetValue(boost::optional<std::vector<ScalarTargetValue>>{});
1220  }
1221  CHECK_GE(ad.length, 0u);
1222  if (ad.length > 0) {
1223  CHECK(ad.pointer);
1224  }
1225  return build_array_target_value(target_info.sql_type,
1226  ad.pointer,
1227  ad.length,
1228  translate_strings,
1230  executor_);
1231  }
1232  }
1233  }
1234  if (!varlen_ptr) {
1235  if (target_info.sql_type.is_array()) {
1236  return ArrayTargetValue(boost::optional<std::vector<ScalarTargetValue>>{});
1237  }
1238  return TargetValue(nullptr);
1239  }
1240  auto length = read_int_from_buff(ptr2, compact_sz2);
1241  if (target_info.sql_type.is_array()) {
1242  const auto& elem_ti = target_info.sql_type.get_elem_type();
1243  length *= elem_ti.get_array_context_logical_size();
1244  }
1245  std::vector<int8_t> cpu_buffer;
1246  if (varlen_ptr && device_type_ == ExecutorDeviceType::GPU) {
1247  cpu_buffer.resize(length);
1248  const auto executor = query_mem_desc_.getExecutor();
1249  CHECK(executor);
1250  auto& data_mgr = executor->catalog_->getDataMgr();
1251  copy_from_gpu(&data_mgr,
1252  &cpu_buffer[0],
1253  static_cast<CUdeviceptr>(varlen_ptr),
1254  length,
1255  device_id_);
1256  varlen_ptr = reinterpret_cast<int64_t>(&cpu_buffer[0]);
1257  }
1258  if (target_info.sql_type.is_array()) {
1259  return build_array_target_value(target_info.sql_type,
1260  reinterpret_cast<const int8_t*>(varlen_ptr),
1261  length,
1262  translate_strings,
1264  executor_);
1265  }
1266  return std::string(reinterpret_cast<char*>(varlen_ptr), length);
1267 }
1268 
1269 bool ResultSet::isGeoColOnGpu(const size_t col_idx) const {
1270  // This should match the logic in makeGeoTargetValue which ultimately calls
1271  // fetch_data_from_gpu when the geo column is on the device.
1272  // TODO(croot): somehow find a way to refactor this and makeGeoTargetValue to use a
1273  // utility function that handles this logic in one place
1274  CHECK_LT(col_idx, targets_.size());
1275  if (!IS_GEO(targets_[col_idx].sql_type.get_type())) {
1276  throw std::runtime_error("Column target at index " + std::to_string(col_idx) +
1277  " is not a geo column. It is of type " +
1278  targets_[col_idx].sql_type.get_type_name() + ".");
1279  }
1280 
1281  const auto& target_info = targets_[col_idx];
1282  if (separate_varlen_storage_valid_ && !target_info.is_agg) {
1283  return false;
1284  }
1285 
1286  if (!lazy_fetch_info_.empty()) {
1287  CHECK_LT(col_idx, lazy_fetch_info_.size());
1288  if (lazy_fetch_info_[col_idx].is_lazily_fetched) {
1289  return false;
1290  }
1291  }
1292 
1294 }
1295 
1296 // Reads a geo value from a series of ptrs to var len types
1297 // In Columnar format, geo_target_ptr is the geo column ptr (a pointer to the beginning
1298 // of that specific geo column) and should be appropriately adjusted with the
1299 // entry_buff_idx
1300 TargetValue ResultSet::makeGeoTargetValue(const int8_t* geo_target_ptr,
1301  const size_t slot_idx,
1302  const TargetInfo& target_info,
1303  const size_t target_logical_idx,
1304  const size_t entry_buff_idx) const {
1305  CHECK(target_info.sql_type.is_geometry());
1306 
1307  auto getNextTargetBufferRowWise = [&](const size_t slot_idx, const size_t range) {
1308  return geo_target_ptr + query_mem_desc_.getPaddedColWidthForRange(slot_idx, range);
1309  };
1310 
1311  auto getNextTargetBufferColWise = [&](const size_t slot_idx, const size_t range) {
1312  const auto storage_info = findStorage(entry_buff_idx);
1313  auto crt_geo_col_ptr = geo_target_ptr;
1314  for (size_t i = slot_idx; i < slot_idx + range; i++) {
1315  crt_geo_col_ptr = advance_to_next_columnar_target_buff(
1316  crt_geo_col_ptr, storage_info.storage_ptr->query_mem_desc_, i);
1317  }
1318  // adjusting the column pointer to represent a pointer to the geo target value
1319  return crt_geo_col_ptr +
1320  storage_info.fixedup_entry_idx *
1321  storage_info.storage_ptr->query_mem_desc_.getPaddedSlotWidthBytes(
1322  slot_idx + range);
1323  };
1324 
1325  auto getNextTargetBuffer = [&](const size_t slot_idx, const size_t range) {
1327  ? getNextTargetBufferColWise(slot_idx, range)
1328  : getNextTargetBufferRowWise(slot_idx, range);
1329  };
1330 
1331  auto getCoordsDataPtr = [&](const int8_t* geo_target_ptr) {
1332  return read_int_from_buff(getNextTargetBuffer(slot_idx, 0),
1334  };
1335 
1336  auto getCoordsLength = [&](const int8_t* geo_target_ptr) {
1337  return read_int_from_buff(getNextTargetBuffer(slot_idx, 1),
1339  };
1340 
1341  auto getRingSizesPtr = [&](const int8_t* geo_target_ptr) {
1342  return read_int_from_buff(getNextTargetBuffer(slot_idx, 2),
1344  };
1345 
1346  auto getRingSizesLength = [&](const int8_t* geo_target_ptr) {
1347  return read_int_from_buff(getNextTargetBuffer(slot_idx, 3),
1349  };
1350 
1351  auto getPolyRingsPtr = [&](const int8_t* geo_target_ptr) {
1352  return read_int_from_buff(getNextTargetBuffer(slot_idx, 4),
1354  };
1355 
1356  auto getPolyRingsLength = [&](const int8_t* geo_target_ptr) {
1357  return read_int_from_buff(getNextTargetBuffer(slot_idx, 5),
1359  };
1360 
1361  auto getFragColBuffers = [&]() -> decltype(auto) {
1362  const auto storage_idx = getStorageIndex(entry_buff_idx);
1363  CHECK_LT(static_cast<size_t>(storage_idx.first), col_buffers_.size());
1364  auto global_idx = getCoordsDataPtr(geo_target_ptr);
1365  return getColumnFrag(storage_idx.first, target_logical_idx, global_idx);
1366  };
1367 
1368  const bool is_gpu_fetch = device_type_ == ExecutorDeviceType::GPU;
1369 
1370  auto getDataMgr = [&]() {
1371  auto executor = query_mem_desc_.getExecutor();
1372  CHECK(executor);
1373  auto& data_mgr = executor->catalog_->getDataMgr();
1374  return &data_mgr;
1375  };
1376 
1377  auto getSeparateVarlenStorage = [&]() -> decltype(auto) {
1378  const auto storage_idx = getStorageIndex(entry_buff_idx);
1379  CHECK_LT(static_cast<size_t>(storage_idx.first), serialized_varlen_buffer_.size());
1380  const auto& varlen_buffer = serialized_varlen_buffer_[storage_idx.first];
1381  return varlen_buffer;
1382  };
1383 
1384  if (separate_varlen_storage_valid_ && getCoordsDataPtr(geo_target_ptr) < 0) {
1385  CHECK_EQ(-1, getCoordsDataPtr(geo_target_ptr));
1386  return TargetValue(nullptr);
1387  }
1388 
1389  const ColumnLazyFetchInfo* col_lazy_fetch = nullptr;
1390  if (!lazy_fetch_info_.empty()) {
1391  CHECK_LT(target_logical_idx, lazy_fetch_info_.size());
1392  col_lazy_fetch = &lazy_fetch_info_[target_logical_idx];
1393  }
1394 
1395  switch (target_info.sql_type.get_type()) {
1396  case kPOINT: {
1397  if (separate_varlen_storage_valid_ && !target_info.is_agg) {
1398  const auto& varlen_buffer = getSeparateVarlenStorage();
1399  CHECK_LT(static_cast<size_t>(getCoordsDataPtr(geo_target_ptr)),
1400  varlen_buffer.size());
1401 
1402  return GeoTargetValueBuilder<kPOINT, GeoQueryOutputFetchHandler>::build(
1403  target_info.sql_type,
1405  nullptr,
1406  false,
1407  device_id_,
1408  reinterpret_cast<int64_t>(
1409  varlen_buffer[getCoordsDataPtr(geo_target_ptr)].data()),
1410  static_cast<int64_t>(varlen_buffer[getCoordsDataPtr(geo_target_ptr)].size()));
1411  } else if (col_lazy_fetch && col_lazy_fetch->is_lazily_fetched) {
1412  const auto& frag_col_buffers = getFragColBuffers();
1413  return GeoTargetValueBuilder<kPOINT, GeoLazyFetchHandler>::build(
1414  target_info.sql_type,
1416  frag_col_buffers[col_lazy_fetch->local_col_id],
1417  getCoordsDataPtr(geo_target_ptr));
1418  } else {
1419  return GeoTargetValueBuilder<kPOINT, GeoQueryOutputFetchHandler>::build(
1420  target_info.sql_type,
1422  is_gpu_fetch ? getDataMgr() : nullptr,
1423  is_gpu_fetch,
1424  device_id_,
1425  getCoordsDataPtr(geo_target_ptr),
1426  getCoordsLength(geo_target_ptr));
1427  }
1428  break;
1429  }
1430  case kLINESTRING: {
1431  if (separate_varlen_storage_valid_ && !target_info.is_agg) {
1432  const auto& varlen_buffer = getSeparateVarlenStorage();
1433  CHECK_LT(static_cast<size_t>(getCoordsDataPtr(geo_target_ptr)),
1434  varlen_buffer.size());
1435 
1436  return GeoTargetValueBuilder<kLINESTRING, GeoQueryOutputFetchHandler>::build(
1437  target_info.sql_type,
1439  nullptr,
1440  false,
1441  device_id_,
1442  reinterpret_cast<int64_t>(
1443  varlen_buffer[getCoordsDataPtr(geo_target_ptr)].data()),
1444  static_cast<int64_t>(varlen_buffer[getCoordsDataPtr(geo_target_ptr)].size()));
1445  } else if (col_lazy_fetch && col_lazy_fetch->is_lazily_fetched) {
1446  const auto& frag_col_buffers = getFragColBuffers();
1447  return GeoTargetValueBuilder<kLINESTRING, GeoLazyFetchHandler>::build(
1448  target_info.sql_type,
1450  frag_col_buffers[col_lazy_fetch->local_col_id],
1451  getCoordsDataPtr(geo_target_ptr));
1452  } else {
1453  return GeoTargetValueBuilder<kLINESTRING, GeoQueryOutputFetchHandler>::build(
1454  target_info.sql_type,
1456  is_gpu_fetch ? getDataMgr() : nullptr,
1457  is_gpu_fetch,
1458  device_id_,
1459  getCoordsDataPtr(geo_target_ptr),
1460  getCoordsLength(geo_target_ptr));
1461  }
1462  break;
1463  }
1464  case kPOLYGON: {
1465  if (separate_varlen_storage_valid_ && !target_info.is_agg) {
1466  const auto& varlen_buffer = getSeparateVarlenStorage();
1467  CHECK_LT(static_cast<size_t>(getCoordsDataPtr(geo_target_ptr) + 1),
1468  varlen_buffer.size());
1469 
1470  return GeoTargetValueBuilder<kPOLYGON, GeoQueryOutputFetchHandler>::build(
1471  target_info.sql_type,
1473  nullptr,
1474  false,
1475  device_id_,
1476  reinterpret_cast<int64_t>(
1477  varlen_buffer[getCoordsDataPtr(geo_target_ptr)].data()),
1478  static_cast<int64_t>(varlen_buffer[getCoordsDataPtr(geo_target_ptr)].size()),
1479  reinterpret_cast<int64_t>(
1480  varlen_buffer[getCoordsDataPtr(geo_target_ptr) + 1].data()),
1481  static_cast<int64_t>(
1482  varlen_buffer[getCoordsDataPtr(geo_target_ptr) + 1].size()));
1483  } else if (col_lazy_fetch && col_lazy_fetch->is_lazily_fetched) {
1484  const auto& frag_col_buffers = getFragColBuffers();
1485 
1486  return GeoTargetValueBuilder<kPOLYGON, GeoLazyFetchHandler>::build(
1487  target_info.sql_type,
1489  frag_col_buffers[col_lazy_fetch->local_col_id],
1490  getCoordsDataPtr(geo_target_ptr),
1491  frag_col_buffers[col_lazy_fetch->local_col_id + 1],
1492  getCoordsDataPtr(geo_target_ptr));
1493  } else {
1494  return GeoTargetValueBuilder<kPOLYGON, GeoQueryOutputFetchHandler>::build(
1495  target_info.sql_type,
1497  is_gpu_fetch ? getDataMgr() : nullptr,
1498  is_gpu_fetch,
1499  device_id_,
1500  getCoordsDataPtr(geo_target_ptr),
1501  getCoordsLength(geo_target_ptr),
1502  getRingSizesPtr(geo_target_ptr),
1503  getRingSizesLength(geo_target_ptr) * 4);
1504  }
1505  break;
1506  }
1507  case kMULTIPOLYGON: {
1508  if (separate_varlen_storage_valid_ && !target_info.is_agg) {
1509  const auto& varlen_buffer = getSeparateVarlenStorage();
1510  CHECK_LT(static_cast<size_t>(getCoordsDataPtr(geo_target_ptr) + 2),
1511  varlen_buffer.size());
1512 
1513  return GeoTargetValueBuilder<kMULTIPOLYGON, GeoQueryOutputFetchHandler>::build(
1514  target_info.sql_type,
1516  nullptr,
1517  false,
1518  device_id_,
1519  reinterpret_cast<int64_t>(
1520  varlen_buffer[getCoordsDataPtr(geo_target_ptr)].data()),
1521  static_cast<int64_t>(varlen_buffer[getCoordsDataPtr(geo_target_ptr)].size()),
1522  reinterpret_cast<int64_t>(
1523  varlen_buffer[getCoordsDataPtr(geo_target_ptr) + 1].data()),
1524  static_cast<int64_t>(
1525  varlen_buffer[getCoordsDataPtr(geo_target_ptr) + 1].size()),
1526  reinterpret_cast<int64_t>(
1527  varlen_buffer[getCoordsDataPtr(geo_target_ptr) + 2].data()),
1528  static_cast<int64_t>(
1529  varlen_buffer[getCoordsDataPtr(geo_target_ptr) + 2].size()));
1530  } else if (col_lazy_fetch && col_lazy_fetch->is_lazily_fetched) {
1531  const auto& frag_col_buffers = getFragColBuffers();
1532 
1533  return GeoTargetValueBuilder<kMULTIPOLYGON, GeoLazyFetchHandler>::build(
1534  target_info.sql_type,
1536  frag_col_buffers[col_lazy_fetch->local_col_id],
1537  getCoordsDataPtr(geo_target_ptr),
1538  frag_col_buffers[col_lazy_fetch->local_col_id + 1],
1539  getCoordsDataPtr(geo_target_ptr),
1540  frag_col_buffers[col_lazy_fetch->local_col_id + 2],
1541  getCoordsDataPtr(geo_target_ptr));
1542  } else {
1543  return GeoTargetValueBuilder<kMULTIPOLYGON, GeoQueryOutputFetchHandler>::build(
1544  target_info.sql_type,
1546  is_gpu_fetch ? getDataMgr() : nullptr,
1547  is_gpu_fetch,
1548  device_id_,
1549  getCoordsDataPtr(geo_target_ptr),
1550  getCoordsLength(geo_target_ptr),
1551  getRingSizesPtr(geo_target_ptr),
1552  getRingSizesLength(geo_target_ptr) * 4,
1553  getPolyRingsPtr(geo_target_ptr),
1554  getPolyRingsLength(geo_target_ptr) * 4);
1555  }
1556  break;
1557  }
1558  default:
1559  throw std::runtime_error("Unknown Geometry type encountered: " +
1560  target_info.sql_type.get_type_name());
1561  }
1562  UNREACHABLE();
1563  return TargetValue(nullptr);
1564 }
1565 
1566 // Reads an integer or a float from ptr based on the type and the byte width.
1568  const int8_t compact_sz,
1569  const TargetInfo& target_info,
1570  const size_t target_logical_idx,
1571  const bool translate_strings,
1572  const bool decimal_to_double,
1573  const size_t entry_buff_idx) const {
1574  auto actual_compact_sz = compact_sz;
1575  if (target_info.sql_type.get_type() == kFLOAT &&
1578  actual_compact_sz = sizeof(float);
1579  } else {
1580  actual_compact_sz = sizeof(double);
1581  }
1582  if (target_info.is_agg &&
1583  (target_info.agg_kind == kAVG || target_info.agg_kind == kSUM ||
1584  target_info.agg_kind == kMIN || target_info.agg_kind == kMAX)) {
1585  // The above listed aggregates use two floats in a single 8-byte slot. Set the
1586  // padded size to 4 bytes to properly read each value.
1587  actual_compact_sz = sizeof(float);
1588  }
1589  }
1590  if (get_compact_type(target_info).is_date_in_days()) {
1591  // Dates encoded in days are converted to 8 byte values on read.
1592  actual_compact_sz = sizeof(int64_t);
1593  }
1594 
1595  // String dictionary keys are read as 32-bit values regardless of encoding
1596  if (target_info.sql_type.is_string() &&
1597  target_info.sql_type.get_compression() == kENCODING_DICT &&
1598  target_info.sql_type.get_comp_param()) {
1599  actual_compact_sz = sizeof(int32_t);
1600  }
1601 
1602  auto ival = read_int_from_buff(ptr, actual_compact_sz);
1603  const auto& chosen_type = get_compact_type(target_info);
1604  if (!lazy_fetch_info_.empty()) {
1605  CHECK_LT(target_logical_idx, lazy_fetch_info_.size());
1606  const auto& col_lazy_fetch = lazy_fetch_info_[target_logical_idx];
1607  if (col_lazy_fetch.is_lazily_fetched) {
1608  CHECK_GE(ival, 0);
1609  const auto storage_idx = getStorageIndex(entry_buff_idx);
1610  CHECK_LT(static_cast<size_t>(storage_idx.first), col_buffers_.size());
1611  auto& frag_col_buffers = getColumnFrag(storage_idx.first, target_logical_idx, ival);
1612  ival = lazy_decode(
1613  col_lazy_fetch, frag_col_buffers[col_lazy_fetch.local_col_id], ival);
1614  if (chosen_type.is_fp()) {
1615  const auto dval = *reinterpret_cast<const double*>(may_alias_ptr(&ival));
1616  if (chosen_type.get_type() == kFLOAT) {
1617  return ScalarTargetValue(static_cast<float>(dval));
1618  } else {
1619  return ScalarTargetValue(dval);
1620  }
1621  }
1622  }
1623  }
1624  if (chosen_type.is_fp()) {
1625  switch (actual_compact_sz) {
1626  case 8: {
1627  const auto dval = *reinterpret_cast<const double*>(ptr);
1628  return chosen_type.get_type() == kFLOAT
1629  ? ScalarTargetValue(static_cast<const float>(dval))
1630  : ScalarTargetValue(dval);
1631  }
1632  case 4: {
1633  CHECK_EQ(kFLOAT, chosen_type.get_type());
1634  return *reinterpret_cast<const float*>(ptr);
1635  }
1636  default:
1637  CHECK(false);
1638  }
1639  }
1640  if (chosen_type.is_integer() | chosen_type.is_boolean() || chosen_type.is_time() ||
1641  chosen_type.is_timeinterval()) {
1642  if (is_distinct_target(target_info)) {
1644  ival, query_mem_desc_.getCountDistinctDescriptor(target_logical_idx)));
1645  }
1646  // TODO(alex): remove int_resize_cast, make read_int_from_buff return the
1647  // right type instead
1648  if (inline_int_null_val(chosen_type) ==
1649  int_resize_cast(ival, chosen_type.get_logical_size())) {
1650  return inline_int_null_val(target_info.sql_type);
1651  }
1652  return ival;
1653  }
1654  if (chosen_type.is_string() && chosen_type.get_compression() == kENCODING_DICT) {
1655  if (translate_strings) {
1656  if (static_cast<int32_t>(ival) ==
1657  NULL_INT) { // TODO(alex): this isn't nice, fix it
1658  return NullableString(nullptr);
1659  }
1660  StringDictionaryProxy* sdp{nullptr};
1661  if (!chosen_type.get_comp_param()) {
1662  sdp = row_set_mem_owner_->getLiteralStringDictProxy();
1663  } else {
1664  sdp = executor_
1665  ? executor_->getStringDictionaryProxy(
1666  chosen_type.get_comp_param(), row_set_mem_owner_, false)
1667  : row_set_mem_owner_->getStringDictProxy(chosen_type.get_comp_param());
1668  }
1669  return NullableString(sdp->getString(ival));
1670  } else {
1671  return static_cast<int64_t>(static_cast<int32_t>(ival));
1672  }
1673  }
1674  if (chosen_type.is_decimal()) {
1675  if (decimal_to_double) {
1676  if (ival ==
1677  inline_int_null_val(SQLTypeInfo(decimal_to_int_type(chosen_type), false))) {
1678  return NULL_DOUBLE;
1679  }
1680  return static_cast<double>(ival) / exp_to_scale(chosen_type.get_scale());
1681  }
1682  return ival;
1683  }
1684  CHECK(false);
1685  return TargetValue(int64_t(0));
1686 }
1687 
1688 // Gets the TargetValue stored at position local_entry_idx in the col1_ptr and col2_ptr
1689 // column buffers. The second column is only used for AVG.
1690 // the global_entry_idx is passed to makeTargetValue to be used for
1691 // final lazy fetch (if there's any).
1693  const int8_t* col_ptr,
1694  const int8_t* keys_ptr,
1695  const QueryMemoryDescriptor& query_mem_desc,
1696  const size_t local_entry_idx,
1697  const size_t global_entry_idx,
1698  const TargetInfo& target_info,
1699  const size_t target_logical_idx,
1700  const size_t slot_idx,
1701  const bool translate_strings,
1702  const bool decimal_to_double) const {
1704  const auto col1_ptr = col_ptr;
1705  const auto compact_sz1 = query_mem_desc.getPaddedSlotWidthBytes(slot_idx);
1706  const auto next_col_ptr =
1707  advance_to_next_columnar_target_buff(col1_ptr, query_mem_desc, slot_idx);
1708  const auto col2_ptr = ((target_info.is_agg && target_info.agg_kind == kAVG) ||
1709  is_real_str_or_array(target_info))
1710  ? next_col_ptr
1711  : nullptr;
1712  const auto compact_sz2 = ((target_info.is_agg && target_info.agg_kind == kAVG) ||
1713  is_real_str_or_array(target_info))
1714  ? query_mem_desc.getPaddedSlotWidthBytes(slot_idx + 1)
1715  : 0;
1716 
1717  // TODO(Saman): add required logics for count distinct
1718  // geospatial target values:
1719  if (target_info.sql_type.is_geometry()) {
1720  return makeGeoTargetValue(
1721  col1_ptr, slot_idx, target_info, target_logical_idx, global_entry_idx);
1722  }
1723 
1724  const auto ptr1 = columnar_elem_ptr(local_entry_idx, col1_ptr, compact_sz1);
1725  if (target_info.agg_kind == kAVG || is_real_str_or_array(target_info)) {
1726  CHECK(col2_ptr);
1727  CHECK(compact_sz2);
1728  const auto ptr2 = columnar_elem_ptr(local_entry_idx, col2_ptr, compact_sz2);
1729  return target_info.agg_kind == kAVG
1730  ? make_avg_target_value(ptr1, compact_sz1, ptr2, compact_sz2, target_info)
1731  : makeVarlenTargetValue(ptr1,
1732  compact_sz1,
1733  ptr2,
1734  compact_sz2,
1735  target_info,
1736  target_logical_idx,
1737  translate_strings,
1738  global_entry_idx);
1739  }
1741  query_mem_desc_.getTargetGroupbyIndex(target_logical_idx) < 0) {
1742  return makeTargetValue(ptr1,
1743  compact_sz1,
1744  target_info,
1745  target_logical_idx,
1746  translate_strings,
1747  decimal_to_double,
1748  global_entry_idx);
1749  }
1750  const auto key_width = query_mem_desc_.getEffectiveKeyWidth();
1751  const auto key_idx = query_mem_desc_.getTargetGroupbyIndex(target_logical_idx);
1752  CHECK_GE(key_idx, 0);
1753  auto key_col_ptr = keys_ptr + key_idx * query_mem_desc_.getEntryCount() * key_width;
1754  return makeTargetValue(columnar_elem_ptr(local_entry_idx, key_col_ptr, key_width),
1755  key_width,
1756  target_info,
1757  target_logical_idx,
1758  translate_strings,
1759  decimal_to_double,
1760  global_entry_idx);
1761 }
1762 
1763 // Gets the TargetValue stored in slot_idx (and slot_idx for AVG) of
1764 // rowwise_target_ptr.
// NOTE(review): this is a doxygen-scraped listing. The function signature line
// and a few `if (...)` condition lines were dropped by the capture, so several
// statements below appear to begin mid-condition. The surviving tokens are
// byte-identical to the original source.
1766  int8_t* rowwise_target_ptr,
1767  int8_t* keys_ptr,
1768  const size_t entry_buff_idx,
1769  const TargetInfo& target_info,
1770  const size_t target_logical_idx,
1771  const size_t slot_idx,
1772  const bool translate_strings,
1773  const bool decimal_to_double,
1774  const bool fixup_count_distinct_pointers) const {
// Fixup pass: for COUNT DISTINCT targets the slot holds a pointer-sized handle.
// Try to remap it through storage_->mappedPtr(); if no local mapping exists,
// allocate a zero-filled bitmap buffer (owned by row_set_mem_owner_) and store
// its address in place of the unmapped handle. In this mode no TargetValue is
// produced -- the function returns int64_t(0).
1775  if (UNLIKELY(fixup_count_distinct_pointers)) {
1776  if (is_distinct_target(target_info)) {
1777  auto count_distinct_ptr_ptr = reinterpret_cast<int64_t*>(rowwise_target_ptr);
1778  const auto remote_ptr = *count_distinct_ptr_ptr;
1779  if (remote_ptr) {
1780  const auto ptr = storage_->mappedPtr(remote_ptr);
1781  if (ptr) {
1782  *count_distinct_ptr_ptr = ptr;
1783  } else {
1784  // need to create a zero filled buffer for this remote_ptr
1785  const auto& count_distinct_desc =
1786  query_mem_desc_.count_distinct_descriptors_[target_logical_idx];
// Bitmap size depends on whether sub-bitmaps are in use (padded vs. unpadded).
1787  const auto bitmap_byte_sz = count_distinct_desc.sub_bitmap_count == 1
1788  ? count_distinct_desc.bitmapSizeBytes()
1789  : count_distinct_desc.bitmapPaddedSizeBytes();
1790  auto count_distinct_buffer =
1791  static_cast<int8_t*>(checked_malloc(bitmap_byte_sz));
1792  memset(count_distinct_buffer, 0, bitmap_byte_sz);
// Ownership of the malloc'd buffer transfers to the row set memory owner.
1793  row_set_mem_owner_->addCountDistinctBuffer(
1794  count_distinct_buffer, bitmap_byte_sz, true);
1795  *count_distinct_ptr_ptr = reinterpret_cast<int64_t>(count_distinct_buffer);
1796  }
1797  }
1798  }
1799  return int64_t(0);
1800  }
// Geometry targets have their own builder.
1801  if (target_info.sql_type.is_geometry()) {
1802  return makeGeoTargetValue(
1803  rowwise_target_ptr, slot_idx, target_info, target_logical_idx, entry_buff_idx);
1804  }
1805 
1806  auto ptr1 = rowwise_target_ptr;
1807  int8_t compact_sz1 = query_mem_desc_.getPaddedSlotWidthBytes(slot_idx);
// NOTE(review): the guarding `if (...)` line for this condition fragment was
// lost in the capture; per the comment below it tests the single-column
// perfect-hash group-by case.
1809  !query_mem_desc_.hasKeylessHash() && !target_info.is_agg) {
1810  // Single column perfect hash group by can utilize one slot for both the key and the
1811  // target value if both values fit in 8 bytes. Use the target value actual size for
1812  // this case. If they don't, the target value should be 8 bytes, so we can still use
1813  // the actual size rather than the compact size.
1814  compact_sz1 = query_mem_desc_.getLogicalSlotWidthBytes(slot_idx);
1815  }
1816 
1817  // logic for deciding width of column
// AVG and real (none-encoded) string/array targets consume a second slot.
1818  if (target_info.agg_kind == kAVG || is_real_str_or_array(target_info)) {
1819  const auto ptr2 =
1820  rowwise_target_ptr + query_mem_desc_.getPaddedSlotWidthBytes(slot_idx);
1821  int8_t compact_sz2 = 0;
1822  // Skip reading the second slot if we have a none encoded string and are using
1823  // the none encoded strings buffer attached to ResultSetStorage
// NOTE(review): the opening `if (!(...` line of this condition was lost in the
// capture; only its trailing operands survive below.
1825  (target_info.sql_type.is_array() ||
1826  (target_info.sql_type.is_string() &&
1827  target_info.sql_type.get_compression() == kENCODING_NONE)))) {
1828  compact_sz2 = query_mem_desc_.getPaddedSlotWidthBytes(slot_idx + 1);
1829  }
1830  if (separate_varlen_storage_valid_ && target_info.is_agg) {
1831  compact_sz2 = 8; // TODO(adb): is there a better way to do this?
1832  }
1833  CHECK(ptr2);
1834  return target_info.agg_kind == kAVG
1835  ? make_avg_target_value(ptr1, compact_sz1, ptr2, compact_sz2, target_info)
1836  : makeVarlenTargetValue(ptr1,
1837  compact_sz1,
1838  ptr2,
1839  compact_sz2,
1840  target_info,
1841  target_logical_idx,
1842  translate_strings,
1843  entry_buff_idx);
1844  }
// NOTE(review): the opening `if (...` line of this condition was lost in the
// capture; the surviving operand checks whether the target is stored with the
// group-by keys (negative index means it is a regular slot target).
1846  query_mem_desc_.getTargetGroupbyIndex(target_logical_idx) < 0) {
1847  return makeTargetValue(ptr1,
1848  compact_sz1,
1849  target_info,
1850  target_logical_idx,
1851  translate_strings,
1852  decimal_to_double,
1853  entry_buff_idx);
1854  }
// Target lives inside the group-by key area: re-point ptr1 at the key column.
1855  const auto key_width = query_mem_desc_.getEffectiveKeyWidth();
1856  ptr1 = keys_ptr + query_mem_desc_.getTargetGroupbyIndex(target_logical_idx) * key_width;
1857  return makeTargetValue(ptr1,
1858  key_width,
1859  target_info,
1860  target_logical_idx,
1861  translate_strings,
1862  decimal_to_double,
1863  entry_buff_idx);
1864 }
1865 
1866 // Returns true iff the entry at position entry_idx in buff contains a valid row.
// NOTE(review): doxygen-scraped listing -- several `if`/`switch` header lines
// and call-argument lines were dropped by the capture; surviving tokens are
// byte-identical to the original source.
1867 bool ResultSetStorage::isEmptyEntry(const size_t entry_idx, const int8_t* buff) const {
// (elided condition) -- a guard above returns false early for layouts that
// cannot contain empty entries; confirm against the upstream source.
1870  return false;
1871  }
// (elided condition) -- columnar layouts delegate to isEmptyEntryColumnar.
1873  return isEmptyEntryColumnar(entry_idx, buff);
1874  }
// Keyless-hash path (guard elided): the designated key target's slot is
// compared against its initialization value to detect an empty entry.
1879  CHECK_LT(static_cast<size_t>(query_mem_desc_.getTargetIdxForKey()),
1880  target_init_vals_.size());
1881  const auto rowwise_target_ptr = row_ptr_rowwise(buff, query_mem_desc_, entry_idx);
1882  const auto target_slot_off =
1884  return read_int_from_buff(rowwise_target_ptr + target_slot_off,
1887  target_init_vals_[query_mem_desc_.getTargetIdxForKey()];
1888  } else {
// Keyed path: the first key of the row is compared against the sentinel
// empty-key value for its width.
1889  const auto keys_ptr = row_ptr_rowwise(buff, query_mem_desc_, entry_idx);
// (elided `switch` header on the effective key width)
1891  case 4:
1894  return *reinterpret_cast<const int32_t*>(keys_ptr) == EMPTY_KEY_32;
1895  case 8:
1896  return *reinterpret_cast<const int64_t*>(keys_ptr) == EMPTY_KEY_64;
1897  default:
1898  CHECK(false);
1899  return true;
1900  }
1901  }
1902 }
1903 
1904 /*
1905  * Returns true if the entry contains empty keys.
1906  * This function should only be used with columnar format.
1907  */
// NOTE(review): doxygen-scraped listing -- several `if` header lines and
// call-argument lines were dropped by the capture; surviving tokens are
// byte-identical to the original source.
1908 bool ResultSetStorage::isEmptyEntryColumnar(const size_t entry_idx,
1909  const int8_t* buff) const {
// (elided condition) -- a guard above returns false early for layouts that
// cannot contain empty entries; confirm against the upstream source.
1913  return false;
1914  }
// Keyless-hash path (guard elided): locate the designated key target's column
// and compare the entry's slot value against its initialization value.
1919  CHECK_LT(static_cast<size_t>(query_mem_desc_.getTargetIdxForKey()),
1920  target_init_vals_.size());
1921  const auto col_buff = advance_col_buff_to_slot(
1923  const auto entry_buff =
1924  col_buff + entry_idx * query_mem_desc_.getPaddedSlotWidthBytes(
1926  return read_int_from_buff(entry_buff,
1929  target_init_vals_[query_mem_desc_.getTargetIdxForKey()];
1930  } else {
1931  // it's enough to find the first group key which is empty
// (elided condition) -- fast path indexes buff directly as 64-bit keys.
1933  return reinterpret_cast<const int64_t*>(buff)[entry_idx] == EMPTY_KEY_64;
1934  } else {
// General path: offset to the first group column and compare against the
// empty-key sentinel matching that column's width.
1936  const auto target_buff = buff + query_mem_desc_.getPrependedGroupColOffInBytes(0);
1937  switch (query_mem_desc_.groupColWidth(0)) {
1938  case 8:
1939  return reinterpret_cast<const int64_t*>(target_buff)[entry_idx] == EMPTY_KEY_64;
1940  case 4:
1941  return reinterpret_cast<const int32_t*>(target_buff)[entry_idx] == EMPTY_KEY_32;
1942  case 2:
1943  return reinterpret_cast<const int16_t*>(target_buff)[entry_idx] == EMPTY_KEY_16;
1944  case 1:
1945  return reinterpret_cast<const int8_t*>(target_buff)[entry_idx] == EMPTY_KEY_8;
1946  default:
1947  CHECK(false);
1948  }
1949  }
1950  return false;
1951  }
1952  return false;
1953 }
1954 
1955 bool ResultSetStorage::isEmptyEntry(const size_t entry_idx) const {
1956  return isEmptyEntry(entry_idx, buff_);
1957 }
1958 
// NOTE(review): the function signature line (the return type, class scope and
// first parameter of this static isNull predicate) was dropped by the doxygen
// capture; only the trailing parameters survive below. Surviving tokens are
// byte-identical to the original source.
1960  const InternalTargetValue& val,
1961  const bool float_argument_input) {
// A NOT NULL column can never hold a null value.
1962  if (ti.get_notnull()) {
1963  return false;
1964  }
// Integer-encoded values are null iff they match the type's null bit pattern.
1965  if (val.isInt()) {
1966  return val.i1 == null_val_bit_pattern(ti, float_argument_input);
1967  }
// Pair values (e.g. AVG's sum/count): null when the count is zero or the
// resulting quotient is the double null sentinel.
1968  if (val.isPair()) {
1969  return !val.i2 ||
1970  pair_to_double({val.i1, val.i2}, ti, float_argument_input) == NULL_DOUBLE;
1971  }
// String values are null when the pointer/handle component is zero.
1972  if (val.isStr()) {
1973  return !val.i1;
1974  }
// Only the explicit null variant remains.
1975  CHECK(val.isNull());
1976  return true;
1977 }
const SQLTypeInfo type
Definition: ResultSet.h:232
#define CHECK_EQ(x, y)
Definition: Logger.h:195
std::vector< TargetValue > getNextRowImpl(const bool translate_strings, const bool decimal_to_double) const
#define NULL_DOUBLE
Definition: sqltypes.h:177
bool isNull() const
Definition: TargetValue.h:69
bool isPair() const
Definition: TargetValue.h:67
void d(const SQLTypes expected_type, const std::string &str)
Definition: ImportTest.cpp:268
#define EMPTY_KEY_64
bool is_fp() const
Definition: sqltypes.h:450
ssize_t getTargetGroupbyIndex(const size_t target_idx) const
double decimal_to_double(const SQLTypeInfo &otype, int64_t oval)
std::vector< std::unique_ptr< ResultSetStorage > > appended_storage_
Definition: ResultSet.h:776
const std::vector< const int8_t * > & getColumnFrag(const size_t storge_idx, const size_t col_logical_idx, int64_t &global_idx) const
GeoReturnType geo_return_type_
Definition: ResultSet.h:813
DEVICE NEVER_INLINE int64_t SUFFIX() fixed_width_int_decode_noinline(const int8_t *byte_stream, const int32_t byte_width, const int64_t pos)
Definition: DecodersImpl.h:83
ExecutorDeviceType
TargetValue build_string_array_target_value(const int32_t *buff, const size_t buff_sz, const int dict_id, const bool translate_strings, std::shared_ptr< RowSetMemoryOwner > row_set_mem_owner, const Executor *executor)
bool is_null
Definition: sqltypes.h:73
#define NULL_BIGINT
Definition: sqltypes.h:175
T advance_to_next_columnar_target_buff(T target_ptr, const QueryMemoryDescriptor &query_mem_desc, const size_t target_slot_idx)
size_t entryCount() const
SQLTypeInfo sql_type
Definition: TargetInfo.h:42
HOST DEVICE bool get_notnull() const
Definition: sqltypes.h:326
std::unique_ptr< ArrayDatum > lazy_fetch_chunk(const int8_t *ptr, const int64_t varlen_ptr)
const Executor * executor_
Definition: ResultSet.h:785
unsigned long long CUdeviceptr
Definition: nocuda.h:27
HOST DEVICE SQLTypes get_type() const
Definition: sqltypes.h:319
static bool isNull(const SQLTypeInfo &ti, const InternalTargetValue &val, const bool float_argument_input)
static auto fetch(const ResultSet::GeoReturnType return_type, T &&... vals)
QueryMemoryDescriptor query_mem_desc_
Definition: ResultSet.h:774
#define UNREACHABLE()
Definition: Logger.h:231
#define CHECK_GE(x, y)
Definition: Logger.h:200
int64_t const int32_t sz
std::unique_ptr< ResultSetStorage > storage_
Definition: ResultSet.h:775
std::pair< ssize_t, size_t > getStorageIndex(const size_t entry_idx) const
Definition: ResultSet.cpp:594
bool isGeoColOnGpu(const size_t col_idx) const
Constants for Builtin SQL Types supported by MapD.
size_t getBufferSizeBytes(const ExecutorDeviceType device_type) const
int64_t read_int_from_buff(const int8_t *ptr, const int8_t compact_sz)
size_t keep_first_
Definition: ResultSet.h:780
int64_t lazy_decode(const ColumnLazyFetchInfo &col_lazy_fetch, const int8_t *byte_stream, const int64_t pos)
TargetValue make_avg_target_value(const int8_t *ptr1, const int8_t compact_sz1, const int8_t *ptr2, const int8_t compact_sz2, const TargetInfo &target_info)
double pair_to_double(const std::pair< int64_t, int64_t > &fp_pair, const SQLTypeInfo &ti, const bool float_argument_input)
bool takes_float_argument(const TargetInfo &target_info)
Definition: TargetInfo.h:120
std::vector< SerializedVarlenBufferStorage > serialized_varlen_buffer_
Definition: ResultSet.h:805
std::vector< std::pair< const int8_t *, const int64_t > > make_vals_vector(std::index_sequence< indices... >, const Tuple &tuple)
std::vector< TargetValue > getNextRowUnlocked(const bool translate_strings, const bool decimal_to_double) const
T advance_target_ptr_row_wise(T target_ptr, const TargetInfo &target_info, const size_t slot_idx, const QueryMemoryDescriptor &query_mem_desc, const bool separate_varlen_storage)
#define CHECK_GT(x, y)
Definition: Logger.h:199
HOST DEVICE EncodingType get_compression() const
Definition: sqltypes.h:327
int64_t null_val_bit_pattern(const SQLTypeInfo &ti, const bool float_argument_input)
DEVICE void ChunkIter_get_nth(ChunkIter *it, int n, bool uncompress, VarlenDatum *result, bool *is_end)
Definition: ChunkIter.cpp:181
static TargetValue build(const SQLTypeInfo &geo_ti, const ResultSet::GeoReturnType return_type, T &&... vals)
std::vector< TargetValue > getRowAt(const size_t index) const
std::string to_string(char const *&&v)
std::vector< uint32_t > permutation_
Definition: ResultSet.h:782
SQLTypeInfo agg_arg_type
Definition: TargetInfo.h:43
const int8_t getPaddedSlotWidthBytes(const size_t slot_idx) const
const int local_col_id
Definition: ResultSet.h:231
int8_t * pointer
Definition: sqltypes.h:72
const size_t key_bytes_with_padding_
Definition: ResultSet.h:668
InternalTargetValue getColumnInternal(const int8_t *buff, const size_t entry_idx, const size_t target_logical_idx, const StorageLookupResult &storage_lookup_result) const
const Executor * getExecutor() const
bool isEmptyEntry(const size_t entry_idx, const int8_t *buff) const
const ResultSet * result_set_
Definition: ResultSet.h:663
bool isStr() const
Definition: TargetValue.h:71
const int8_t * advance_col_buff_to_slot(const int8_t *buff, const QueryMemoryDescriptor &query_mem_desc, const std::vector< TargetInfo > &targets, const size_t slot_idx, const bool separate_varlen_storage)
Definition: sqldefs.h:71
Serialization routines for geospatial types.
const SQLTypeInfo get_compact_type(const TargetInfo &target)
std::string get_type_name() const
Definition: sqltypes.h:422
OneIntegerColumnRow getOneColRow(const size_t index) const
bool isInt() const
Definition: TargetValue.h:65
size_t global_entry_idx_
Definition: ResultSet.h:274
bool is_array() const
Definition: sqltypes.h:454
const std::vector< TargetInfo > targets_
Definition: ResultSet.h:771
const std::shared_ptr< RowSetMemoryOwner > row_set_mem_owner_
Definition: ResultSet.h:781
bool is_integer() const
Definition: sqltypes.h:448
size_t get_bit_width(const SQLTypeInfo &ti)
std::vector< TargetValue > getRowAtNoTranslations(const size_t index, const std::vector< bool > &targets_to_skip={}) const
size_t drop_first_
Definition: ResultSet.h:779
bool is_agg
Definition: TargetInfo.h:40
size_t advance_slot(const size_t j, const TargetInfo &target_info, const bool separate_varlen_storage)
Classes representing a parse tree.
int64_t count_distinct_set_size(const int64_t set_handle, const CountDistinctDescriptor &count_distinct_desc)
Definition: CountDistinct.h:75
InternalTargetValue getVarlenOrderEntry(const int64_t str_ptr, const size_t str_len) const
std::string getString(int32_t string_id) const
void * checked_malloc(const size_t size)
Definition: checked_alloc.h:40
TargetValue getTargetValueFromBufferColwise(const int8_t *col_ptr, const int8_t *keys_ptr, const QueryMemoryDescriptor &query_mem_desc, const size_t local_entry_idx, const size_t global_entry_idx, const TargetInfo &target_info, const size_t target_logical_idx, const size_t slot_idx, const bool translate_strings, const bool decimal_to_double) const
bool is_decimal() const
Definition: sqltypes.h:449
void copy_from_gpu(Data_Namespace::DataMgr *data_mgr, void *dst, const CUdeviceptr src, const size_t num_bytes, const int device_id)
StorageLookupResult findStorage(const size_t entry_idx) const
Definition: ResultSet.cpp:615
size_t get_byteoff_of_slot(const size_t slot_idx, const QueryMemoryDescriptor &query_mem_desc)
DEVICE NEVER_INLINE int64_t SUFFIX() fixed_width_unsigned_decode_noinline(const int8_t *byte_stream, const int32_t byte_width, const int64_t pos)
Definition: DecodersImpl.h:90
CountDistinctDescriptors count_distinct_descriptors_
Definition: sqldefs.h:71
bool is_distinct_target(const TargetInfo &target_info)
Definition: TargetInfo.h:116
boost::optional< std::vector< ScalarTargetValue > > ArrayTargetValue
Definition: TargetValue.h:157
SQLTypeInfoCore get_elem_type() const
Definition: sqltypes.h:628
#define EMPTY_KEY_8
bool isRowAtEmpty(const size_t index) const
#define NULL_INT
Definition: sqltypes.h:174
const std::vector< ColumnLazyFetchInfo > lazy_fetch_info_
Definition: ResultSet.h:792
size_t getPaddedColWidthForRange(const size_t offset, const size_t range) const
size_t targetGroupbyIndicesSize() const
SQLTypeInfoCore< ArrayContextTypeSizer, ExecutorTypePackaging, DateTimeFacilities > SQLTypeInfo
Definition: sqltypes.h:819
T row_ptr_rowwise(T buff, const QueryMemoryDescriptor &query_mem_desc, const size_t entry_idx)
SQLAgg agg_kind
Definition: TargetInfo.h:41
SQLTypes decimal_to_int_type(const SQLTypeInfo &ti)
Definition: Datum.cpp:416
#define UNLIKELY(x)
Definition: likely.h:20
int8_t groupColWidth(const size_t key_idx) const
int32_t getTargetIdxForKey() const
std::vector< TargetValue > getNextRow(const bool translate_strings, const bool decimal_to_double) const
#define CHECK_LT(x, y)
Definition: Logger.h:197
bool is_real_str_or_array(const TargetInfo &target_info)
DEVICE NEVER_INLINE int64_t SUFFIX() fixed_width_small_date_decode_noinline(const int8_t *byte_stream, const int32_t byte_width, const int32_t null_val, const int64_t ret_null_val, const int64_t pos)
Definition: DecodersImpl.h:141
TargetValue getTargetValueFromBufferRowwise(int8_t *rowwise_target_ptr, int8_t *keys_ptr, const size_t entry_buff_idx, const TargetInfo &target_info, const size_t target_logical_idx, const size_t slot_idx, const bool translate_strings, const bool decimal_to_double, const bool fixup_count_distinct_pointers) const
#define CHECK_LE(x, y)
Definition: Logger.h:198
int64_t int_resize_cast(const int64_t ival, const size_t sz)
bool is_null(const T &v, const SQLTypeInfo &t)
int64_t lazyReadInt(const int64_t ival, const size_t target_logical_idx, const StorageLookupResult &storage_lookup_result) const
InternalTargetValue getColumnInternal(const int8_t *buff, const size_t entry_idx, const size_t target_logical_idx, const StorageLookupResult &storage_lookup_result) const
#define EMPTY_KEY_16
std::vector< std::vector< std::vector< const int8_t * > > > col_buffers_
Definition: ResultSet.h:793
boost::variant< std::string, void * > NullableString
Definition: TargetValue.h:155
TargetValue makeTargetValue(const int8_t *ptr, const int8_t compact_sz, const TargetInfo &target_info, const size_t target_logical_idx, const bool translate_strings, const bool decimal_to_double, const size_t entry_buff_idx) const
static auto yieldGpuDatumFetcher(Data_Namespace::DataMgr *data_mgr_ptr, const int device_id)
HOST DEVICE int get_comp_param() const
Definition: sqltypes.h:328
const bool is_lazily_fetched
Definition: ResultSet.h:230
std::vector< std::vector< int64_t > > consistent_frag_sizes_
Definition: ResultSet.h:795
TargetValue makeVarlenTargetValue(const int8_t *ptr1, const int8_t compact_sz1, const int8_t *ptr2, const int8_t compact_sz2, const TargetInfo &target_info, const size_t target_logical_idx, const bool translate_strings, const size_t entry_buff_idx) const
const ExecutorDeviceType device_type_
Definition: ResultSet.h:772
#define NULL_SMALLINT
Definition: sqltypes.h:173
const int8_t * get_rowwise_ptr(const int8_t *buff, const size_t entry_idx) const
Definition: ResultSet.h:656
#define CHECK(condition)
Definition: Logger.h:187
static auto fetch(const ResultSet::GeoReturnType return_type, Data_Namespace::DataMgr *data_mgr, const bool fetch_data_from_gpu, const int device_id, T &&... vals)
#define EMPTY_KEY_32
bool isEmptyEntryColumnar(const size_t entry_idx, const int8_t *buff) const
uint64_t exp_to_scale(const unsigned exp)
size_t crt_row_buff_idx_
Definition: ResultSet.h:777
void copyColumnIntoBuffer(const size_t column_idx, int8_t *output_buffer, const size_t output_buffer_size) const
int64_t inline_int_null_val(const SQL_TYPE_INFO &ti)
int64_t inline_fixed_encoding_null_val(const SQL_TYPE_INFO &ti)
bool is_geometry() const
Definition: sqltypes.h:458
const int8_t getLogicalSlotWidthBytes(const size_t slot_idx) const
DEVICE NEVER_INLINE float SUFFIX() fixed_width_float_decode_noinline(const int8_t *byte_stream, const int64_t pos)
Definition: DecodersImpl.h:113
std::vector< std::vector< std::vector< int64_t > > > frag_offsets_
Definition: ResultSet.h:794
DEVICE NEVER_INLINE double SUFFIX() fixed_width_double_decode_noinline(const int8_t *byte_stream, const int64_t pos)
Definition: DecodersImpl.h:126
Basic constructors and methods of the row set interface.
bool separate_varlen_storage_valid_
Definition: ResultSet.h:806
boost::variant< ScalarTargetValue, ArrayTargetValue, GeoTargetValue, GeoTargetValuePtr > TargetValue
Definition: TargetValue.h:167
TargetValue makeGeoTargetValue(const int8_t *geo_target_ptr, const size_t slot_idx, const TargetInfo &target_info, const size_t target_logical_idx, const size_t entry_buff_idx) const
size_t advanceCursorToNextEntry() const
const CountDistinctDescriptor & getCountDistinctDescriptor(const size_t idx) const
T advance_target_ptr_col_wise(T target_ptr, const TargetInfo &target_info, const size_t slot_idx, const QueryMemoryDescriptor &query_mem_desc, const bool separate_varlen_storage)
ScalarTargetValue make_scalar_tv(const double val)
QueryDescriptionType getQueryDescriptionType() const
Definition: sqldefs.h:71
size_t crt_row_buff_idx_
Definition: ResultSet.h:273
std::vector< std::vector< TargetOffsets > > offsets_for_storage_
Definition: ResultSet.h:661
T get_cols_ptr(T buff, const QueryMemoryDescriptor &query_mem_desc)
Definition: sqldefs.h:71
bool is_string() const
Definition: sqltypes.h:446
bool global_entry_idx_valid_
Definition: ResultSet.h:275
std::unique_ptr< VarlenDatum > VarlenDatumPtr
bool isSingleColumnGroupByWithPerfectHash() const
#define IS_GEO(T)
Definition: sqltypes.h:165
size_t get_key_bytes_rowwise(const QueryMemoryDescriptor &query_mem_desc)
const int8_t * columnar_elem_ptr(const size_t entry_idx, const int8_t *col1_ptr, const int8_t compact_sz1)
FORCE_INLINE HOST DEVICE T align_to_int64(T addr)
bool isFastColumnarConversionPossible() const
Definition: ResultSet.h:507
size_t getPrependedGroupColOffInBytes(const size_t group_idx) const
std::pair< int64_t, int64_t > get_frag_id_and_local_idx(const std::vector< std::vector< T >> &frag_offsets, const size_t tab_or_col_idx, const int64_t global_idx)
std::conditional_t< isCudaCC(), DeviceArrayDatum, HostArrayDatum > ArrayDatum
Definition: sqltypes.h:119
std::unique_ptr< ArrayDatum > fetch_data_from_gpu(int64_t varlen_ptr, const int64_t length, Data_Namespace::DataMgr *data_mgr, const int device_id)
size_t getEffectiveKeyWidth() const
TargetValue build_array_target_value(const SQLTypeInfo &array_ti, const int8_t *buff, const size_t buff_sz, const bool translate_strings, std::shared_ptr< RowSetMemoryOwner > row_set_mem_owner, const Executor *executor)
boost::variant< int64_t, double, float, NullableString > ScalarTargetValue
Definition: TargetValue.h:156
size_t length
Definition: sqltypes.h:71
boost::variant< GeoPointTargetValuePtr, GeoLineStringTargetValuePtr, GeoPolyTargetValuePtr, GeoMultiPolyTargetValuePtr > GeoTargetValuePtr
Definition: TargetValue.h:165
const int device_id_
Definition: ResultSet.h:773