OmniSciDB  ca0c39ec8f
 All Classes Namespaces Files Functions Variables Typedefs Enumerations Enumerator Friends Macros Groups Pages
StreamingTopN.cpp
Go to the documentation of this file.
1 /*
2  * Copyright 2022 HEAVY.AI, Inc.
3  *
4  * Licensed under the Apache License, Version 2.0 (the "License");
5  * you may not use this file except in compliance with the License.
6  * You may obtain a copy of the License at
7  *
8  * http://www.apache.org/licenses/LICENSE-2.0
9  *
10  * Unless required by applicable law or agreed to in writing, software
11  * distributed under the License is distributed on an "AS IS" BASIS,
12  * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13  * See the License for the specific language governing permissions and
14  * limitations under the License.
15  */
16 
17 #include "StreamingTopN.h"
18 #include "RelAlgExecutor.h"
19 #include "Shared/checked_alloc.h"
20 #include "TopKSort.h"
21 
namespace streaming_top_n {

// Returns the total byte size of the streaming top-n scratch buffer: one
// heap per thread, where each heap stores a row count (1 quad), n heap-key
// slots, and n result rows of `row_size` bytes each.
size_t get_heap_size(const size_t row_size, const size_t n, const size_t thread_count) {
  // Number of 64-bit slots per row; row_size is assumed to be a multiple of
  // sizeof(int64_t) (any remainder is truncated) -- TODO confirm with callers.
  const auto row_size_quad = row_size / sizeof(int64_t);
  return (1 + n + row_size_quad * n) * thread_count * sizeof(int64_t);
}

// Byte offset where the row payload section begins inside the heaps buffer,
// i.e. just past every thread's (count + n keys) header quads.
size_t get_rows_offset_of_heaps(const size_t n, const size_t thread_count) {
  return (1 + n) * thread_count * sizeof(int64_t);
}

// Copies the row payload section (everything past the per-thread heap
// headers) out of `heaps` into a freshly allocated host-side byte vector.
// `heaps_size` is the total buffer size in bytes and must be at least the
// header size implied by `n` and `thread_count`.
std::vector<int8_t> get_rows_copy_from_heaps(const int64_t* heaps,
                                             const size_t heaps_size,
                                             const size_t n,
                                             const size_t thread_count) {
  const auto rows_offset = streaming_top_n::get_rows_offset_of_heaps(n, thread_count);
  const auto row_buff_size = heaps_size - rows_offset;
  std::vector<int8_t> rows_copy(row_buff_size);
  if (row_buff_size > 0) {
    // Use data() rather than &rows_copy[0]: indexing an empty vector is UB,
    // and the buffer can legitimately be header-only (zero payload bytes).
    const auto rows_ptr = reinterpret_cast<const int8_t*>(heaps) + rows_offset;
    std::memcpy(rows_copy.data(), rows_ptr, row_buff_size);
  }
  return rows_copy;
}

}  // namespace streaming_top_n
46 
47 size_t get_heap_key_slot_index(const std::vector<Analyzer::Expr*>& target_exprs,
48  const size_t target_idx) {
49  size_t slot_idx = 0;
50  for (size_t i = 0; i < target_idx; ++i) {
51  auto agg_info = get_target_info(target_exprs[i], g_bigint_count);
52  slot_idx = advance_slot(slot_idx, agg_info, false);
53  }
54  return slot_idx;
55 }
56 
#ifdef HAVE_CUDA
// Merges the per-thread device-side top-n heaps and copies the winning rows
// back to the host as a raw byte buffer.
// NOTE(review): the listing this was recovered from dropped two original
// source lines (the `query_mem_desc` parameter declaration and the
// `return pop_n_rows_from_merged_heaps_gpu(` line); both are restored below
// from their visible uses -- verify against the canonical source.
std::vector<int8_t> pick_top_n_rows_from_dev_heaps(
    Data_Namespace::DataMgr* data_mgr,
    const int64_t* dev_heaps_buffer,
    const RelAlgExecutionUnit& ra_exe_unit,
    const QueryMemoryDescriptor& query_mem_desc,
    const size_t thread_count,
    const int device_id) {
  // Streaming top-n requires a row-wise output layout.
  CHECK(!query_mem_desc.canOutputColumnar());
  // This fast path only handles a single ORDER BY expression.
  CHECK_EQ(ra_exe_unit.sort_info.order_entries.size(), size_t(1));
  const auto& only_oe = ra_exe_unit.sort_info.order_entries.back();
  const auto oe_col_idx = only_oe.tle_no - 1;  // tle_no is 1-based
  // Keep offset + limit rows so the offset can be applied after the merge.
  const auto n = ra_exe_unit.sort_info.offset + ra_exe_unit.sort_info.limit;
  const auto group_key_bytes = query_mem_desc.getEffectiveKeyWidth();
  const PodOrderEntry pod_oe{only_oe.tle_no, only_oe.is_desc, only_oe.nulls_first};
  const auto key_slot_idx = get_heap_key_slot_index(ra_exe_unit.target_exprs, oe_col_idx);
  // Describes where the order-by column lives inside each heap row.
  GroupByBufferLayoutInfo oe_layout{
      n * thread_count,
      query_mem_desc.getColOffInBytes(key_slot_idx),
      static_cast<size_t>(query_mem_desc.getPaddedSlotWidthBytes(oe_col_idx)),
      query_mem_desc.getRowSize(),
      get_target_info(ra_exe_unit.target_exprs[oe_col_idx], g_bigint_count),
      -1};
  return pop_n_rows_from_merged_heaps_gpu(
      data_mgr,
      dev_heaps_buffer,
      query_mem_desc.getBufferSizeBytes(
          ra_exe_unit, thread_count, ExecutorDeviceType::GPU),
      n,
      pod_oe,
      oe_layout,
      group_key_bytes,
      thread_count,
      device_id);
}
#endif  // HAVE_CUDA
std::vector< Analyzer::Expr * > target_exprs
#define CHECK_EQ(x, y)
Definition: Logger.h:230
size_t getBufferSizeBytes(const RelAlgExecutionUnit &ra_exe_unit, const unsigned thread_count, const ExecutorDeviceType device_type) const
Streaming Top N algorithm.
size_t get_rows_offset_of_heaps(const size_t n, const size_t thread_count)
const std::list< Analyzer::OrderEntry > order_entries
size_t getEffectiveKeyWidth() const
std::vector< int8_t > pop_n_rows_from_merged_heaps_gpu(Data_Namespace::DataMgr *data_mgr, const int64_t *dev_heaps, const size_t heaps_size, const size_t n, const PodOrderEntry &oe, const GroupByBufferLayoutInfo &layout, const size_t group_key_bytes, const size_t thread_count, const int device_id)
Definition: TopKSort.cu:309
size_t get_heap_key_slot_index(const std::vector< Analyzer::Expr * > &target_exprs, const size_t target_idx)
TargetInfo get_target_info(const Analyzer::Expr *target_expr, const bool bigint_count)
Definition: TargetInfo.h:97
const size_t limit
size_t advance_slot(const size_t j, const TargetInfo &target_info, const bool separate_varlen_storage)
bool g_bigint_count
const int8_t getPaddedSlotWidthBytes(const size_t slot_idx) const
int tle_no
std::vector< int8_t > get_rows_copy_from_heaps(const int64_t *heaps, const size_t heaps_size, const size_t n, const size_t thread_count)
size_t get_heap_size(const size_t row_size, const size_t n, const size_t thread_count)
#define CHECK(condition)
Definition: Logger.h:222
constexpr double n
Definition: Utm.h:38
const size_t offset
size_t getColOffInBytes(const size_t col_idx) const