QueryExecutionContext.h
/*
 * Copyright 2018 MapD Technologies, Inc.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#ifndef QUERYENGINE_QUERYEXECUTIONCONTEXT_H
#define QUERYENGINE_QUERYEXECUTIONCONTEXT_H

#include "CompilationOptions.h"
#include "GpuMemUtils.h"
#include "Rendering/RenderInfo.h"
#include "ResultSet.h"

#include "QueryMemoryInitializer.h"

#include <boost/core/noncopyable.hpp>
#include <vector>

class CpuCompilationContext;
class GpuCompilationContext;

struct RelAlgExecutionUnit;
class QueryMemoryDescriptor;
class Executor;

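// Per-device execution state for a single query step: owns the GPU allocator and
// query output buffers, launches the generated CPU/GPU code, and converts the raw
// group-by buffers it gets back into a ResultSet.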
class QueryExecutionContext : boost::noncopyable {
 public:
  // TODO(alex): remove device_type
  QueryExecutionContext(const RelAlgExecutionUnit& ra_exe_unit,
                        const QueryMemoryDescriptor&,
                        const Executor* executor,
                        const ExecutorDeviceType device_type,
                        const ExecutorDispatchMode dispatch_mode,
                        const int device_id,
                        const int64_t num_rows,
                        const std::vector<std::vector<const int8_t*>>& col_buffers,
                        const std::vector<std::vector<uint64_t>>& frag_offsets,
                        std::shared_ptr<RowSetMemoryOwner> row_set_mem_owner,
                        const bool output_columnar,
                        const bool sort_on_gpu,
                        RenderInfo*);

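  // Builds the ResultSet for this execution unit from the output buffers owned by
  // this context.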
  ResultSetPtr getRowSet(const RelAlgExecutionUnit& ra_exe_unit,
                         const QueryMemoryDescriptor& query_mem_desc) const;

  ResultSetPtr groupBufferToResults(const size_t i) const;

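  // Launches the compiled GPU code for this execution unit on the given device and
  // returns the raw output buffers as int64_t pointers.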
  std::vector<int64_t*> launchGpuCode(
      const RelAlgExecutionUnit& ra_exe_unit,
      const GpuCompilationContext* cu_functions,
      const bool hoist_literals,
      const std::vector<int8_t>& literal_buff,
      std::vector<std::vector<const int8_t*>> col_buffers,
      const std::vector<std::vector<int64_t>>& num_rows,
      const std::vector<std::vector<uint64_t>>& frag_row_offsets,
      const int32_t scan_limit,
      Data_Namespace::DataMgr* data_mgr,
      const unsigned block_size_x,
      const unsigned grid_size_x,
      const int device_id,
      const size_t shared_memory_size,
      int32_t* error_code,
      const uint32_t num_tables,
      const std::vector<int64_t>& join_hash_tables,
      RenderAllocatorMap* render_allocator_map);

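  // CPU counterpart of launchGpuCode: invokes the compiled function pointers over
  // the input fragments and returns the raw output buffers.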
  std::vector<int64_t*> launchCpuCode(
      const RelAlgExecutionUnit& ra_exe_unit,
      const CpuCompilationContext* fn_ptrs,
      const bool hoist_literals,
      const std::vector<int8_t>& literal_buff,
      std::vector<std::vector<const int8_t*>> col_buffers,
      const std::vector<std::vector<int64_t>>& num_rows,
      const std::vector<std::vector<uint64_t>>& frag_row_offsets,
      const int32_t scan_limit,
      int32_t* error_code,
      const uint32_t num_tables,
      const std::vector<int64_t>& join_hash_tables);

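  // Returns the aggregate initialization value stored at the given index.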
  int64_t getAggInitValForIndex(const size_t index) const;

 private:
#ifdef HAVE_CUDA
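  // Positional indices of the arguments passed to the generated GPU kernel;
  // KERN_PARAM_COUNT is the total number of kernel parameters.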
  enum {
    COL_BUFFERS,
    NUM_FRAGMENTS,
    LITERALS,
    NUM_ROWS,
    FRAG_ROW_OFFSETS,
    MAX_MATCHED,
    TOTAL_MATCHED,
    INIT_AGG_VALS,
    GROUPBY_BUF,
    ERROR_CODE,
    NUM_TABLES,
    JOIN_HASH_TABLES,
    KERN_PARAM_COUNT,
  };

  void initializeDynamicWatchdog(void* native_module, const int device_id) const;

  void initializeRuntimeInterrupter(void* native_module, const int device_id) const;

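  // Copies the host-side kernel inputs (column buffers, literals, row counts,
  // fragment offsets, init aggregate values, error codes, join hash tables) to
  // device memory and returns the device pointers in the order of the enum above.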
  std::vector<CUdeviceptr> prepareKernelParams(
      const std::vector<std::vector<const int8_t*>>& col_buffers,
      const std::vector<int8_t>& literal_buff,
      const std::vector<std::vector<int64_t>>& num_rows,
      const std::vector<std::vector<uint64_t>>& frag_offsets,
      const int32_t scan_limit,
      const std::vector<int64_t>& init_agg_vals,
      const std::vector<int32_t>& error_codes,
      const uint32_t num_tables,
      const std::vector<int64_t>& join_hash_tables,
      Data_Namespace::DataMgr* data_mgr,
      const int device_id,
      const bool hoist_literals,
      const bool is_group_by) const;
#endif  // HAVE_CUDA

  ResultSetPtr groupBufferToDeinterleavedResults(const size_t i) const;

  std::unique_ptr<CudaAllocator> gpu_allocator_;

  // TODO(adb): convert to shared_ptr
  QueryMemoryDescriptor query_mem_desc_;
  const Executor* executor_;
  const ExecutorDeviceType device_type_;
  const ExecutorDispatchMode dispatch_mode_;
  std::shared_ptr<RowSetMemoryOwner> row_set_mem_owner_;
  const bool output_columnar_;
  std::unique_ptr<QueryMemoryInitializer> query_buffers_;
  mutable std::unique_ptr<ResultSet> estimator_result_set_;

  friend class Executor;
};

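// Illustrative usage sketch (not part of the build): a rough outline of how an
// Executor-side caller might drive this class for a CPU run. The inputs shown
// (fn_ptrs, literal_buff, col_buffers, num_rows_per_fragment, frag_row_offsets,
// num_tables, join_hash_tables, dispatch_mode) are placeholders assumed to have
// been produced by query compilation and fragment gathering, not names defined in
// this header.
//
//   QueryExecutionContext ctx(ra_exe_unit,
//                             query_mem_desc,
//                             executor,
//                             ExecutorDeviceType::CPU,
//                             dispatch_mode,
//                             /*device_id=*/0,
//                             num_rows,
//                             col_buffers,
//                             frag_offsets,
//                             row_set_mem_owner,
//                             /*output_columnar=*/false,
//                             /*sort_on_gpu=*/false,
//                             /*render_info=*/nullptr);
//   int32_t error_code{0};
//   ctx.launchCpuCode(ra_exe_unit,
//                     fn_ptrs,
//                     /*hoist_literals=*/true,
//                     literal_buff,
//                     col_buffers,
//                     num_rows_per_fragment,
//                     frag_row_offsets,
//                     /*scan_limit=*/0,
//                     &error_code,
//                     num_tables,
//                     join_hash_tables);
//   auto result_set = ctx.getRowSet(ra_exe_unit, query_mem_desc);
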
#endif  // QUERYENGINE_QUERYEXECUTIONCONTEXT_H