OmniSciDB  72c90bc290
NvidiaKernel.h
/*
 * Copyright 2022 HEAVY.AI, Inc.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#pragma once

#include "CudaMgr/CudaMgr.h"
#include "Logger/Logger.h"  // CHECK, CHECK_EQ, CHECK_LT
#include "QueryEngine/CompilationContext.h"

#ifdef HAVE_CUDA
#include <cuda.h>
#else
#include "../Shared/nocuda.h"
#endif  // HAVE_CUDA
#include <memory>
#include <string>
#include <vector>
// Result of JIT-compiling PTX to a cubin. The cubin buffer is owned by
// link_state, so the link state must stay alive for as long as the buffer is used.
struct CubinResult {
  void* cubin;
  std::vector<CUjit_option> option_keys;
  std::vector<void*> option_values;
  CUlinkState link_state;
  size_t cubin_size;
};

// Warms up the NVIDIA JIT at server startup; the initial GPU JIT compilation
// can take several seconds, so the cost is paid once here rather than on the
// first query.
void nvidia_jit_warmup();

// Compiles and links the given PTX (as emitted by the LLVM NVPTX backend) into
// a cubin for the devices managed by cuda_mgr.
CubinResult ptx_to_cubin(const std::string& ptx,
                         const CudaMgr_Namespace::CudaMgr* cuda_mgr);

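The driver-API flow behind a function like ptx_to_cubin is the standard cuLink sequence: create a link state, add the PTX, and complete the link. The sketch below is illustrative rather than the actual implementation: ptx_to_cubin_sketch is a hypothetical stand-in, the option choice is arbitrary, per-device handling is elided, and checkCudaErrors is the macro defined at the bottom of this header.

#ifdef HAVE_CUDA
inline CubinResult ptx_to_cubin_sketch(const std::string& ptx) {
  CubinResult result{};
  // JIT options are kept in the result so the same options can be replayed
  // when the cubin is later loaded with cuModuleLoadDataEx.
  result.option_keys = {CU_JIT_LOG_VERBOSE};
  result.option_values = {reinterpret_cast<void*>(1)};

  checkCudaErrors(cuLinkCreate(static_cast<unsigned>(result.option_keys.size()),
                               result.option_keys.data(),
                               result.option_values.data(),
                               &result.link_state));
  // The driver API takes a non-const void*, hence the const_cast.
  checkCudaErrors(cuLinkAddData(result.link_state,
                                CU_JIT_INPUT_PTX,
                                const_cast<char*>(ptx.c_str()),
                                ptx.size() + 1,
                                "kernel.ptx",  // arbitrary name shown in JIT logs
                                0,
                                nullptr,
                                nullptr));
  // cubin points into memory owned by link_state; it remains valid until
  // cuLinkDestroy(result.link_state) is called.
  checkCudaErrors(cuLinkComplete(result.link_state, &result.cubin, &result.cubin_size));
  return result;
}
#endif  // HAVE_CUDA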
class GpuDeviceCompilationContext {
 public:
  GpuDeviceCompilationContext(const void* image,
                              const size_t module_size,
                              const std::string& kernel_name,
                              const int device_id,
                              const void* cuda_mgr,
                              unsigned int num_options,
                              CUjit_option* options,
                              void** option_vals);
  ~GpuDeviceCompilationContext();
  CUfunction kernel() { return kernel_; }
  CUmodule module() { return module_; }
  std::string const& name() const { return kernel_name_; }
  size_t getModuleSize() const { return module_size_; }

 private:
  CUmodule module_;
  size_t module_size_;
  CUfunction kernel_;
  std::string const kernel_name_;
#ifdef HAVE_CUDA
  const int device_id_;
  const CudaMgr_Namespace::CudaMgr* cuda_mgr_;
#endif  // HAVE_CUDA
};

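Loading the image into a device boils down to two driver calls, sketched below. This is a plausible reconstruction of what the constructor has to do (its definition lives in the .cpp file), not the actual code: load_kernel_sketch is a hypothetical helper, and making the target device's context current (e.g. via the CudaMgr) is elided.

#ifdef HAVE_CUDA
inline CUfunction load_kernel_sketch(const void* image,
                                     const std::string& kernel_name,
                                     unsigned int num_options,
                                     CUjit_option* options,
                                     void** option_vals,
                                     CUmodule& module_out) {
  // image is the cubin produced at link time; the options echo the JIT
  // options recorded in CubinResult.
  checkCudaErrors(
      cuModuleLoadDataEx(&module_out, image, num_options, options, option_vals));
  CUfunction kernel{};
  checkCudaErrors(cuModuleGetFunction(&kernel, module_out, kernel_name.c_str()));
  return kernel;
}
#endif  // HAVE_CUDA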
class GpuCompilationContext : public CompilationContext {
 public:
  GpuCompilationContext() {}

  void addDeviceCode(std::unique_ptr<GpuDeviceCompilationContext>&& device_context) {
    contexts_per_device_.push_back(std::move(device_context));
  }

  std::pair<void*, void*> getNativeCode(const size_t device_id) const {
    CHECK_LT(device_id, contexts_per_device_.size());
    auto device_context = contexts_per_device_[device_id].get();
    return std::make_pair<void*, void*>(device_context->kernel(),
                                        device_context->module());
  }

  std::vector<void*> getNativeFunctionPointers() const {
    std::vector<void*> fn_ptrs;
    for (auto& device_context : contexts_per_device_) {
      CHECK(device_context);
      fn_ptrs.push_back(device_context->kernel());
    }
    return fn_ptrs;
  }

  std::string const& name(size_t const device_id) const {
    CHECK_LT(device_id, contexts_per_device_.size());
    return contexts_per_device_[device_id]->name();
  }

  size_t getMemSize() const {
    CHECK(!contexts_per_device_.empty());
    return contexts_per_device_.begin()->get()->getModuleSize();
  }

 private:
  std::vector<std::unique_ptr<GpuDeviceCompilationContext>> contexts_per_device_;
};

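Putting the pieces together, a compilation pipeline would populate one GpuDeviceCompilationContext per GPU and hand the collection to a GpuCompilationContext. The snippet below is a hypothetical usage sketch: make_gpu_context_sketch, device_count, and the kernel name "query_kernel" are assumptions, not part of this header.

inline GpuCompilationContext make_gpu_context_sketch(
    CubinResult& cubin_result,
    const CudaMgr_Namespace::CudaMgr* cuda_mgr,
    const int device_count) {
  GpuCompilationContext gpu_ctx;
  for (int device_id = 0; device_id < device_count; ++device_id) {
    gpu_ctx.addDeviceCode(std::make_unique<GpuDeviceCompilationContext>(
        cubin_result.cubin,
        cubin_result.cubin_size,
        "query_kernel",  // hypothetical kernel name
        device_id,
        cuda_mgr,
        static_cast<unsigned>(cubin_result.option_keys.size()),
        cubin_result.option_keys.data(),
        cubin_result.option_values.data()));
  }
  return gpu_ctx;
}

A caller would then retrieve the per-device entry points, e.g. auto [kernel_ptr, module_ptr] = gpu_ctx.getNativeCode(0); which returns the CUfunction and CUmodule for device 0, type-erased to void*.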
#ifdef HAVE_CUDA
inline std::string ourCudaErrorStringHelper(CUresult error) {
  char const* c1;
  CUresult res1 = cuGetErrorName(error, &c1);
  char const* c2;
  CUresult res2 = cuGetErrorString(error, &c2);
  std::string text;
  if (res1 == CUDA_SUCCESS) {
    text += c1;
    text += " (";
    text += std::to_string(error);
    text += ")";
  }
  if (res2 == CUDA_SUCCESS) {
    if (!text.empty()) {
      text += ": ";
    }
    text += c2;
  }
  if (text.empty()) {
    text = std::to_string(error);  // never return an empty error string
  }
  return text;
}

#define checkCudaErrors(ARG)                                                  \
  if (CUresult const err = static_cast<CUresult>(ARG); err != CUDA_SUCCESS)   \
    CHECK_EQ(err, CUDA_SUCCESS) << ourCudaErrorStringHelper(err)
#endif  // HAVE_CUDA
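checkCudaErrors is fatal by design: on any result other than CUDA_SUCCESS, CHECK_EQ logs the decoded name and description from ourCudaErrorStringHelper and aborts. A minimal usage sketch:

#ifdef HAVE_CUDA
// A failure here would log something like
// "CUDA_ERROR_INVALID_DEVICE (101): invalid device ordinal" before aborting.
checkCudaErrors(cuInit(0));
CUdevice device;
checkCudaErrors(cuDeviceGet(&device, /*ordinal=*/0));
#endif  // HAVE_CUDA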