OmniSciDB bf83d84833
GpuInterrupt.cpp
/*
 * Copyright 2017 MapD Technologies, Inc.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#include "DynamicWatchdog.h"
#include "Execute.h"

void Executor::registerActiveModule(void* module, const int device_id) const {
#ifdef HAVE_CUDA
  std::lock_guard<std::mutex> lock(gpu_active_modules_mutex_);
  CHECK_LT(device_id, max_gpu_count);
  gpu_active_modules_device_mask_ |= (1 << device_id);
  gpu_active_modules_[device_id] = module;
  VLOG(1) << "Executor " << this << ", mask 0x" << std::hex
          << gpu_active_modules_device_mask_ << ": Registered module " << module
          << " on device " << std::to_string(device_id);
#endif
}

void Executor::unregisterActiveModule(void* module, const int device_id) const {
#ifdef HAVE_CUDA
  std::lock_guard<std::mutex> lock(gpu_active_modules_mutex_);
  CHECK_LT(device_id, max_gpu_count);
  if ((gpu_active_modules_device_mask_ & (1 << device_id)) == 0) {
    return;
  }
  CHECK_EQ(gpu_active_modules_[device_id], module);
  gpu_active_modules_device_mask_ ^= (1 << device_id);
  VLOG(1) << "Executor " << this << ", mask 0x" << std::hex
          << gpu_active_modules_device_mask_ << ": Unregistered module " << module
          << " on device " << std::to_string(device_id);
#endif
}

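// Query-interrupt overview for the routines in this file: GPU modules are expected to
// be registered per device (registerActiveModule / unregisterActiveModule above) around
// kernel execution. interrupt() signals the registered modules by asynchronously
// setting device-side abort flags, and resetInterrupt() clears the host-side state once
// the interrupted query has been cleaned up.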
void Executor::interrupt(const std::string& query_session,
                         const std::string& interrupt_session) {
  if (g_enable_runtime_query_interrupt) {
    bool is_running_query = false;
    {
      // Here we validate that the requested query session is valid (i.e., already
      // enrolled); if it is not, we skip the interrupt request.
      mapd_shared_lock<mapd_shared_mutex> session_read_lock(executor_session_mutex_);
      if (!checkIsQuerySessionEnrolled(query_session, session_read_lock)) {
        VLOG(1) << "Skip the interrupt request (no query has been submitted from the "
                   "given query session)";
        return;
      }
      if (checkIsQuerySessionInterrupted(query_session, session_read_lock)) {
        VLOG(1) << "Skip the interrupt request (already interrupted query session)";
        return;
      }
      // If the query is still pending, we only need to turn the session's interrupt
      // flag on (no interrupt signal is sent to a "RUNNING" kernel; see the code below).
      is_running_query = checkCurrentQuerySession(query_session, session_read_lock);
    }
    {
      // We have to cover an interrupt request from *any* session because we don't know
      // whether the request targets the running query or a pending query
      // (or is just a false alarm for an unregistered session in the queue).
      // So we mark the session as interrupted once we have confirmed that it is
      // enrolled and not already interrupted at this moment.
      mapd_unique_lock<mapd_shared_mutex> session_write_lock(executor_session_mutex_);
      setQuerySessionAsInterrupted(query_session, session_write_lock);
    }
    if (!is_running_query) {
      return;
    }
    interrupted_.store(true);
  }

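  // CPU_execution_mode tracks whether the interrupt could be delivered to an active
  // GPU module below; it stays true when no GPU module is registered (or CUDA is not
  // available), in which case only the CPU-side interrupt flag is raised at the end of
  // this function.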
  bool CPU_execution_mode = true;

#ifdef HAVE_CUDA
  // The code below implements runtime query interrupt for GPU execution.
  // Note that the user can force CPU mode even when GPUs are available; in that case
  // we must not execute the code below, to avoid a runtime failure.
  const auto cuda_mgr =
      Catalog_Namespace::SysCatalog::instance().getDataMgr().getCudaMgr();
  if (cuda_mgr) {
    CHECK_GE(cuda_mgr->getDeviceCount(), 1);
    std::lock_guard<std::mutex> lock(gpu_active_modules_mutex_);
    VLOG(1) << "Executor " << this << ": Interrupting Active Modules: mask 0x" << std::hex
            << gpu_active_modules_device_mask_;
    CUcontext old_cu_context;
    checkCudaErrors(cuCtxGetCurrent(&old_cu_context));
    for (int device_id = 0; device_id < max_gpu_count; device_id++) {
      if (gpu_active_modules_device_mask_ & (1 << device_id)) {
        void* module = gpu_active_modules_[device_id];
        auto cu_module = static_cast<CUmodule>(module);
        if (!cu_module) {
          continue;
        } else {
          VLOG(1) << "Try to interrupt the running query on GPU";
          CPU_execution_mode = false;
        }
        VLOG(1) << "Executor " << this << ": Interrupting Active Modules: mask 0x"
                << std::hex << gpu_active_modules_device_mask_ << " on device "
                << std::to_string(device_id);

        cuda_mgr->setContext(device_id);

        // Create a high-priority, non-blocking communication stream.
        CUstream cu_stream1;
        checkCudaErrors(
            cuStreamCreateWithPriority(&cu_stream1, CU_STREAM_NON_BLOCKING, 1));

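        // Note on the stream above: CU_STREAM_NON_BLOCKING avoids implicit
        // synchronization with the default stream, so the small async copies issued
        // below should not have to queue behind work the running kernel has already
        // submitted.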
        CUevent start, stop;
        cuEventCreate(&start, 0);
        cuEventCreate(&stop, 0);
        cuEventRecord(start, cu_stream1);

        if (g_enable_dynamic_watchdog) {
          CUdeviceptr dw_abort;
          size_t dw_abort_size;
          if (cuModuleGetGlobal(&dw_abort, &dw_abort_size, cu_module, "dw_abort") ==
              CUDA_SUCCESS) {
            CHECK_EQ(dw_abort_size, sizeof(uint32_t));
            int32_t abort_val = 1;
            checkCudaErrors(cuMemcpyHtoDAsync(dw_abort,
                                              reinterpret_cast<void*>(&abort_val),
                                              sizeof(int32_t),
                                              cu_stream1));

            if (device_id == 0) {
              VLOG(1) << "GPU: Async Abort submitted to Device "
                      << std::to_string(device_id);
            }
          }
        }
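        // Device-side counterparts (a sketch for reference, not part of this file):
        // "dw_abort" and "runtime_interrupt_flag" are __device__ globals defined in
        // cuda_mapd_rt.cu. The dynamic watchdog and the generated GPU code are
        // expected to poll them and bail out once they become non-zero, roughly:
        //
        //   __device__ int32_t dw_abort = 0;
        //   __device__ int32_t runtime_interrupt_flag = 0;
        //   extern "C" __device__ bool check_interrupt() {
        //     return runtime_interrupt_flag != 0;  // set asynchronously by this host code
        //   }
        //
        // The actual device implementation lives in cuda_mapd_rt.cu.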

        if (g_enable_runtime_query_interrupt) {
          CUdeviceptr runtime_interrupt_flag;
          size_t runtime_interrupt_flag_size;
          auto status = cuModuleGetGlobal(&runtime_interrupt_flag,
                                          &runtime_interrupt_flag_size,
                                          cu_module,
                                          "runtime_interrupt_flag");
          if (status == CUDA_SUCCESS) {
            VLOG(1) << "Interrupt on GPU status: CUDA_SUCCESS";
            CHECK_EQ(runtime_interrupt_flag_size, sizeof(uint32_t));
            int32_t abort_val = 1;
            checkCudaErrors(cuMemcpyHtoDAsync(runtime_interrupt_flag,
                                              reinterpret_cast<void*>(&abort_val),
                                              sizeof(int32_t),
                                              cu_stream1));
            if (device_id == 0) {
              VLOG(1) << "GPU: Async Abort submitted to Device "
                      << std::to_string(device_id);
            }
          } else if (status == CUDA_ERROR_NOT_FOUND) {
            throw std::runtime_error(
                "Runtime query interrupt has failed: an interrupt flag on the GPU could "
                "not be initialized (CUDA_ERROR_CODE: CUDA_ERROR_NOT_FOUND)");
          } else {
            // If we reach here, the runtime query interrupt failed due to one of the
            // following errors: CUDA_ERROR_NOT_INITIALIZED, CUDA_ERROR_DEINITIALIZED,
            // CUDA_ERROR_INVALID_CONTEXT, or CUDA_ERROR_INVALID_VALUE. All of these
            // error codes indicate a device failure.
            const char* error_ret_str = nullptr;
            cuGetErrorName(status, &error_ret_str);
            if (!error_ret_str) {
              error_ret_str = "UNKNOWN";
            }
            std::string error_str(error_ret_str);
            throw std::runtime_error(
                "Runtime interrupt has failed due to a device related issue "
                "(CUDA_ERROR_CODE: " +
                error_str + ")");
          }

          cuEventRecord(stop, cu_stream1);
          cuEventSynchronize(stop);
          float milliseconds = 0;
          cuEventElapsedTime(&milliseconds, start, stop);
          VLOG(1) << "Device " << std::to_string(device_id)
                  << ": submitted async request to abort SUCCESS: "
                  << std::to_string(milliseconds) << " ms";
          checkCudaErrors(cuStreamDestroy(cu_stream1));
        }
      }
      checkCudaErrors(cuCtxSetCurrent(old_cu_context));
    }
  }
#endif
  if (g_enable_dynamic_watchdog) {
    dynamic_watchdog_init(static_cast<unsigned>(DW_ABORT));
  }

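  // If no active GPU module was found (or CUDA is unavailable), fall back to the
  // CPU-side interrupt flag; CPU execution paths are expected to poll the flag that
  // check_interrupt_init(INT_ABORT) raises.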
  if (g_enable_runtime_query_interrupt && CPU_execution_mode) {
    // turn interrupt flag on for CPU mode
    VLOG(1) << "Try to interrupt the running query on CPU";
    check_interrupt_init(static_cast<unsigned>(INT_ABORT));
  }
}

void Executor::resetInterrupt() {
#ifdef HAVE_CUDA
  std::lock_guard<std::mutex> lock(gpu_active_modules_mutex_);
#endif

  if (g_enable_dynamic_watchdog) {
    dynamic_watchdog_init(static_cast<unsigned>(DW_RESET));
  } else if (g_enable_runtime_query_interrupt) {
    check_interrupt_init(static_cast<unsigned>(INT_RESET));
  }

  if (interrupted_.load()) {
    VLOG(1) << "RESET Executor " << this << " that had previously been interrupted";
    interrupted_.store(false);
  }
}
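
// Note: resetInterrupt() is expected to be invoked once an interrupted (or
// watchdog-aborted) query has been cleaned up, so that stale abort flags and the
// interrupted_ marker do not leak into the next query executed by this Executor.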