OmniSciDB a5dc49c757
InPlaceSortImpl.h File Reference
#include <cstdint>
Include dependency graph for InPlaceSortImpl.h (graph omitted): it shows which files directly or indirectly include this file.


Functions

void sort_on_gpu (int64_t *val_buff, int32_t *key_buff, const uint64_t entry_count, const bool desc, const uint32_t chosen_bytes, ThrustAllocator &alloc, const int device_id)
void sort_on_cpu (int64_t *val_buff, int32_t *key_buff, const uint64_t entry_count, const bool desc, const uint32_t chosen_bytes)
void apply_permutation_on_gpu (int64_t *val_buff, int32_t *idx_buff, const uint64_t entry_count, const uint32_t chosen_bytes, ThrustAllocator &alloc, const int device_id)
void apply_permutation_on_cpu (int64_t *val_buff, int32_t *idx_buff, const uint64_t entry_count, int64_t *tmp_buff, const uint32_t chosen_bytes)

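The four entry points appear to be used in pairs, as the callers listed below (sort_groups_* and apply_permutation_*) suggest: the order-by column is sorted first, which also reorders the accompanying 32-bit key/index buffer, and that permutation is then applied to the remaining columns. A minimal sketch of the CPU path under that assumption, with a hypothetical caller and 8-byte columns; note that, per the definitions below, the dispatch bodies are guarded by HAVE_CUDA, so they do nothing in a CUDA-less build:

#include <cstdint>
#include <numeric>
#include <vector>

#include "InPlaceSortImpl.h"

// Hypothetical caller: one order-by column plus one payload column, both held
// as full 8-byte values (chosen_bytes == 8). idx starts as the identity
// permutation; sort_on_cpu is assumed to reorder it alongside the values.
void sort_two_columns_desc(std::vector<int64_t>& order_by_col,
                           std::vector<int64_t>& payload_col) {
  const uint64_t entry_count = order_by_col.size();

  std::vector<int32_t> idx(entry_count);
  std::iota(idx.begin(), idx.end(), 0);

  // Sort the order-by column in place (descending) and capture the permutation.
  sort_on_cpu(order_by_col.data(), idx.data(), entry_count,
              /*desc=*/true, /*chosen_bytes=*/8);

  // Reorder the payload column with the captured permutation; the scratch
  // buffer holds entry_count elements of the chosen width.
  std::vector<int64_t> tmp(entry_count);
  apply_permutation_on_cpu(payload_col.data(), idx.data(), entry_count,
                           tmp.data(), /*chosen_bytes=*/8);
}
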
Function Documentation

void apply_permutation_on_cpu (int64_t *val_buff, int32_t *idx_buff, const uint64_t entry_count, int64_t *tmp_buff, const uint32_t chosen_bytes)

Definition at line 193 of file InPlaceSortImpl.cu.

References apply_permutation_on_cpu().

Referenced by apply_permutation_cpu(), and apply_permutation_on_cpu().

197  {
198 #ifdef HAVE_CUDA
199  switch (chosen_bytes) {
200  case 1:
201  apply_permutation_on_cpu(reinterpret_cast<int8_t*>(val_buff),
202  idx_buff,
203  entry_count,
204  reinterpret_cast<int8_t*>(tmp_buff));
205  break;
206  case 2:
207  apply_permutation_on_cpu(reinterpret_cast<int16_t*>(val_buff),
208  idx_buff,
209  entry_count,
210  reinterpret_cast<int16_t*>(tmp_buff));
211  break;
212  case 4:
213  apply_permutation_on_cpu(reinterpret_cast<int32_t*>(val_buff),
214  idx_buff,
215  entry_count,
216  reinterpret_cast<int32_t*>(tmp_buff));
217  break;
218  case 8:
219  apply_permutation_on_cpu(val_buff, idx_buff, entry_count, tmp_buff);
220  break;
221  default:
222  // FIXME(miyu): CUDA linker doesn't accept assertion on GPU yet right now.
223  break;
224  }
225 #endif
226 }

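The per-width overloads selected by the switch above are not reproduced on this page. As an illustration only, not the actual implementation, a scalar sketch of what such an overload plausibly does, and why a scratch buffer of the same width is passed in: values are gathered into tmp_buff and copied back, rather than permuted strictly in place (which would require cycle-chasing).

#include <cstdint>
#include <cstring>

// Hypothetical scalar overload: gather by permutation index into tmp_buff,
// then copy the result back over val_buff.
template <typename T>
void apply_permutation_on_cpu_sketch(T* val_buff,
                                     const int32_t* idx_buff,
                                     const uint64_t entry_count,
                                     T* tmp_buff) {
  for (uint64_t i = 0; i < entry_count; ++i) {
    tmp_buff[i] = val_buff[idx_buff[i]];  // row i takes the value of row idx_buff[i]
  }
  std::memcpy(val_buff, tmp_buff, entry_count * sizeof(T));
}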

void apply_permutation_on_gpu (int64_t *val_buff, int32_t *idx_buff, const uint64_t entry_count, const uint32_t chosen_bytes, ThrustAllocator &alloc, const int device_id)

Definition at line 163 of file InPlaceSortImpl.cu.

References apply_permutation_on_gpu().

Referenced by anonymous_namespace{InPlaceSort.cpp}::apply_permutation_gpu(), and apply_permutation_on_gpu().

168  {
169 #ifdef HAVE_CUDA
170  switch (chosen_bytes) {
171  case 1:
172  apply_permutation_on_gpu(
173  reinterpret_cast<int8_t*>(val_buff), idx_buff, entry_count, alloc, device_id);
174  break;
175  case 2:
176  apply_permutation_on_gpu(
177  reinterpret_cast<int16_t*>(val_buff), idx_buff, entry_count, alloc, device_id);
178  break;
179  case 4:
180  apply_permutation_on_gpu(
181  reinterpret_cast<int32_t*>(val_buff), idx_buff, entry_count, alloc, device_id);
182  break;
183  case 8:
184  apply_permutation_on_gpu(val_buff, idx_buff, entry_count, alloc, device_id);
185  break;
186  default:
187  // FIXME(miyu): CUDA linker doesn't accept assertion on GPU yet right now.
188  break;
189  }
190 #endif
191 }

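The GPU overloads take a ThrustAllocator and a device id, which suggests the permutation is delegated to Thrust with temporary storage drawn from the provided allocator. Ignoring the allocator and device selection, an illustrative stand-alone sketch of the same gather operation on thrust::device_vector; this is an assumption about the underlying mechanism, not a copy of the real overloads:

#include <cstdint>

#include <thrust/device_vector.h>
#include <thrust/gather.h>

// Illustration only: permute a device-resident value buffer by an index
// buffer, as apply_permutation_on_gpu presumably does internally.
template <typename T>
void apply_permutation_gpu_sketch(thrust::device_vector<T>& vals,
                                  const thrust::device_vector<int32_t>& idx) {
  thrust::device_vector<T> tmp(vals.size());
  // tmp[i] = vals[idx[i]]
  thrust::gather(idx.begin(), idx.end(), vals.begin(), tmp.begin());
  vals.swap(tmp);
}

The gather needs a temporary buffer on the device as well, which is presumably what the allocator argument provides in the real implementation.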

void sort_on_cpu (int64_t *val_buff, int32_t *key_buff, const uint64_t entry_count, const bool desc, const uint32_t chosen_bytes)

Definition at line 137 of file InPlaceSortImpl.cu.

References sort_on_cpu().

Referenced by sort_groups_cpu(), and sort_on_cpu().

141  {
142 #ifdef HAVE_CUDA
143  switch (chosen_bytes) {
144  case 1:
145  sort_on_cpu(reinterpret_cast<int8_t*>(val_buff), idx_buff, entry_count, desc);
146  break;
147  case 2:
148  sort_on_cpu(reinterpret_cast<int16_t*>(val_buff), idx_buff, entry_count, desc);
149  break;
150  case 4:
151  sort_on_cpu(reinterpret_cast<int32_t*>(val_buff), idx_buff, entry_count, desc);
152  break;
153  case 8:
154  sort_on_cpu(val_buff, idx_buff, entry_count, desc);
155  break;
156  default:
157  // FIXME(miyu): CUDA linker doesn't accept assertion on GPU yet right now.
158  break;
159  }
160 #endif
161 }

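The documented behavior amounts to a keyed sort: the value buffer is sorted ascending or descending according to desc, and the 32-bit key/index buffer is permuted in lock-step. A host-side sketch of one per-width overload using thrust::sort_by_key; given that the definitions live in InPlaceSortImpl.cu this is plausible, but it is an illustration rather than the actual code:

#include <cstdint>

#include <thrust/execution_policy.h>
#include <thrust/functional.h>
#include <thrust/sort.h>

// Illustrative host-side equivalent of one per-width overload: sort the value
// buffer and carry the 32-bit key/index buffer along with it.
template <typename T>
void sort_on_cpu_sketch(T* val_buff,
                        int32_t* idx_buff,
                        const uint64_t entry_count,
                        const bool desc) {
  if (desc) {
    thrust::sort_by_key(thrust::host,
                        val_buff,
                        val_buff + entry_count,
                        idx_buff,
                        thrust::greater<T>());
  } else {
    thrust::sort_by_key(thrust::host, val_buff, val_buff + entry_count, idx_buff);
  }
}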

void sort_on_gpu (int64_t *val_buff, int32_t *key_buff, const uint64_t entry_count, const bool desc, const uint32_t chosen_bytes, ThrustAllocator &alloc, const int device_id)

Definition at line 94 of file InPlaceSortImpl.cu.

References sort_on_gpu().

Referenced by QueryExecutionContext::QueryExecutionContext(), anonymous_namespace{InPlaceSort.cpp}::sort_groups_gpu(), and sort_on_gpu().

100  {
101 #ifdef HAVE_CUDA
102  switch (chosen_bytes) {
103  case 1:
104  sort_on_gpu(reinterpret_cast<int8_t*>(val_buff),
105  idx_buff,
106  entry_count,
107  desc,
108  alloc,
109  device_id);
110  break;
111  case 2:
112  sort_on_gpu(reinterpret_cast<int16_t*>(val_buff),
113  idx_buff,
114  entry_count,
115  desc,
116  alloc,
117  device_id);
118  break;
119  case 4:
120  sort_on_gpu(reinterpret_cast<int32_t*>(val_buff),
121  idx_buff,
122  entry_count,
123  desc,
124  alloc,
125  device_id);
126  break;
127  case 8:
128  sort_on_gpu(val_buff, idx_buff, entry_count, desc, alloc, device_id);
129  break;
130  default:
131  // FIXME(miyu): CUDA linker doesn't accept assertion on GPU yet right now.
132  break;
133  }
134 #endif
135 }

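The GPU variant adds a ThrustAllocator and a device id, presumably to route Thrust's scratch allocations through the engine's memory manager and to select the CUDA device. Leaving those two concerns out, an illustrative device-side sketch over raw device pointers, which is what the int64_t*/int32_t* arguments are on this path; whether the real overloads use sort_by_key directly is not shown on this page:

#include <cstdint>

#include <thrust/device_ptr.h>
#include <thrust/execution_policy.h>
#include <thrust/functional.h>
#include <thrust/sort.h>

// Illustrative device-side equivalent of one per-width overload, operating on
// raw device pointers. The real overloads additionally thread a ThrustAllocator
// and a device id through Thrust, omitted here.
template <typename T>
void sort_on_gpu_sketch(T* dev_val_buff,
                        int32_t* dev_idx_buff,
                        const uint64_t entry_count,
                        const bool desc) {
  auto vals = thrust::device_pointer_cast(dev_val_buff);
  auto idxs = thrust::device_pointer_cast(dev_idx_buff);
  if (desc) {
    thrust::sort_by_key(
        thrust::device, vals, vals + entry_count, idxs, thrust::greater<T>());
  } else {
    thrust::sort_by_key(thrust::device, vals, vals + entry_count, idxs);
  }
}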