OmniSciDB  1dac507f6e
 All Classes Namespaces Files Functions Variables Typedefs Enumerations Enumerator Friends Macros Pages
GroupByRuntime.cpp File Reference
#include "JoinHashImpl.h"
#include "MurmurHash.h"
+ Include dependency graph for GroupByRuntime.cpp:
+ This graph shows which files directly or indirectly include this file:

Go to the source code of this file.

Macros

#define DEF_TRANSLATE_NULL_KEY(key_type)
 

Functions

ALWAYS_INLINE DEVICE uint32_t key_hash (const int64_t *key, const uint32_t key_count, const uint32_t key_byte_width)
 
NEVER_INLINE DEVICE int64_t * get_group_value (int64_t *groups_buffer, const uint32_t groups_buffer_entry_count, const int64_t *key, const uint32_t key_count, const uint32_t key_width, const uint32_t row_size_quad, const int64_t *init_vals)
 
NEVER_INLINE DEVICE bool dynamic_watchdog ()
 
NEVER_INLINE DEVICE int64_t * get_group_value_with_watchdog (int64_t *groups_buffer, const uint32_t groups_buffer_entry_count, const int64_t *key, const uint32_t key_count, const uint32_t key_width, const uint32_t row_size_quad, const int64_t *init_vals)
 
NEVER_INLINE DEVICE int32_t get_group_value_columnar_slot (int64_t *groups_buffer, const uint32_t groups_buffer_entry_count, const int64_t *key, const uint32_t key_count, const uint32_t key_width)
 
NEVER_INLINE DEVICE int32_t get_group_value_columnar_slot_with_watchdog (int64_t *groups_buffer, const uint32_t groups_buffer_entry_count, const int64_t *key, const uint32_t key_count, const uint32_t key_width)
 
NEVER_INLINE DEVICE int64_t * get_group_value_columnar (int64_t *groups_buffer, const uint32_t groups_buffer_entry_count, const int64_t *key, const uint32_t key_qw_count)
 
NEVER_INLINE DEVICE int64_t * get_group_value_columnar_with_watchdog (int64_t *groups_buffer, const uint32_t groups_buffer_entry_count, const int64_t *key, const uint32_t key_qw_count)
 
ALWAYS_INLINE DEVICE int64_t * get_group_value_fast (int64_t *groups_buffer, const int64_t key, const int64_t min_key, const int64_t bucket, const uint32_t row_size_quad)
 
ALWAYS_INLINE DEVICE int64_t * get_group_value_fast_with_original_key (int64_t *groups_buffer, const int64_t key, const int64_t orig_key, const int64_t min_key, const int64_t bucket, const uint32_t row_size_quad)
 
ALWAYS_INLINE DEVICE uint32_t get_columnar_group_bin_offset (int64_t *key_base_ptr, const int64_t key, const int64_t min_key, const int64_t bucket)
 
ALWAYS_INLINE DEVICE int64_t * get_scan_output_slot (int64_t *output_buffer, const uint32_t output_buffer_entry_count, const uint32_t pos, const int64_t offset_in_fragment, const uint32_t row_size_quad)
 
ALWAYS_INLINE DEVICE int32_t get_columnar_scan_output_offset (int64_t *output_buffer, const uint32_t output_buffer_entry_count, const uint32_t pos, const int64_t offset_in_fragment)
 
ALWAYS_INLINE DEVICE int64_t bucketized_hash_join_idx (int64_t hash_buff, int64_t const key, int64_t const min_key, int64_t const max_key, int64_t bucket_normalization)
 
ALWAYS_INLINE DEVICE int64_t hash_join_idx (int64_t hash_buff, const int64_t key, const int64_t min_key, const int64_t max_key)
 
ALWAYS_INLINE DEVICE int64_t bucketized_hash_join_idx_nullable (int64_t hash_buff, const int64_t key, const int64_t min_key, const int64_t max_key, const int64_t null_val, const int64_t bucket_normalization)
 
ALWAYS_INLINE DEVICE int64_t hash_join_idx_nullable (int64_t hash_buff, const int64_t key, const int64_t min_key, const int64_t max_key, const int64_t null_val)
 
ALWAYS_INLINE DEVICE int64_t bucketized_hash_join_idx_bitwise (int64_t hash_buff, const int64_t key, const int64_t min_key, const int64_t max_key, const int64_t null_val, const int64_t translated_val, const int64_t bucket_normalization)
 
ALWAYS_INLINE DEVICE int64_t hash_join_idx_bitwise (int64_t hash_buff, const int64_t key, const int64_t min_key, const int64_t max_key, const int64_t null_val, const int64_t translated_val)
 
ALWAYS_INLINE DEVICE int64_t hash_join_idx_sharded (int64_t hash_buff, const int64_t key, const int64_t min_key, const int64_t max_key, const uint32_t entry_count_per_shard, const uint32_t num_shards, const uint32_t device_count)
 
ALWAYS_INLINE DEVICE int64_t hash_join_idx_sharded_nullable (int64_t hash_buff, const int64_t key, const int64_t min_key, const int64_t max_key, const uint32_t entry_count_per_shard, const uint32_t num_shards, const uint32_t device_count, const int64_t null_val)
 
ALWAYS_INLINE DEVICE int64_t hash_join_idx_bitwise_sharded (int64_t hash_buff, const int64_t key, const int64_t min_key, const int64_t max_key, const uint32_t entry_count_per_shard, const uint32_t num_shards, const uint32_t device_count, const int64_t null_val, const int64_t translated_val)
 

Macro Definition Documentation

// Generates an extern "C" helper `translate_null_key_<key_type>` that maps a
// column's NULL sentinel to `translated_val` and returns every other key
// unchanged (widened to int64_t). Used by the bitwise NULL-join path.
#define DEF_TRANSLATE_NULL_KEY(key_type)                                           \
  extern "C" NEVER_INLINE DEVICE int64_t translate_null_key_##key_type(            \
      const key_type key, const key_type null_val, const int64_t translated_val) { \
    if (key == null_val) {                                                         \
      return translated_val;                                                       \
    }                                                                              \
    return key;                                                                    \
  }
#define DEVICE
#define NEVER_INLINE

Definition at line 407 of file GroupByRuntime.cpp.

Function Documentation

// Looks up `key` in a bucketized perfect-hash join table.
//
// The extracted listing had dropped the dereferencing call line; restored here
// from the get_bucketized_hash_slot() prototype (JoinHashImpl.h:31).
//
// Returns the payload stored in the key's hash slot, or -1 when the key lies
// outside the table's covered range [min_key, max_key].
ALWAYS_INLINE DEVICE int64_t bucketized_hash_join_idx(int64_t hash_buff,
                                                      int64_t const key,
                                                      int64_t const min_key,
                                                      int64_t const max_key,
                                                      int64_t bucket_normalization) {
  if (key >= min_key && key <= max_key) {
    return *SUFFIX(get_bucketized_hash_slot)(
        reinterpret_cast<int32_t*>(hash_buff), key, min_key, bucket_normalization);
  }
  return -1;
}
ALWAYS_INLINE DEVICE int32_t *SUFFIX() get_bucketized_hash_slot(int32_t *buff, const int64_t key, const int64_t min_key, const int64_t bucket_normalization)
Definition: JoinHashImpl.h:31
#define SUFFIX(name)

+ Here is the call graph for this function:

+ Here is the caller graph for this function:

// Bucketized join probe with bitwise NULL handling: NULL keys are remapped to
// `translated_val`, which forms its own single-value key range; all other keys
// take the normal probe path.
ALWAYS_INLINE DEVICE int64_t bucketized_hash_join_idx_bitwise(
    int64_t hash_buff,
    const int64_t key,
    const int64_t min_key,
    const int64_t max_key,
    const int64_t null_val,
    const int64_t translated_val) {
  if (key == null_val) {
    return bucketized_hash_join_idx(hash_buff,
                                    translated_val,
                                    min_key,
                                    translated_val,
                                    bucket_normalization);
  }
  return bucketized_hash_join_idx(
      hash_buff, key, min_key, max_key, bucket_normalization);
}
ALWAYS_INLINE DEVICE int64_t bucketized_hash_join_idx(int64_t hash_buff, int64_t const key, int64_t const min_key, int64_t const max_key, int64_t bucket_normalization)

+ Here is the call graph for this function:

// Bucketized join probe for a nullable key: a NULL key never matches (-1);
// otherwise delegate to the regular bucketized probe.
ALWAYS_INLINE DEVICE int64_t bucketized_hash_join_idx_nullable(
    int64_t hash_buff,
    const int64_t key,
    const int64_t min_key,
    const int64_t max_key,
    const int64_t null_val,
    const int64_t bucket_normalization) {
  if (key == null_val) {
    return -1;
  }
  return bucketized_hash_join_idx(
      hash_buff, key, min_key, max_key, bucket_normalization);
}
ALWAYS_INLINE DEVICE int64_t bucketized_hash_join_idx(int64_t hash_buff, int64_t const key, int64_t const min_key, int64_t const max_key, int64_t bucket_normalization)

+ Here is the call graph for this function:

// Cooperative GPU watchdog: returns true when the kernel has exceeded its
// cycle budget (or the host requested an abort), in which case every thread
// of the block returns the same answer so they can bail out together.
NEVER_INLINE DEVICE bool dynamic_watchdog() {
  if (dw_cycle_budget == 0LL) {
    return false;  // Uninitialized watchdog can't check time
  }
  if (dw_abort == 1) {
    return true;  // Received host request to abort
  }
  uint32_t smid = get_smid();
  if (smid >= 128) {
    return false;
  }
  __shared__ volatile int64_t dw_block_cycle_start;  // Thread block shared cycle start
  __shared__ volatile bool
      dw_should_terminate;  // all threads within a block should return together if
                            // watchdog criteria is met

  // Thread 0 either initializes or reads the initial clock cycle; the result is
  // stored into shared memory. Since all threads within a block share the same
  // SM, there's no point in using more threads here.
  if (threadIdx.x == 0) {
    dw_block_cycle_start = 0LL;
    int64_t cycle_count = static_cast<int64_t>(clock64());
    // Make sure the block hasn't switched SMs
    if (smid == get_smid()) {
      // CAS publishes the first observed cycle count for this SM; later blocks
      // read back the recorded start instead of overwriting it.
      dw_block_cycle_start = static_cast<int64_t>(
          atomicCAS(reinterpret_cast<unsigned long long*>(&dw_sm_cycle_start[smid]),
                    0ULL,
                    static_cast<unsigned long long>(cycle_count)));
    }

    int64_t cycles = cycle_count - dw_block_cycle_start;
    if ((smid == get_smid()) && (dw_block_cycle_start > 0LL) &&
        (cycles > dw_cycle_budget)) {
      // Check if we're out of time on this particular SM
      dw_should_terminate = true;
    } else {
      dw_should_terminate = false;
    }
  }
  __syncthreads();
  return dw_should_terminate;
}
__device__ int64_t dw_sm_cycle_start[128]
__device__ int64_t dw_cycle_budget
__inline__ __device__ uint32_t get_smid(void)
__device__ int32_t dw_abort

+ Here is the call graph for this function:

+ Here is the caller graph for this function:

// Computes the bin offset of `key` in a columnar perfect-hash group-by buffer
// (optionally bucketized), writing the key into an empty bin on first use.
ALWAYS_INLINE DEVICE uint32_t get_columnar_group_bin_offset(int64_t* key_base_ptr,
                                                            const int64_t key,
                                                            const int64_t min_key,
                                                            const int64_t bucket) {
  int64_t bin = key - min_key;
  if (bucket) {
    bin /= bucket;  // bucketized range: collapse [min_key, ...) into buckets
  }
  if (key_base_ptr[bin] == EMPTY_KEY_64) {
    key_base_ptr[bin] = key;  // claim the bin on first touch
  }
  return bin;
}
#define EMPTY_KEY_64
// Writes the fragment offset of a scanned row into a columnar output buffer
// and returns its position, or -1 when the buffer is already full.
ALWAYS_INLINE DEVICE int32_t get_columnar_scan_output_offset(
    int64_t* output_buffer,
    const uint32_t output_buffer_entry_count,
    const uint32_t pos,
    const int64_t offset_in_fragment) {
  if (pos >= output_buffer_entry_count) {
    return -1;  // no room left in the output buffer
  }
  output_buffer[pos] = offset_in_fragment;
  return pos;
}
// Finds or creates the row for `key` in a row-wise group-by hash buffer using
// open addressing with linear probing, and returns a pointer to the row's
// payload (one slot past the stored key). Returns NULL when the buffer is full.
NEVER_INLINE DEVICE int64_t* get_group_value(int64_t* groups_buffer,
                                             const uint32_t groups_buffer_entry_count,
                                             const int64_t* key,
                                             const uint32_t key_count,
                                             const uint32_t key_width,
                                             const uint32_t row_size_quad,
                                             const int64_t* init_vals) {
  const uint32_t start =
      key_hash(key, key_count, key_width) % groups_buffer_entry_count;
  // Probe every slot at most once, starting at the hash position and wrapping.
  for (uint32_t probe = 0; probe < groups_buffer_entry_count; ++probe) {
    const uint32_t slot = (start + probe) % groups_buffer_entry_count;
    int64_t* matching_group = get_matching_group_value(
        groups_buffer, slot, key, key_count, key_width, row_size_quad, init_vals);
    if (matching_group) {
      return matching_group;
    }
  }
  return NULL;
}
const int32_t groups_buffer_size return groups_buffer
ALWAYS_INLINE DEVICE uint32_t key_hash(const int64_t *key, const uint32_t key_count, const uint32_t key_byte_width)
__device__ int64_t * get_matching_group_value(int64_t *groups_buffer, const uint32_t h, const T *key, const uint32_t key_count, const uint32_t row_size_quad)
const int64_t const uint32_t groups_buffer_entry_count
const int64_t * init_vals

+ Here is the call graph for this function:

+ Here is the caller graph for this function:

// Finds or creates the entry for `key` in a columnar group-by hash buffer and
// returns a pointer to its slot, or NULL when the buffer is full.
//
// The extracted listing had dropped the argument lines of both
// get_matching_group_value_columnar() calls; restored here from the visible
// prototype (groups_buffer, h, key, key_qw_count, entry_count).
NEVER_INLINE DEVICE int64_t* get_group_value_columnar(
    int64_t* groups_buffer,
    const uint32_t groups_buffer_entry_count,
    const int64_t* key,
    const uint32_t key_qw_count) {
  // Keys are quadword-aligned in the columnar layout, hence sizeof(int64_t).
  uint32_t h = key_hash(key, key_qw_count, sizeof(int64_t)) % groups_buffer_entry_count;
  int64_t* matching_group = get_matching_group_value_columnar(
      groups_buffer, h, key, key_qw_count, groups_buffer_entry_count);
  if (matching_group) {
    return matching_group;
  }
  // Linear probing: wrap around until we return to the starting slot.
  uint32_t h_probe = (h + 1) % groups_buffer_entry_count;
  while (h_probe != h) {
    matching_group = get_matching_group_value_columnar(
        groups_buffer, h_probe, key, key_qw_count, groups_buffer_entry_count);
    if (matching_group) {
      return matching_group;
    }
    h_probe = (h_probe + 1) % groups_buffer_entry_count;
  }
  return NULL;
}
const int32_t groups_buffer_size return groups_buffer
__device__ int64_t * get_matching_group_value_columnar(int64_t *groups_buffer, const uint32_t h, const int64_t *key, const uint32_t key_qw_count, const size_t entry_count)
ALWAYS_INLINE DEVICE uint32_t key_hash(const int64_t *key, const uint32_t key_count, const uint32_t key_byte_width)
const int64_t const uint32_t groups_buffer_entry_count
const int64_t const uint32_t const uint32_t key_qw_count

+ Here is the call graph for this function:

+ Here is the caller graph for this function:

// Finds or creates the entry for `key` in a columnar group-by buffer and
// returns its slot index, or -1 when the buffer is full.
//
// The extracted listing had dropped the
// `matching_slot = get_matching_group_value_columnar_slot(` assignment inside
// the probe loop; restored here to match the initial probe above it.
NEVER_INLINE DEVICE int32_t get_group_value_columnar_slot(
    int64_t* groups_buffer,
    const uint32_t groups_buffer_entry_count,
    const int64_t* key,
    const uint32_t key_count,
    const uint32_t key_width) {
  uint32_t h = key_hash(key, key_count, key_width) % groups_buffer_entry_count;
  int32_t matching_slot = get_matching_group_value_columnar_slot(
      groups_buffer, groups_buffer_entry_count, h, key, key_count, key_width);
  if (matching_slot != -1) {
    return h;
  }
  // Linear probing: wrap around until we return to the starting slot.
  uint32_t h_probe = (h + 1) % groups_buffer_entry_count;
  while (h_probe != h) {
    matching_slot = get_matching_group_value_columnar_slot(
        groups_buffer, groups_buffer_entry_count, h_probe, key, key_count, key_width);
    if (matching_slot != -1) {
      return h_probe;
    }
    h_probe = (h_probe + 1) % groups_buffer_entry_count;
  }
  return -1;
}
const int32_t groups_buffer_size return groups_buffer
ALWAYS_INLINE DEVICE uint32_t key_hash(const int64_t *key, const uint32_t key_count, const uint32_t key_byte_width)
const int64_t const uint32_t groups_buffer_entry_count
__device__ int32_t get_matching_group_value_columnar_slot(int64_t *groups_buffer, const uint32_t entry_count, const uint32_t h, const T *key, const uint32_t key_count)

+ Here is the call graph for this function:

// Watchdog-aware variant of get_group_value_columnar_slot(): checks
// dynamic_watchdog() every 100 unsuccessful probes and bails out with -1 when
// the watchdog fires.
//
// The extracted listing had dropped the
// `matching_slot = get_matching_group_value_columnar_slot(` assignment inside
// the probe loop; restored here to match the initial probe above it.
NEVER_INLINE DEVICE int32_t get_group_value_columnar_slot_with_watchdog(
    int64_t* groups_buffer,
    const uint32_t groups_buffer_entry_count,
    const int64_t* key,
    const uint32_t key_count,
    const uint32_t key_width) {
  uint32_t h = key_hash(key, key_count, key_width) % groups_buffer_entry_count;
  int32_t matching_slot = get_matching_group_value_columnar_slot(
      groups_buffer, groups_buffer_entry_count, h, key, key_count, key_width);
  if (matching_slot != -1) {
    return h;
  }
  uint32_t watchdog_countdown = 100;
  uint32_t h_probe = (h + 1) % groups_buffer_entry_count;
  while (h_probe != h) {
    matching_slot = get_matching_group_value_columnar_slot(
        groups_buffer, groups_buffer_entry_count, h_probe, key, key_count, key_width);
    if (matching_slot != -1) {
      return h_probe;
    }
    h_probe = (h_probe + 1) % groups_buffer_entry_count;
    if (--watchdog_countdown == 0) {
      if (dynamic_watchdog()) {
        return -1;  // out of time; abort the probe
      }
      watchdog_countdown = 100;
    }
  }
  return -1;
}
const int32_t groups_buffer_size return groups_buffer
__device__ bool dynamic_watchdog()
ALWAYS_INLINE DEVICE uint32_t key_hash(const int64_t *key, const uint32_t key_count, const uint32_t key_byte_width)
const int64_t const uint32_t groups_buffer_entry_count
__device__ int32_t get_matching_group_value_columnar_slot(int64_t *groups_buffer, const uint32_t entry_count, const uint32_t h, const T *key, const uint32_t key_count)

+ Here is the call graph for this function:

// Watchdog-aware variant of get_group_value_columnar(): checks
// dynamic_watchdog() every 100 unsuccessful probes and bails out with NULL
// when the watchdog fires.
//
// The extracted listing had dropped the argument lines of both
// get_matching_group_value_columnar() calls; restored here from the visible
// prototype (groups_buffer, h, key, key_qw_count, entry_count).
NEVER_INLINE DEVICE int64_t* get_group_value_columnar_with_watchdog(
    int64_t* groups_buffer,
    const uint32_t groups_buffer_entry_count,
    const int64_t* key,
    const uint32_t key_qw_count) {
  uint32_t h = key_hash(key, key_qw_count, sizeof(int64_t)) % groups_buffer_entry_count;
  int64_t* matching_group = get_matching_group_value_columnar(
      groups_buffer, h, key, key_qw_count, groups_buffer_entry_count);
  if (matching_group) {
    return matching_group;
  }
  uint32_t watchdog_countdown = 100;
  uint32_t h_probe = (h + 1) % groups_buffer_entry_count;
  while (h_probe != h) {
    matching_group = get_matching_group_value_columnar(
        groups_buffer, h_probe, key, key_qw_count, groups_buffer_entry_count);
    if (matching_group) {
      return matching_group;
    }
    h_probe = (h_probe + 1) % groups_buffer_entry_count;
    if (--watchdog_countdown == 0) {
      if (dynamic_watchdog()) {
        return NULL;  // out of time; abort the probe
      }
      watchdog_countdown = 100;
    }
  }
  return NULL;
}
const int32_t groups_buffer_size return groups_buffer
__device__ bool dynamic_watchdog()
__device__ int64_t * get_matching_group_value_columnar(int64_t *groups_buffer, const uint32_t h, const int64_t *key, const uint32_t key_qw_count, const size_t entry_count)
ALWAYS_INLINE DEVICE uint32_t key_hash(const int64_t *key, const uint32_t key_count, const uint32_t key_byte_width)
const int64_t const uint32_t groups_buffer_entry_count
const int64_t const uint32_t const uint32_t key_qw_count

+ Here is the call graph for this function:

// Perfect-hash fast path: the (optionally bucketized) key maps directly to a
// row; no probing. Writes the key into an empty slot on first use and returns
// a pointer to the row's payload (one slot past the key).
ALWAYS_INLINE DEVICE int64_t* get_group_value_fast(int64_t* groups_buffer,
                                                   const int64_t key,
                                                   const int64_t min_key,
                                                   const int64_t bucket,
                                                   const uint32_t row_size_quad) {
  int64_t bin = key - min_key;
  if (bucket) {
    bin /= bucket;  // bucketized range: collapse keys into buckets
  }
  const int64_t row_off = bin * row_size_quad;
  if (groups_buffer[row_off] == EMPTY_KEY_64) {
    groups_buffer[row_off] = key;  // claim the row on first touch
  }
  return groups_buffer + row_off + 1;
}
const int32_t groups_buffer_size return groups_buffer
#define EMPTY_KEY_64
// Like get_group_value_fast(), but the slot is claimed with `orig_key`
// (the pre-translation key) while `key` only drives the bin computation.
ALWAYS_INLINE DEVICE int64_t* get_group_value_fast_with_original_key(
    int64_t* groups_buffer,
    const int64_t key,
    const int64_t orig_key,
    const int64_t min_key,
    const int64_t bucket,
    const uint32_t row_size_quad) {
  int64_t bin = key - min_key;
  if (bucket) {
    bin /= bucket;  // bucketized range: collapse keys into buckets
  }
  const int64_t row_off = bin * row_size_quad;
  if (groups_buffer[row_off] == EMPTY_KEY_64) {
    groups_buffer[row_off] = orig_key;  // store the original, untranslated key
  }
  return groups_buffer + row_off + 1;
}
const int32_t groups_buffer_size return groups_buffer
#define EMPTY_KEY_64
// Watchdog-aware variant of get_group_value(): linear probing over the whole
// buffer, consulting dynamic_watchdog() every 100 unsuccessful probes (after
// the initial one) and returning NULL if it fires.
NEVER_INLINE DEVICE int64_t* get_group_value_with_watchdog(
    int64_t* groups_buffer,
    const uint32_t groups_buffer_entry_count,
    const int64_t* key,
    const uint32_t key_count,
    const uint32_t key_width,
    const uint32_t row_size_quad,
    const int64_t* init_vals) {
  const uint32_t start =
      key_hash(key, key_count, key_width) % groups_buffer_entry_count;
  uint32_t watchdog_countdown = 100;
  for (uint32_t probe = 0; probe < groups_buffer_entry_count; ++probe) {
    const uint32_t slot = (start + probe) % groups_buffer_entry_count;
    int64_t* matching_group = get_matching_group_value(
        groups_buffer, slot, key, key_count, key_width, row_size_quad, init_vals);
    if (matching_group) {
      return matching_group;
    }
    // The initial probe (probe == 0) does not consume the countdown,
    // matching the original two-phase structure.
    if (probe > 0 && --watchdog_countdown == 0) {
      if (dynamic_watchdog()) {
        return NULL;  // out of time; abort the probe
      }
      watchdog_countdown = 100;
    }
  }
  return NULL;
}
const int32_t groups_buffer_size return groups_buffer
__device__ bool dynamic_watchdog()
ALWAYS_INLINE DEVICE uint32_t key_hash(const int64_t *key, const uint32_t key_count, const uint32_t key_byte_width)
__device__ int64_t * get_matching_group_value(int64_t *groups_buffer, const uint32_t h, const T *key, const uint32_t key_count, const uint32_t row_size_quad)
const int64_t const uint32_t groups_buffer_entry_count
const int64_t * init_vals

+ Here is the call graph for this function:

// Claims the output row at `pos` in a row-wise scan output buffer, records the
// fragment offset in its first slot, and returns a pointer to the payload
// (one slot past the offset). Returns NULL when the buffer is full.
ALWAYS_INLINE DEVICE int64_t* get_scan_output_slot(
    int64_t* output_buffer,
    const uint32_t output_buffer_entry_count,
    const uint32_t pos,
    const int64_t offset_in_fragment,
    const uint32_t row_size_quad) {
  if (pos >= output_buffer_entry_count) {
    return NULL;  // no room left in the output buffer
  }
  // Widen before multiplying to avoid 32-bit overflow on large buffers.
  const uint64_t row_off =
      static_cast<uint64_t>(pos) * static_cast<uint64_t>(row_size_quad);
  output_buffer[row_off] = offset_in_fragment;
  return output_buffer + row_off + 1;
}
// Looks up `key` in a perfect-hash join table. Returns the payload stored in
// the key's hash slot, or -1 when the key lies outside [min_key, max_key].
ALWAYS_INLINE DEVICE int64_t hash_join_idx(int64_t hash_buff,
                                           const int64_t key,
                                           const int64_t min_key,
                                           const int64_t max_key) {
  if (key < min_key || key > max_key) {
    return -1;  // out-of-range keys cannot be present
  }
  return *SUFFIX(get_hash_slot)(reinterpret_cast<int32_t*>(hash_buff), key, min_key);
}
#define SUFFIX(name)
ALWAYS_INLINE DEVICE int32_t *SUFFIX() get_hash_slot(int32_t *buff, const int64_t key, const int64_t min_key)
Definition: JoinHashImpl.h:39

+ Here is the call graph for this function:

+ Here is the caller graph for this function:

// Join probe with bitwise NULL handling: NULL keys are remapped to
// `translated_val`, which forms its own single-value key range; all other
// keys take the normal probe path.
ALWAYS_INLINE DEVICE int64_t hash_join_idx_bitwise(int64_t hash_buff,
                                                   const int64_t key,
                                                   const int64_t min_key,
                                                   const int64_t max_key,
                                                   const int64_t null_val,
                                                   const int64_t translated_val) {
  if (key == null_val) {
    return hash_join_idx(hash_buff, translated_val, min_key, translated_val);
  }
  return hash_join_idx(hash_buff, key, min_key, max_key);
}
ALWAYS_INLINE DEVICE int64_t hash_join_idx(int64_t hash_buff, const int64_t key, const int64_t min_key, const int64_t max_key)

+ Here is the call graph for this function:

// Sharded join probe with bitwise NULL handling: NULL keys are remapped to
// `translated_val` (its own single-value range); everything else goes through
// the regular sharded probe.
ALWAYS_INLINE DEVICE int64_t hash_join_idx_bitwise_sharded(
    int64_t hash_buff,
    const int64_t key,
    const int64_t min_key,
    const int64_t max_key,
    const uint32_t entry_count_per_shard,
    const uint32_t num_shards,
    const uint32_t device_count,
    const int64_t null_val,
    const int64_t translated_val) {
  if (key == null_val) {
    return hash_join_idx_sharded(hash_buff,
                                 translated_val,
                                 min_key,
                                 translated_val,
                                 entry_count_per_shard,
                                 num_shards,
                                 device_count);
  }
  return hash_join_idx_sharded(hash_buff,
                               key,
                               min_key,
                               max_key,
                               entry_count_per_shard,
                               num_shards,
                               device_count);
}
ALWAYS_INLINE DEVICE int64_t hash_join_idx_sharded(int64_t hash_buff, const int64_t key, const int64_t min_key, const int64_t max_key, const uint32_t entry_count_per_shard, const uint32_t num_shards, const uint32_t device_count)

+ Here is the call graph for this function:

// Join probe for a nullable key: a NULL key never matches (-1); otherwise
// delegate to the regular probe.
ALWAYS_INLINE DEVICE int64_t hash_join_idx_nullable(int64_t hash_buff,
                                                    const int64_t key,
                                                    const int64_t min_key,
                                                    const int64_t max_key,
                                                    const int64_t null_val) {
  if (key == null_val) {
    return -1;
  }
  return hash_join_idx(hash_buff, key, min_key, max_key);
}
ALWAYS_INLINE DEVICE int64_t hash_join_idx(int64_t hash_buff, const int64_t key, const int64_t min_key, const int64_t max_key)

+ Here is the call graph for this function:

// Looks up `key` in a sharded perfect-hash join table. Returns the payload
// stored in the key's shard-local slot, or -1 when the key lies outside
// [min_key, max_key].
ALWAYS_INLINE DEVICE int64_t hash_join_idx_sharded(
    int64_t hash_buff,
    const int64_t key,
    const int64_t min_key,
    const int64_t max_key,
    const uint32_t entry_count_per_shard,
    const uint32_t num_shards,
    const uint32_t device_count) {
  if (key < min_key || key > max_key) {
    return -1;  // out-of-range keys cannot be present
  }
  return *SUFFIX(get_hash_slot_sharded)(reinterpret_cast<int32_t*>(hash_buff),
                                        key,
                                        min_key,
                                        entry_count_per_shard,
                                        num_shards,
                                        device_count);
}
#define SUFFIX(name)
ALWAYS_INLINE DEVICE int32_t *SUFFIX() get_hash_slot_sharded(int32_t *buff, const int64_t key, const int64_t min_key, const uint32_t entry_count_per_shard, const uint32_t num_shards, const uint32_t device_count)
Definition: JoinHashImpl.h:60

+ Here is the call graph for this function:

+ Here is the caller graph for this function:

// Sharded join probe for a nullable key: a NULL key never matches (-1);
// otherwise delegate to the regular sharded probe.
ALWAYS_INLINE DEVICE int64_t hash_join_idx_sharded_nullable(
    int64_t hash_buff,
    const int64_t key,
    const int64_t min_key,
    const int64_t max_key,
    const uint32_t entry_count_per_shard,
    const uint32_t num_shards,
    const uint32_t device_count,
    const int64_t null_val) {
  if (key == null_val) {
    return -1;
  }
  return hash_join_idx_sharded(hash_buff,
                               key,
                               min_key,
                               max_key,
                               entry_count_per_shard,
                               num_shards,
                               device_count);
}
ALWAYS_INLINE DEVICE int64_t hash_join_idx_sharded(int64_t hash_buff, const int64_t key, const int64_t min_key, const int64_t max_key, const uint32_t entry_count_per_shard, const uint32_t num_shards, const uint32_t device_count)

+ Here is the call graph for this function:

// Hashes a composite group-by key by feeding its raw bytes to MurmurHash1.
// Total length is component width times component count; seed is fixed at 0
// so the hash is deterministic across invocations.
ALWAYS_INLINE DEVICE uint32_t key_hash(const int64_t* key,
                                       const uint32_t key_count,
                                       const uint32_t key_byte_width) {
  const uint32_t key_size_bytes = key_byte_width * key_count;
  return MurmurHash1(key, key_size_bytes, 0);
}
NEVER_INLINE DEVICE uint32_t MurmurHash1(const void *key, int len, const uint32_t seed)
Definition: MurmurHash.cpp:20

+ Here is the call graph for this function:

+ Here is the caller graph for this function: