OmniSciDB  2e3a973ef4
RuntimeFunctions.cpp File Reference
#include "RuntimeFunctions.h"
#include "../Shared/funcannotations.h"
#include "BufferCompaction.h"
#include "HyperLogLogRank.h"
#include "MurmurHash.h"
#include "TypePunning.h"
#include <algorithm>
#include <atomic>
#include <chrono>
#include <cmath>
#include <cstring>
#include <thread>
#include <tuple>
#include "DecodersImpl.h"
#include "GroupByRuntime.cpp"
#include "JoinHashTable/JoinHashTableQueryRuntime.cpp"
#include "TopKRuntime.cpp"
+ Include dependency graph for RuntimeFunctions.cpp:

Go to the source code of this file.

Macros

#define DEF_ARITH_NULLABLE(type, null_type, opname, opsym)
 
#define DEF_ARITH_NULLABLE_LHS(type, null_type, opname, opsym)
 
#define DEF_ARITH_NULLABLE_RHS(type, null_type, opname, opsym)
 
#define DEF_CMP_NULLABLE(type, null_type, opname, opsym)
 
#define DEF_CMP_NULLABLE_LHS(type, null_type, opname, opsym)
 
#define DEF_CMP_NULLABLE_RHS(type, null_type, opname, opsym)
 
#define DEF_SAFE_DIV_NULLABLE(type, null_type, opname)
 
#define DEF_BINARY_NULLABLE_ALL_OPS(type, null_type)
 
#define DEF_UMINUS_NULLABLE(type, null_type)
 
#define DEF_CAST_NULLABLE(from_type, to_type)
 
#define DEF_CAST_NULLABLE_BIDIR(type1, type2)
 
#define GPU_RT_STUB   NEVER_INLINE __attribute__((optnone))
 
#define DEF_AGG_MAX_INT(n)
 
#define DEF_AGG_MIN_INT(n)
 
#define DEF_AGG_ID_INT(n)
 
#define DEF_CHECKED_SINGLE_AGG_ID_INT(n)
 
#define DEF_WRITE_PROJECTION_INT(n)
 
#define DEF_SKIP_AGG_ADD(base_agg_func)
 
#define DEF_SKIP_AGG(base_agg_func)
 
#define DATA_T   int64_t
 
#define DATA_T   int32_t
 
#define DATA_T   int16_t
 
#define DATA_T   int8_t
 
#define DEF_SKIP_AGG_ADD(base_agg_func)
 
#define DEF_SKIP_AGG(base_agg_func)
 
#define DATA_T   double
 
#define ADDR_T   int64_t
 
#define DATA_T   float
 
#define ADDR_T   int32_t
 
#define DEF_SHARED_AGG_RET_STUBS(base_agg_func)
 
#define DEF_SHARED_AGG_STUBS(base_agg_func)
 

Functions

ALWAYS_INLINE int64_t scale_decimal_up (const int64_t operand, const uint64_t scale, const int64_t operand_null_val, const int64_t result_null_val)
 
ALWAYS_INLINE int64_t scale_decimal_down_nullable (const int64_t operand, const int64_t scale, const int64_t null_val)
 
ALWAYS_INLINE int64_t scale_decimal_down_not_nullable (const int64_t operand, const int64_t scale, const int64_t null_val)
 
ALWAYS_INLINE int64_t floor_div_lhs (const int64_t dividend, const int64_t divisor)
 
ALWAYS_INLINE int64_t floor_div_nullable_lhs (const int64_t dividend, const int64_t divisor, const int64_t null_val)
 
ALWAYS_INLINE int8_t logical_not (const int8_t operand, const int8_t null_val)
 
ALWAYS_INLINE int8_t logical_and (const int8_t lhs, const int8_t rhs, const int8_t null_val)
 
ALWAYS_INLINE int8_t logical_or (const int8_t lhs, const int8_t rhs, const int8_t null_val)
 
ALWAYS_INLINE uint64_t agg_count (uint64_t *agg, const int64_t)
 
ALWAYS_INLINE void agg_count_distinct_bitmap (int64_t *agg, const int64_t val, const int64_t min_val)
 
GPU_RT_STUB void agg_count_distinct_bitmap_gpu (int64_t *, const int64_t, const int64_t, const int64_t, const int64_t, const uint64_t, const uint64_t)
 
NEVER_INLINE void agg_approximate_count_distinct (int64_t *agg, const int64_t key, const uint32_t b)
 
GPU_RT_STUB void agg_approximate_count_distinct_gpu (int64_t *, const int64_t, const uint32_t, const int64_t, const int64_t)
 
ALWAYS_INLINE int8_t bit_is_set (const int64_t bitset, const int64_t val, const int64_t min_val, const int64_t max_val, const int64_t null_val, const int8_t null_bool_val)
 
ALWAYS_INLINE int64_t agg_sum (int64_t *agg, const int64_t val)
 
ALWAYS_INLINE void agg_max (int64_t *agg, const int64_t val)
 
ALWAYS_INLINE void agg_min (int64_t *agg, const int64_t val)
 
ALWAYS_INLINE void agg_id (int64_t *agg, const int64_t val)
 
ALWAYS_INLINE int32_t checked_single_agg_id (int64_t *agg, const int64_t val, const int64_t null_val)
 
ALWAYS_INLINE void agg_count_distinct_bitmap_skip_val (int64_t *agg, const int64_t val, const int64_t min_val, const int64_t skip_val)
 
GPU_RT_STUB void agg_count_distinct_bitmap_skip_val_gpu (int64_t *, const int64_t, const int64_t, const int64_t, const int64_t, const int64_t, const uint64_t, const uint64_t)
 
ALWAYS_INLINE uint32_t agg_count_int32 (uint32_t *agg, const int32_t)
 
ALWAYS_INLINE int32_t agg_sum_int32 (int32_t *agg, const int32_t val)
 
ALWAYS_INLINE int64_t agg_sum_skip_val (int64_t *agg, const int64_t val, const int64_t skip_val)
 
ALWAYS_INLINE int32_t agg_sum_int32_skip_val (int32_t *agg, const int32_t val, const int32_t skip_val)
 
ALWAYS_INLINE uint64_t agg_count_skip_val (uint64_t *agg, const int64_t val, const int64_t skip_val)
 
ALWAYS_INLINE uint32_t agg_count_int32_skip_val (uint32_t *agg, const int32_t val, const int32_t skip_val)
 
ALWAYS_INLINE uint64_t agg_count_double (uint64_t *agg, const double val)
 
ALWAYS_INLINE void agg_sum_double (int64_t *agg, const double val)
 
ALWAYS_INLINE void agg_max_double (int64_t *agg, const double val)
 
ALWAYS_INLINE void agg_min_double (int64_t *agg, const double val)
 
ALWAYS_INLINE void agg_id_double (int64_t *agg, const double val)
 
ALWAYS_INLINE int32_t checked_single_agg_id_double (int64_t *agg, const double val, const double null_val)
 
ALWAYS_INLINE uint32_t agg_count_float (uint32_t *agg, const float val)
 
ALWAYS_INLINE void agg_sum_float (int32_t *agg, const float val)
 
ALWAYS_INLINE void agg_max_float (int32_t *agg, const float val)
 
ALWAYS_INLINE void agg_min_float (int32_t *agg, const float val)
 
ALWAYS_INLINE void agg_id_float (int32_t *agg, const float val)
 
ALWAYS_INLINE int32_t checked_single_agg_id_float (int32_t *agg, const float val, const float null_val)
 
ALWAYS_INLINE uint64_t agg_count_double_skip_val (uint64_t *agg, const double val, const double skip_val)
 
ALWAYS_INLINE uint32_t agg_count_float_skip_val (uint32_t *agg, const float val, const float skip_val)
 
ALWAYS_INLINE int64_t decimal_floor (const int64_t x, const int64_t scale)
 
ALWAYS_INLINE int64_t decimal_ceil (const int64_t x, const int64_t scale)
 
GPU_RT_STUB int32_t checked_single_agg_id_shared (int64_t *agg, const int64_t val, const int64_t null_val)
 
GPU_RT_STUB int32_t checked_single_agg_id_int32_shared (int32_t *agg, const int32_t val, const int32_t null_val)
 
GPU_RT_STUB int32_t checked_single_agg_id_int16_shared (int16_t *agg, const int16_t val, const int16_t null_val)
 
GPU_RT_STUB int32_t checked_single_agg_id_int8_shared (int8_t *agg, const int8_t val, const int8_t null_val)
 
GPU_RT_STUB int32_t checked_single_agg_id_double_shared (int64_t *agg, const double val, const double null_val)
 
GPU_RT_STUB int32_t checked_single_agg_id_float_shared (int32_t *agg, const float val, const float null_val)
 
GPU_RT_STUB void agg_max_int16_skip_val_shared (int16_t *agg, const int16_t val, const int16_t skip_val)
 
GPU_RT_STUB void agg_max_int8_skip_val_shared (int8_t *agg, const int8_t val, const int8_t skip_val)
 
GPU_RT_STUB void agg_min_int16_skip_val_shared (int16_t *agg, const int16_t val, const int16_t skip_val)
 
GPU_RT_STUB void agg_min_int8_skip_val_shared (int8_t *agg, const int8_t val, const int8_t skip_val)
 
GPU_RT_STUB void agg_id_double_shared_slow (int64_t *agg, const double *val)
 
GPU_RT_STUB int64_t agg_sum_shared (int64_t *agg, const int64_t val)
 
GPU_RT_STUB int64_t agg_sum_skip_val_shared (int64_t *agg, const int64_t val, const int64_t skip_val)
 
GPU_RT_STUB int32_t agg_sum_int32_shared (int32_t *agg, const int32_t val)
 
GPU_RT_STUB int32_t agg_sum_int32_skip_val_shared (int32_t *agg, const int32_t val, const int32_t skip_val)
 
GPU_RT_STUB void agg_sum_double_shared (int64_t *agg, const double val)
 
GPU_RT_STUB void agg_sum_double_skip_val_shared (int64_t *agg, const double val, const double skip_val)
 
GPU_RT_STUB void agg_sum_float_shared (int32_t *agg, const float val)
 
GPU_RT_STUB void agg_sum_float_skip_val_shared (int32_t *agg, const float val, const float skip_val)
 
GPU_RT_STUB void force_sync ()
 
GPU_RT_STUB void sync_warp ()
 
GPU_RT_STUB void sync_warp_protected (int64_t thread_pos, int64_t row_count)
 
GPU_RT_STUB void sync_threadblock ()
 
GPU_RT_STUB void write_back_non_grouped_agg (int64_t *input_buffer, int64_t *output_buffer, const int32_t num_agg_cols)
 
 __attribute__ ((noinline)) int32_t pos_start_impl(int32_t *error_code)
 
GPU_RT_STUB int8_t thread_warp_idx (const int8_t warp_sz)
 
GPU_RT_STUB int64_t get_thread_index ()
 
GPU_RT_STUB int64_t * declare_dynamic_shared_memory ()
 
GPU_RT_STUB int64_t get_block_index ()
 
ALWAYS_INLINE int32_t record_error_code (const int32_t err_code, int32_t *error_codes)
 
int64_t * init_shared_mem (const int64_t *global_groups_buffer, const int32_t groups_buffer_size)
 
template<typename T >
ALWAYS_INLINE int64_t * get_matching_group_value (int64_t *groups_buffer, const uint32_t h, const T *key, const uint32_t key_count, const uint32_t row_size_quad)
 
ALWAYS_INLINE int64_t * get_matching_group_value (int64_t *groups_buffer, const uint32_t h, const int64_t *key, const uint32_t key_count, const uint32_t key_width, const uint32_t row_size_quad, const int64_t *init_vals)
 
template<typename T >
ALWAYS_INLINE int32_t get_matching_group_value_columnar_slot (int64_t *groups_buffer, const uint32_t entry_count, const uint32_t h, const T *key, const uint32_t key_count)
 
ALWAYS_INLINE int32_t get_matching_group_value_columnar_slot (int64_t *groups_buffer, const uint32_t entry_count, const uint32_t h, const int64_t *key, const uint32_t key_count, const uint32_t key_width)
 
ALWAYS_INLINE int64_t * get_matching_group_value_columnar (int64_t *groups_buffer, const uint32_t h, const int64_t *key, const uint32_t key_qw_count, const size_t entry_count)
 
ALWAYS_INLINE int64_t * get_matching_group_value_perfect_hash (int64_t *groups_buffer, const uint32_t hashed_index, const int64_t *key, const uint32_t key_count, const uint32_t row_size_quad)
 
ALWAYS_INLINE int64_t * get_matching_group_value_perfect_hash_keyless (int64_t *groups_buffer, const uint32_t hashed_index, const uint32_t row_size_quad)
 
ALWAYS_INLINE void set_matching_group_value_perfect_hash_columnar (int64_t *groups_buffer, const uint32_t hashed_index, const int64_t *key, const uint32_t key_count, const uint32_t entry_count)
 
ALWAYS_INLINE int64_t * get_group_value_fast_keyless (int64_t *groups_buffer, const int64_t key, const int64_t min_key, const int64_t, const uint32_t row_size_quad)
 
ALWAYS_INLINE int64_t * get_group_value_fast_keyless_semiprivate (int64_t *groups_buffer, const int64_t key, const int64_t min_key, const int64_t, const uint32_t row_size_quad, const uint8_t thread_warp_idx, const uint8_t warp_size)
 
ALWAYS_INLINE int8_t * extract_str_ptr (const uint64_t str_and_len)
 
ALWAYS_INLINE int32_t extract_str_len (const uint64_t str_and_len)
 
ALWAYS_INLINE uint64_t string_pack (const int8_t *ptr, const int32_t len)
 
ALWAYS_INLINE DEVICE int32_t char_length (const char *str, const int32_t str_len)
 
ALWAYS_INLINE DEVICE int32_t char_length_nullable (const char *str, const int32_t str_len, const int32_t int_null)
 
ALWAYS_INLINE DEVICE int32_t key_for_string_encoded (const int32_t str_id)
 
ALWAYS_INLINE DEVICE bool sample_ratio (const double proportion, const int64_t row_offset)
 
ALWAYS_INLINE int64_t row_number_window_func (const int64_t output_buff, const int64_t pos)
 
ALWAYS_INLINE double percent_window_func (const int64_t output_buff, const int64_t pos)
 
ALWAYS_INLINE double load_double (const int64_t *agg)
 
ALWAYS_INLINE float load_float (const int32_t *agg)
 
ALWAYS_INLINE double load_avg_int (const int64_t *sum, const int64_t *count, const double null_val)
 
ALWAYS_INLINE double load_avg_decimal (const int64_t *sum, const int64_t *count, const double null_val, const uint32_t scale)
 
ALWAYS_INLINE double load_avg_double (const int64_t *agg, const int64_t *count, const double null_val)
 
ALWAYS_INLINE double load_avg_float (const int32_t *agg, const int32_t *count, const double null_val)
 
NEVER_INLINE void linear_probabilistic_count (uint8_t *bitmap, const uint32_t bitmap_bytes, const uint8_t *key_bytes, const uint32_t key_len)
 
void multifrag_query_hoisted_literals (const int8_t ***col_buffers, const uint64_t *num_fragments, const int8_t *literals, const int64_t *num_rows, const uint64_t *frag_row_offsets, const int32_t *max_matched, int32_t *total_matched, const int64_t *init_agg_value, int64_t **out, int32_t *error_code, const uint32_t *num_tables_ptr, const int64_t *join_hash_tables)
 
void multifrag_query (const int8_t ***col_buffers, const uint64_t *num_fragments, const int64_t *num_rows, const uint64_t *frag_row_offsets, const int32_t *max_matched, int32_t *total_matched, const int64_t *init_agg_value, int64_t **out, int32_t *error_code, const uint32_t *num_tables_ptr, const int64_t *join_hash_tables)
 
ALWAYS_INLINE DEVICE bool check_interrupt ()
 
bool check_interrupt_init (unsigned command)
 

Variables

const int32_t groups_buffer_size
 
int64_t * src
 
int64_t const int32_t sz
 
const int64_t * init_vals
 
const int64_t const uint32_t groups_buffer_entry_count
 
const int64_t const uint32_t const uint32_t key_qw_count
 
const int64_t const uint32_t const uint32_t const uint32_t agg_col_count
 
const int64_t const uint32_t const uint32_t const uint32_t const bool keyless
 
const int64_t const uint32_t const uint32_t const uint32_t const bool const int8_t warp_size
 
const int64_t const uint32_t const uint32_t const uint32_t const bool const bool blocks_share_memory
 
const int64_t const uint32_t const uint32_t const uint32_t const bool const bool const int32_t frag_idx
 
const int8_t * literals
 
const int8_t const int64_t * num_rows
 
const int8_t const int64_t const uint64_t * frag_row_offsets
 
const int8_t const int64_t const uint64_t const int32_t * max_matched
 
const int8_t const int64_t const uint64_t const int32_t const int64_t * init_agg_value
 
const int8_t const int64_t const uint64_t const int32_t const int64_t int64_t ** out
 
const int8_t const int64_t const uint64_t const int32_t const int64_t int64_t uint32_t const int64_t * join_hash_tables
 
const int8_t const int64_t const uint64_t const int32_t const int64_t int64_t uint32_t const int64_t int32_t * error_code
 
const int8_t const int64_t const uint64_t const int32_t const int64_t int64_t uint32_t const int64_t int32_t int32_t * total_matched
 

Macro Definition Documentation

◆ ADDR_T [1/2]

#define ADDR_T   int64_t

Definition at line 706 of file RuntimeFunctions.cpp.

◆ ADDR_T [2/2]

#define ADDR_T   int32_t

Definition at line 706 of file RuntimeFunctions.cpp.

◆ DATA_T [1/6]

#define DATA_T   int64_t

Definition at line 705 of file RuntimeFunctions.cpp.

◆ DATA_T [2/6]

#define DATA_T   int32_t

Definition at line 705 of file RuntimeFunctions.cpp.

◆ DATA_T [3/6]

#define DATA_T   int16_t

Definition at line 705 of file RuntimeFunctions.cpp.

◆ DATA_T [4/6]

#define DATA_T   int8_t

Definition at line 705 of file RuntimeFunctions.cpp.

◆ DATA_T [5/6]

#define DATA_T   double

Definition at line 705 of file RuntimeFunctions.cpp.

◆ DATA_T [6/6]

#define DATA_T   float

Definition at line 705 of file RuntimeFunctions.cpp.

◆ DEF_AGG_ID_INT

#define DEF_AGG_ID_INT (   n)
Value:
extern "C" ALWAYS_INLINE void agg_id_int##n(int##n##_t* agg, const int##n##_t val) { \
*agg = val; \
}
#define ALWAYS_INLINE

Definition at line 439 of file RuntimeFunctions.cpp.

◆ DEF_AGG_MAX_INT

#define DEF_AGG_MAX_INT (   n)
Value:
extern "C" ALWAYS_INLINE void agg_max_int##n(int##n##_t* agg, const int##n##_t val) { \
*agg = std::max(*agg, val); \
}
#define ALWAYS_INLINE

Definition at line 419 of file RuntimeFunctions.cpp.

◆ DEF_AGG_MIN_INT

#define DEF_AGG_MIN_INT (   n)
Value:
extern "C" ALWAYS_INLINE void agg_min_int##n(int##n##_t* agg, const int##n##_t val) { \
*agg = std::min(*agg, val); \
}
#define ALWAYS_INLINE

Definition at line 429 of file RuntimeFunctions.cpp.

◆ DEF_ARITH_NULLABLE

#define DEF_ARITH_NULLABLE (   type,
  null_type,
  opname,
  opsym 
)
Value:
extern "C" ALWAYS_INLINE type opname##_##type##_nullable( \
const type lhs, const type rhs, const null_type null_val) { \
if (lhs != null_val && rhs != null_val) { \
return lhs opsym rhs; \
} \
return null_val; \
}
#define ALWAYS_INLINE

Definition at line 42 of file RuntimeFunctions.cpp.

◆ DEF_ARITH_NULLABLE_LHS

#define DEF_ARITH_NULLABLE_LHS (   type,
  null_type,
  opname,
  opsym 
)
Value:
extern "C" ALWAYS_INLINE type opname##_##type##_nullable_lhs( \
const type lhs, const type rhs, const null_type null_val) { \
if (lhs != null_val) { \
return lhs opsym rhs; \
} \
return null_val; \
}
#define ALWAYS_INLINE

Definition at line 51 of file RuntimeFunctions.cpp.

◆ DEF_ARITH_NULLABLE_RHS

#define DEF_ARITH_NULLABLE_RHS (   type,
  null_type,
  opname,
  opsym 
)
Value:
extern "C" ALWAYS_INLINE type opname##_##type##_nullable_rhs( \
const type lhs, const type rhs, const null_type null_val) { \
if (rhs != null_val) { \
return lhs opsym rhs; \
} \
return null_val; \
}
#define ALWAYS_INLINE

Definition at line 60 of file RuntimeFunctions.cpp.

◆ DEF_BINARY_NULLABLE_ALL_OPS

#define DEF_BINARY_NULLABLE_ALL_OPS (   type,
  null_type 
)

Definition at line 114 of file RuntimeFunctions.cpp.

◆ DEF_CAST_NULLABLE

#define DEF_CAST_NULLABLE (   from_type,
  to_type 
)
Value:
extern "C" ALWAYS_INLINE to_type cast_##from_type##_to_##to_type##_nullable( \
const from_type operand, \
const from_type from_null_val, \
const to_type to_null_val) { \
return operand == from_null_val ? to_null_val : operand; \
}
#define ALWAYS_INLINE

Definition at line 233 of file RuntimeFunctions.cpp.

◆ DEF_CAST_NULLABLE_BIDIR

#define DEF_CAST_NULLABLE_BIDIR (   type1,
  type2 
)
Value:
DEF_CAST_NULLABLE(type1, type2) \
DEF_CAST_NULLABLE(type2, type1)
#define DEF_CAST_NULLABLE(from_type, to_type)

Definition at line 241 of file RuntimeFunctions.cpp.

◆ DEF_CHECKED_SINGLE_AGG_ID_INT

#define DEF_CHECKED_SINGLE_AGG_ID_INT (   n)
Value:
extern "C" ALWAYS_INLINE int32_t checked_single_agg_id_int##n( \
int##n##_t* agg, const int##n##_t val, const int##n##_t null_val) { \
if (val == null_val) { \
return 0; \
} \
if (*agg == val) { \
return 0; \
} else if (*agg == null_val) { \
*agg = val; \
return 0; \
} else { \
/* see Execute::ERR_SINGLE_VALUE_FOUND_MULTIPLE_VALUES*/ \
return 15; \
} \
}
#define ALWAYS_INLINE

Definition at line 444 of file RuntimeFunctions.cpp.

◆ DEF_CMP_NULLABLE

#define DEF_CMP_NULLABLE (   type,
  null_type,
  opname,
  opsym 
)
Value:
extern "C" ALWAYS_INLINE int8_t opname##_##type##_nullable( \
const type lhs, \
const type rhs, \
const null_type null_val, \
const int8_t null_bool_val) { \
if (lhs != null_val && rhs != null_val) { \
return lhs opsym rhs; \
} \
return null_bool_val; \
}
#define ALWAYS_INLINE

Definition at line 69 of file RuntimeFunctions.cpp.

◆ DEF_CMP_NULLABLE_LHS

#define DEF_CMP_NULLABLE_LHS (   type,
  null_type,
  opname,
  opsym 
)
Value:
extern "C" ALWAYS_INLINE int8_t opname##_##type##_nullable_lhs( \
const type lhs, \
const type rhs, \
const null_type null_val, \
const int8_t null_bool_val) { \
if (lhs != null_val) { \
return lhs opsym rhs; \
} \
return null_bool_val; \
}
#define ALWAYS_INLINE

Definition at line 81 of file RuntimeFunctions.cpp.

◆ DEF_CMP_NULLABLE_RHS

#define DEF_CMP_NULLABLE_RHS (   type,
  null_type,
  opname,
  opsym 
)
Value:
extern "C" ALWAYS_INLINE int8_t opname##_##type##_nullable_rhs( \
const type lhs, \
const type rhs, \
const null_type null_val, \
const int8_t null_bool_val) { \
if (rhs != null_val) { \
return lhs opsym rhs; \
} \
return null_bool_val; \
}
#define ALWAYS_INLINE

Definition at line 93 of file RuntimeFunctions.cpp.

◆ DEF_SAFE_DIV_NULLABLE

#define DEF_SAFE_DIV_NULLABLE (   type,
  null_type,
  opname 
)
Value:
extern "C" ALWAYS_INLINE type safe_div_##type( \
const type lhs, const type rhs, const null_type null_val) { \
if (lhs != null_val && rhs != null_val && rhs != 0) { \
return lhs / rhs; \
} \
return null_val; \
}
#define ALWAYS_INLINE

Definition at line 105 of file RuntimeFunctions.cpp.

◆ DEF_SHARED_AGG_RET_STUBS

#define DEF_SHARED_AGG_RET_STUBS (   base_agg_func)

Definition at line 732 of file RuntimeFunctions.cpp.

◆ DEF_SHARED_AGG_STUBS

#define DEF_SHARED_AGG_STUBS (   base_agg_func)
Value:
extern "C" GPU_RT_STUB void base_agg_func##_shared(int64_t* agg, const int64_t val) {} \
\
extern "C" GPU_RT_STUB void base_agg_func##_skip_val_shared( \
int64_t* agg, const int64_t val, const int64_t skip_val) {} \
extern "C" GPU_RT_STUB void base_agg_func##_int32_shared(int32_t* agg, \
const int32_t val) {} \
extern "C" GPU_RT_STUB void base_agg_func##_int16_shared(int16_t* agg, \
const int16_t val) {} \
extern "C" GPU_RT_STUB void base_agg_func##_int8_shared(int8_t* agg, \
const int8_t val) {} \
\
extern "C" GPU_RT_STUB void base_agg_func##_int32_skip_val_shared( \
int32_t* agg, const int32_t val, const int32_t skip_val) {} \
\
extern "C" GPU_RT_STUB void base_agg_func##_double_shared(int64_t* agg, \
const double val) {} \
\
extern "C" GPU_RT_STUB void base_agg_func##_double_skip_val_shared( \
int64_t* agg, const double val, const double skip_val) {} \
extern "C" GPU_RT_STUB void base_agg_func##_float_shared(int32_t* agg, \
const float val) {} \
\
extern "C" GPU_RT_STUB void base_agg_func##_float_skip_val_shared( \
int32_t* agg, const float val, const float skip_val) {}
#define GPU_RT_STUB

Definition at line 771 of file RuntimeFunctions.cpp.

◆ DEF_SKIP_AGG [1/2]

#define DEF_SKIP_AGG (   base_agg_func)
Value:
extern "C" ALWAYS_INLINE void base_agg_func##_skip_val( \
DATA_T* agg, const DATA_T val, const DATA_T skip_val) { \
if (val != skip_val) { \
const DATA_T old_agg = *agg; \
if (old_agg != skip_val) { \
base_agg_func(agg, val); \
} else { \
*agg = val; \
} \
} \
}
#define DATA_T
#define ALWAYS_INLINE

Definition at line 684 of file RuntimeFunctions.cpp.

Referenced by agg_count_int32_skip_val().

◆ DEF_SKIP_AGG [2/2]

#define DEF_SKIP_AGG (   base_agg_func)
Value:
extern "C" ALWAYS_INLINE void base_agg_func##_skip_val( \
ADDR_T* agg, const DATA_T val, const DATA_T skip_val) { \
if (val != skip_val) { \
const ADDR_T old_agg = *agg; \
if (old_agg != *reinterpret_cast<const ADDR_T*>(may_alias_ptr(&skip_val))) { \
base_agg_func(agg, val); \
} else { \
*agg = *reinterpret_cast<const ADDR_T*>(may_alias_ptr(&val)); \
} \
} \
}
#define ADDR_T
#define DATA_T
#define ALWAYS_INLINE

Definition at line 684 of file RuntimeFunctions.cpp.

◆ DEF_SKIP_AGG_ADD [1/2]

#define DEF_SKIP_AGG_ADD (   base_agg_func)
Value:
extern "C" ALWAYS_INLINE void base_agg_func##_skip_val( \
DATA_T* agg, const DATA_T val, const DATA_T skip_val) { \
if (val != skip_val) { \
base_agg_func(agg, val); \
} \
}
#define DATA_T
#define ALWAYS_INLINE

Definition at line 676 of file RuntimeFunctions.cpp.

◆ DEF_SKIP_AGG_ADD [2/2]

#define DEF_SKIP_AGG_ADD (   base_agg_func)
Value:
extern "C" ALWAYS_INLINE void base_agg_func##_skip_val( \
ADDR_T* agg, const DATA_T val, const DATA_T skip_val) { \
if (val != skip_val) { \
base_agg_func(agg, val); \
} \
}
#define ADDR_T
#define DATA_T
#define ALWAYS_INLINE

Definition at line 676 of file RuntimeFunctions.cpp.

◆ DEF_UMINUS_NULLABLE

#define DEF_UMINUS_NULLABLE (   type,
  null_type 
)
Value:
extern "C" ALWAYS_INLINE type uminus_##type##_nullable(const type operand, \
const null_type null_val) { \
return operand == null_val ? null_val : -operand; \
}
#define ALWAYS_INLINE

Definition at line 218 of file RuntimeFunctions.cpp.

◆ DEF_WRITE_PROJECTION_INT

#define DEF_WRITE_PROJECTION_INT (   n)
Value:
extern "C" ALWAYS_INLINE void write_projection_int##n( \
int8_t* slot_ptr, const int##n##_t val, const int64_t init_val) { \
if (val != init_val) { \
*reinterpret_cast<int##n##_t*>(slot_ptr) = val; \
} \
}
#define ALWAYS_INLINE

Definition at line 472 of file RuntimeFunctions.cpp.

◆ GPU_RT_STUB

#define GPU_RT_STUB   NEVER_INLINE __attribute__((optnone))

Function Documentation

◆ pos_start_impl()

__attribute__((noinline)) int32_t pos_start_impl ( int32_t *  error_code )

Definition at line 898 of file RuntimeFunctions.cpp.

Referenced by extract_str_len(), init_shared_mem(), linear_probabilistic_count(), multifrag_query_hoisted_literals(), and record_error_code().

898  {
899  int32_t row_index_resume{0};
900  if (error_code) {
901  row_index_resume = error_code[0];
902  error_code[0] = 0;
903  }
904  return row_index_resume;
905 }
const int8_t const int64_t const uint64_t const int32_t const int64_t int64_t uint32_t const int64_t int32_t * error_code
+ Here is the caller graph for this function:

◆ agg_approximate_count_distinct()

NEVER_INLINE void agg_approximate_count_distinct ( int64_t *  agg,
const int64_t  key,
const uint32_t  b 
)

Definition at line 317 of file RuntimeFunctions.cpp.

References get_rank(), and MurmurHash64A().

319  {
320  const uint64_t hash = MurmurHash64A(&key, sizeof(key), 0);
321  const uint32_t index = hash >> (64 - b);
322  const uint8_t rank = get_rank(hash << b, 64 - b);
323  uint8_t* M = reinterpret_cast<uint8_t*>(*agg);
324  M[index] = std::max(M[index], rank);
325 }
FORCE_INLINE uint8_t get_rank(uint64_t x, uint32_t b)
NEVER_INLINE DEVICE uint64_t MurmurHash64A(const void *key, int len, uint64_t seed)
Definition: MurmurHash.cpp:26
+ Here is the call graph for this function:

◆ agg_approximate_count_distinct_gpu()

GPU_RT_STUB void agg_approximate_count_distinct_gpu ( int64_t *  ,
const int64_t  ,
const uint32_t  ,
const int64_t  ,
const int64_t   
)

Definition at line 327 of file RuntimeFunctions.cpp.

331  {}

◆ agg_count()

ALWAYS_INLINE uint64_t agg_count ( uint64_t *  agg,
const int64_t   
)

Definition at line 296 of file RuntimeFunctions.cpp.

Referenced by agg_count_skip_val(), and anonymous_namespace{GroupByAndAggregate.cpp}::get_agg_count().

296  {
297  return (*agg)++;
298 }
+ Here is the caller graph for this function:

◆ agg_count_distinct_bitmap()

ALWAYS_INLINE void agg_count_distinct_bitmap ( int64_t *  agg,
const int64_t  val,
const int64_t  min_val 
)

Definition at line 300 of file RuntimeFunctions.cpp.

Referenced by agg_count_distinct_bitmap_skip_val(), WindowFunctionContext::fillPartitionEnd(), WindowFunctionContext::fillPartitionStart(), anonymous_namespace{WindowContext.cpp}::index_to_partition_end(), and InValuesBitmap::InValuesBitmap().

302  {
303  const uint64_t bitmap_idx = val - min_val;
304  reinterpret_cast<int8_t*>(*agg)[bitmap_idx >> 3] |= (1 << (bitmap_idx & 7));
305 }
+ Here is the caller graph for this function:

◆ agg_count_distinct_bitmap_gpu()

GPU_RT_STUB void agg_count_distinct_bitmap_gpu ( int64_t *  ,
const int64_t  ,
const int64_t  ,
const int64_t  ,
const int64_t  ,
const uint64_t  ,
const uint64_t   
)

Definition at line 309 of file RuntimeFunctions.cpp.

315  {}

◆ agg_count_distinct_bitmap_skip_val()

ALWAYS_INLINE void agg_count_distinct_bitmap_skip_val ( int64_t *  agg,
const int64_t  val,
const int64_t  min_val,
const int64_t  skip_val 
)

Definition at line 391 of file RuntimeFunctions.cpp.

References agg_count_distinct_bitmap().

394  {
395  if (val != skip_val) {
396  agg_count_distinct_bitmap(agg, val, min_val);
397  }
398 }
ALWAYS_INLINE void agg_count_distinct_bitmap(int64_t *agg, const int64_t val, const int64_t min_val)
+ Here is the call graph for this function:

◆ agg_count_distinct_bitmap_skip_val_gpu()

GPU_RT_STUB void agg_count_distinct_bitmap_skip_val_gpu ( int64_t *  ,
const int64_t  ,
const int64_t  ,
const int64_t  ,
const int64_t  ,
const int64_t  ,
const uint64_t  ,
const uint64_t   
)

Definition at line 400 of file RuntimeFunctions.cpp.

407  {}

◆ agg_count_double()

ALWAYS_INLINE uint64_t agg_count_double ( uint64_t *  agg,
const double  val 
)

Definition at line 576 of file RuntimeFunctions.cpp.

Referenced by agg_count_double_skip_val().

576  {
577  return (*agg)++;
578 }
+ Here is the caller graph for this function:

◆ agg_count_double_skip_val()

ALWAYS_INLINE uint64_t agg_count_double_skip_val ( uint64_t *  agg,
const double  val,
const double  skip_val 
)

Definition at line 658 of file RuntimeFunctions.cpp.

References agg_count_double().

660  {
661  if (val != skip_val) {
662  return agg_count_double(agg, val);
663  }
664  return *agg;
665 }
ALWAYS_INLINE uint64_t agg_count_double(uint64_t *agg, const double val)
+ Here is the call graph for this function:

◆ agg_count_float()

ALWAYS_INLINE uint32_t agg_count_float ( uint32_t *  agg,
const float  val 
)

Definition at line 617 of file RuntimeFunctions.cpp.

Referenced by agg_count_float_skip_val().

617  {
618  return (*agg)++;
619 }
+ Here is the caller graph for this function:

◆ agg_count_float_skip_val()

ALWAYS_INLINE uint32_t agg_count_float_skip_val ( uint32_t *  agg,
const float  val,
const float  skip_val 
)

Definition at line 667 of file RuntimeFunctions.cpp.

References agg_count_float().

669  {
670  if (val != skip_val) {
671  return agg_count_float(agg, val);
672  }
673  return *agg;
674 }
ALWAYS_INLINE uint32_t agg_count_float(uint32_t *agg, const float val)
+ Here is the call graph for this function:

◆ agg_count_int32()

ALWAYS_INLINE uint32_t agg_count_int32 ( uint32_t *  agg,
const int32_t   
)

Definition at line 409 of file RuntimeFunctions.cpp.

Referenced by agg_count_int32_skip_val().

409  {
410  return (*agg)++;
411 }
+ Here is the caller graph for this function:

◆ agg_count_int32_skip_val()

ALWAYS_INLINE uint32_t agg_count_int32_skip_val ( uint32_t *  agg,
const int32_t  val,
const int32_t  skip_val 
)

Definition at line 521 of file RuntimeFunctions.cpp.

References agg_count_int32(), agg_max(), agg_max_int16(), agg_max_int32(), agg_max_int8(), agg_min(), agg_min_int16(), agg_min_int32(), agg_min_int8(), and DEF_SKIP_AGG.

523  {
524  if (val != skip_val) {
525  return agg_count_int32(agg, val);
526  }
527  return *agg;
528 }
ALWAYS_INLINE uint32_t agg_count_int32(uint32_t *agg, const int32_t)
+ Here is the call graph for this function:

◆ agg_count_skip_val()

ALWAYS_INLINE uint64_t agg_count_skip_val ( uint64_t *  agg,
const int64_t  val,
const int64_t  skip_val 
)

Definition at line 512 of file RuntimeFunctions.cpp.

References agg_count().

514  {
515  if (val != skip_val) {
516  return agg_count(agg, val);
517  }
518  return *agg;
519 }
ALWAYS_INLINE uint64_t agg_count(uint64_t *agg, const int64_t)
+ Here is the call graph for this function:

◆ agg_id()

ALWAYS_INLINE void agg_id ( int64_t *  agg,
const int64_t  val 
)

Definition at line 369 of file RuntimeFunctions.cpp.

369  {
370  *agg = val;
371 }

◆ agg_id_double()

ALWAYS_INLINE void agg_id_double ( int64_t *  agg,
const double  val 
)

Definition at line 595 of file RuntimeFunctions.cpp.

595  {
596  *agg = *(reinterpret_cast<const int64_t*>(may_alias_ptr(&val)));
597 }

◆ agg_id_double_shared_slow()

GPU_RT_STUB void agg_id_double_shared_slow ( int64_t *  agg,
const double *  val 
)

Definition at line 855 of file RuntimeFunctions.cpp.

855 {}

◆ agg_id_float()

ALWAYS_INLINE void agg_id_float ( int32_t *  agg,
const float  val 
)

Definition at line 636 of file RuntimeFunctions.cpp.

636  {
637  *agg = *(reinterpret_cast<const int32_t*>(may_alias_ptr(&val)));
638 }

◆ agg_max()

ALWAYS_INLINE void agg_max ( int64_t *  agg,
const int64_t  val 
)

Definition at line 361 of file RuntimeFunctions.cpp.

Referenced by agg_count_int32_skip_val().

361  {
362  *agg = std::max(*agg, val);
363 }
+ Here is the caller graph for this function:

◆ agg_max_double()

ALWAYS_INLINE void agg_max_double ( int64_t *  agg,
const double  val 
)

Definition at line 585 of file RuntimeFunctions.cpp.

585  {
586  const auto r = std::max(*reinterpret_cast<const double*>(agg), val);
587  *agg = *(reinterpret_cast<const int64_t*>(may_alias_ptr(&r)));
588 }

◆ agg_max_float()

ALWAYS_INLINE void agg_max_float ( int32_t *  agg,
const float  val 
)

Definition at line 626 of file RuntimeFunctions.cpp.

626  {
627  const auto r = std::max(*reinterpret_cast<const float*>(agg), val);
628  *agg = *(reinterpret_cast<const int32_t*>(may_alias_ptr(&r)));
629 }

◆ agg_max_int16_skip_val_shared()

GPU_RT_STUB void agg_max_int16_skip_val_shared ( int16_t *  agg,
const int16_t  val,
const int16_t  skip_val 
)

Definition at line 839 of file RuntimeFunctions.cpp.

841  {}

◆ agg_max_int8_skip_val_shared()

GPU_RT_STUB void agg_max_int8_skip_val_shared ( int8_t *  agg,
const int8_t  val,
const int8_t  skip_val 
)

Definition at line 843 of file RuntimeFunctions.cpp.

845  {}

◆ agg_min()

ALWAYS_INLINE void agg_min ( int64_t *  agg,
const int64_t  val 
)

Definition at line 365 of file RuntimeFunctions.cpp.

Referenced by agg_count_int32_skip_val().

365  {
366  *agg = std::min(*agg, val);
367 }
+ Here is the caller graph for this function:

◆ agg_min_double()

ALWAYS_INLINE void agg_min_double ( int64_t *  agg,
const double  val 
)

Definition at line 590 of file RuntimeFunctions.cpp.

590  {
591  const auto r = std::min(*reinterpret_cast<const double*>(agg), val);
592  *agg = *(reinterpret_cast<const int64_t*>(may_alias_ptr(&r)));
593 }

◆ agg_min_float()

ALWAYS_INLINE void agg_min_float ( int32_t *  agg,
const float  val 
)

Definition at line 631 of file RuntimeFunctions.cpp.

631  {
632  const auto r = std::min(*reinterpret_cast<const float*>(agg), val);
633  *agg = *(reinterpret_cast<const int32_t*>(may_alias_ptr(&r)));
634 }

◆ agg_min_int16_skip_val_shared()

GPU_RT_STUB void agg_min_int16_skip_val_shared ( int16_t *  agg,
const int16_t  val,
const int16_t  skip_val 
)

Definition at line 847 of file RuntimeFunctions.cpp.

849  {}

◆ agg_min_int8_skip_val_shared()

GPU_RT_STUB void agg_min_int8_skip_val_shared ( int8_t *  agg,
const int8_t  val,
const int8_t  skip_val 
)

Definition at line 851 of file RuntimeFunctions.cpp.

853  {}

◆ agg_sum()

ALWAYS_INLINE int64_t agg_sum ( int64_t *  agg,
const int64_t  val 
)

Definition at line 355 of file RuntimeFunctions.cpp.

Referenced by agg_sum_skip_val().

355  {
356  const auto old = *agg;
357  *agg += val;
358  return old;
359 }
+ Here is the caller graph for this function:

◆ agg_sum_double()

ALWAYS_INLINE void agg_sum_double ( int64_t *  agg,
const double  val 
)

Definition at line 580 of file RuntimeFunctions.cpp.

580  {
581  const auto r = *reinterpret_cast<const double*>(agg) + val;
582  *agg = *reinterpret_cast<const int64_t*>(may_alias_ptr(&r));
583 }

◆ agg_sum_double_shared()

GPU_RT_STUB void agg_sum_double_shared ( int64_t *  agg,
const double  val 
)

Definition at line 876 of file RuntimeFunctions.cpp.

876 {}

◆ agg_sum_double_skip_val_shared()

GPU_RT_STUB void agg_sum_double_skip_val_shared ( int64_t *  agg,
const double  val,
const double  skip_val 
)

Definition at line 878 of file RuntimeFunctions.cpp.

880  {}

◆ agg_sum_float()

ALWAYS_INLINE void agg_sum_float ( int32_t *  agg,
const float  val 
)

Definition at line 621 of file RuntimeFunctions.cpp.

621  {
622  const auto r = *reinterpret_cast<const float*>(agg) + val;
623  *agg = *reinterpret_cast<const int32_t*>(may_alias_ptr(&r));
624 }

◆ agg_sum_float_shared()

GPU_RT_STUB void agg_sum_float_shared ( int32_t *  agg,
const float  val 
)

Definition at line 881 of file RuntimeFunctions.cpp.

881 {}

◆ agg_sum_float_skip_val_shared()

GPU_RT_STUB void agg_sum_float_skip_val_shared ( int32_t *  agg,
const float  val,
const float  skip_val 
)

Definition at line 883 of file RuntimeFunctions.cpp.

885  {}

◆ agg_sum_int32()

ALWAYS_INLINE int32_t agg_sum_int32 ( int32_t *  agg,
const int32_t  val 
)

Definition at line 413 of file RuntimeFunctions.cpp.

Referenced by agg_sum_int32_skip_val().

413  {
414  const auto old = *agg;
415  *agg += val;
416  return old;
417 }
+ Here is the caller graph for this function:

◆ agg_sum_int32_shared()

GPU_RT_STUB int32_t agg_sum_int32_shared ( int32_t *  agg,
const int32_t  val 
)

Definition at line 866 of file RuntimeFunctions.cpp.

866  {
867  return 0;
868 }

◆ agg_sum_int32_skip_val()

ALWAYS_INLINE int32_t agg_sum_int32_skip_val ( int32_t *  agg,
const int32_t  val,
const int32_t  skip_val 
)

Definition at line 498 of file RuntimeFunctions.cpp.

References agg_sum_int32().

500  {
501  const auto old = *agg;
502  if (val != skip_val) {
503  if (old != skip_val) {
504  return agg_sum_int32(agg, val);
505  } else {
506  *agg = val;
507  }
508  }
509  return old;
510 }
ALWAYS_INLINE int32_t agg_sum_int32(int32_t *agg, const int32_t val)
+ Here is the call graph for this function:

◆ agg_sum_int32_skip_val_shared()

GPU_RT_STUB int32_t agg_sum_int32_skip_val_shared ( int32_t *  agg,
const int32_t  val,
const int32_t  skip_val 
)

Definition at line 870 of file RuntimeFunctions.cpp.

872  {
873  return 0;
874 }

◆ agg_sum_shared()

GPU_RT_STUB int64_t agg_sum_shared ( int64_t *  agg,
const int64_t  val 
)

Definition at line 857 of file RuntimeFunctions.cpp.

857  {
858  return 0;
859 }

◆ agg_sum_skip_val()

ALWAYS_INLINE int64_t agg_sum_skip_val ( int64_t *  agg,
const int64_t  val,
const int64_t  skip_val 
)

Definition at line 484 of file RuntimeFunctions.cpp.

References agg_sum().

Referenced by Executor::reduceResults().

486  {
487  const auto old = *agg;
488  if (val != skip_val) {
489  if (old != skip_val) {
490  return agg_sum(agg, val);
491  } else {
492  *agg = val;
493  }
494  }
495  return old;
496 }
ALWAYS_INLINE int64_t agg_sum(int64_t *agg, const int64_t val)
+ Here is the call graph for this function:
+ Here is the caller graph for this function:

◆ agg_sum_skip_val_shared()

GPU_RT_STUB int64_t agg_sum_skip_val_shared ( int64_t *  agg,
const int64_t  val,
const int64_t  skip_val 
)

Definition at line 861 of file RuntimeFunctions.cpp.

863  {
864  return 0;
865 }

◆ bit_is_set()

ALWAYS_INLINE int8_t bit_is_set ( const int64_t  bitset,
const int64_t  val,
const int64_t  min_val,
const int64_t  max_val,
const int64_t  null_val,
const int8_t  null_bool_val 
)

Definition at line 333 of file RuntimeFunctions.cpp.

338  {
339  if (val == null_val) {
340  return null_bool_val;
341  }
342  if (val < min_val || val > max_val) {
343  return 0;
344  }
345  if (!bitset) {
346  return 0;
347  }
348  const uint64_t bitmap_idx = val - min_val;
349  return (reinterpret_cast<const int8_t*>(bitset))[bitmap_idx >> 3] &
350  (1 << (bitmap_idx & 7))
351  ? 1
352  : 0;
353 }

◆ char_length()

ALWAYS_INLINE DEVICE int32_t char_length ( const char *  str,
const int32_t  str_len 
)

Definition at line 1227 of file RuntimeFunctions.cpp.

Referenced by ScalarExprVisitor< std::unordered_set< InputColDescriptor > >::visit().

1228  {
1229  return str_len;
1230 }
+ Here is the caller graph for this function:

◆ char_length_nullable()

ALWAYS_INLINE DEVICE int32_t char_length_nullable ( const char *  str,
const int32_t  str_len,
const int32_t  int_null 
)

Definition at line 1232 of file RuntimeFunctions.cpp.

1234  {
1235  if (!str) {
1236  return int_null;
1237  }
1238  return str_len;
1239 }

◆ check_interrupt()

ALWAYS_INLINE DEVICE bool check_interrupt ( )

Definition at line 1389 of file RuntimeFunctions.cpp.

References check_interrupt_init(), and INT_CHECK.

1389  {
1390  if (check_interrupt_init(static_cast<unsigned>(INT_CHECK))) {
1391  return true;
1392  }
1393  return false;
1394 }
bool check_interrupt_init(unsigned command)
+ Here is the call graph for this function:

◆ check_interrupt_init()

bool check_interrupt_init ( unsigned  command)

Definition at line 1396 of file RuntimeFunctions.cpp.

References INT_ABORT, INT_CHECK, and INT_RESET.

Referenced by check_interrupt(), Executor::interrupt(), and Executor::resetInterrupt().

1396  {
1397  static std::atomic_bool runtime_interrupt_flag{false};
1398 
1399  if (command == static_cast<unsigned>(INT_CHECK)) {
1400  if (runtime_interrupt_flag.load()) {
1401  return true;
1402  }
1403  return false;
1404  }
1405  if (command == static_cast<unsigned>(INT_ABORT)) {
1406  runtime_interrupt_flag.store(true);
1407  return false;
1408  }
1409  if (command == static_cast<unsigned>(INT_RESET)) {
1410  runtime_interrupt_flag.store(false);
1411  return false;
1412  }
1413  return false;
1414 }
+ Here is the caller graph for this function:

◆ checked_single_agg_id()

ALWAYS_INLINE int32_t checked_single_agg_id ( int64_t *  agg,
const int64_t  val,
const int64_t  null_val 
)

Definition at line 373 of file RuntimeFunctions.cpp.

375  {
376  if (val == null_val) {
377  return 0;
378  }
379 
380  if (*agg == val) {
381  return 0;
382  } else if (*agg == null_val) {
383  *agg = val;
384  return 0;
385  } else {
386  // see Execute::ERR_SINGLE_VALUE_FOUND_MULTIPLE_VALUES
387  return 15;
388  }
389 }

◆ checked_single_agg_id_double()

ALWAYS_INLINE int32_t checked_single_agg_id_double ( int64_t *  agg,
const double  val,
const double  null_val 
)

Definition at line 599 of file RuntimeFunctions.cpp.

601  {
602  if (val == null_val) {
603  return 0;
604  }
605 
606  if (*agg == *(reinterpret_cast<const int64_t*>(may_alias_ptr(&val)))) {
607  return 0;
608  } else if (*agg == *(reinterpret_cast<const int64_t*>(may_alias_ptr(&null_val)))) {
609  *agg = *(reinterpret_cast<const int64_t*>(may_alias_ptr(&val)));
610  return 0;
611  } else {
612  // see Execute::ERR_SINGLE_VALUE_FOUND_MULTIPLE_VALUES
613  return 15;
614  }
615 }

◆ checked_single_agg_id_double_shared()

GPU_RT_STUB int32_t checked_single_agg_id_double_shared ( int64_t *  agg,
const double  val,
const double  null_val 
)

Definition at line 827 of file RuntimeFunctions.cpp.

829  {
830  return 0;
831 }

◆ checked_single_agg_id_float()

ALWAYS_INLINE int32_t checked_single_agg_id_float ( int32_t *  agg,
const float  val,
const float  null_val 
)

Definition at line 640 of file RuntimeFunctions.cpp.

642  {
643  if (val == null_val) {
644  return 0;
645  }
646 
647  if (*agg == *(reinterpret_cast<const int32_t*>(may_alias_ptr(&val)))) {
648  return 0;
649  } else if (*agg == *(reinterpret_cast<const int32_t*>(may_alias_ptr(&null_val)))) {
650  *agg = *(reinterpret_cast<const int32_t*>(may_alias_ptr(&val)));
651  return 0;
652  } else {
653  // see Execute::ERR_SINGLE_VALUE_FOUND_MULTIPLE_VALUES
654  return 15;
655  }
656 }

◆ checked_single_agg_id_float_shared()

GPU_RT_STUB int32_t checked_single_agg_id_float_shared ( int32_t *  agg,
const float  val,
const float  null_val 
)

Definition at line 833 of file RuntimeFunctions.cpp.

835  {
836  return 0;
837 }

◆ checked_single_agg_id_int16_shared()

GPU_RT_STUB int32_t checked_single_agg_id_int16_shared ( int16_t *  agg,
const int16_t  val,
const int16_t  null_val 
)

Definition at line 815 of file RuntimeFunctions.cpp.

817  {
818  return 0;
819 }

◆ checked_single_agg_id_int32_shared()

GPU_RT_STUB int32_t checked_single_agg_id_int32_shared ( int32_t *  agg,
const int32_t  val,
const int32_t  null_val 
)

Definition at line 809 of file RuntimeFunctions.cpp.

References GPU_RT_STUB.

811  {
812  return 0;
813 }

◆ checked_single_agg_id_int8_shared()

GPU_RT_STUB int32_t checked_single_agg_id_int8_shared ( int8_t *  agg,
const int8_t  val,
const int8_t  null_val 
)

Definition at line 820 of file RuntimeFunctions.cpp.

References GPU_RT_STUB.

822  {
823  return 0;
824 }

◆ checked_single_agg_id_shared()

GPU_RT_STUB int32_t checked_single_agg_id_shared ( int64_t *  agg,
const int64_t  val,
const int64_t  null_val 
)

Definition at line 802 of file RuntimeFunctions.cpp.

References GPU_RT_STUB.

804  {
805  return 0;
806 }

◆ decimal_ceil()

ALWAYS_INLINE int64_t decimal_ceil ( const int64_t  x,
const int64_t  scale 
)

Definition at line 726 of file RuntimeFunctions.cpp.

References decimal_floor().

726  {
727  return decimal_floor(x, scale) + (x % scale ? scale : 0);
728 }
ALWAYS_INLINE int64_t decimal_floor(const int64_t x, const int64_t scale)
+ Here is the call graph for this function:

◆ decimal_floor()

ALWAYS_INLINE int64_t decimal_floor ( const int64_t  x,
const int64_t  scale 
)

Definition at line 716 of file RuntimeFunctions.cpp.

Referenced by decimal_ceil().

716  {
717  if (x >= 0) {
718  return x / scale * scale;
719  }
720  if (!(x % scale)) {
721  return x;
722  }
723  return x / scale * scale - scale;
724 }
+ Here is the caller graph for this function:

◆ declare_dynamic_shared_memory()

GPU_RT_STUB int64_t* declare_dynamic_shared_memory ( )

Definition at line 923 of file RuntimeFunctions.cpp.

923  {
924  return nullptr;
925 }

◆ extract_str_len()

ALWAYS_INLINE int32_t extract_str_len ( const uint64_t  str_and_len)

Definition at line 1200 of file RuntimeFunctions.cpp.

References __attribute__(), extract_str_len_noinline(), extract_str_ptr(), and extract_str_ptr_noinline().

1200  {
1201  return static_cast<int64_t>(str_and_len) >> 48;
1202 }
+ Here is the call graph for this function:

◆ extract_str_ptr()

ALWAYS_INLINE int8_t* extract_str_ptr ( const uint64_t  str_and_len)

Definition at line 1196 of file RuntimeFunctions.cpp.

Referenced by extract_str_len().

1196  {
1197  return reinterpret_cast<int8_t*>(str_and_len & 0xffffffffffff);
1198 }
+ Here is the caller graph for this function:

◆ floor_div_lhs()

ALWAYS_INLINE int64_t floor_div_lhs ( const int64_t  dividend,
const int64_t  divisor 
)

Definition at line 205 of file RuntimeFunctions.cpp.

Referenced by floor_div_nullable_lhs().

206  {
207  return (dividend < 0 ? dividend - (divisor - 1) : dividend) / divisor;
208 }
+ Here is the caller graph for this function:

◆ floor_div_nullable_lhs()

ALWAYS_INLINE int64_t floor_div_nullable_lhs ( const int64_t  dividend,
const int64_t  divisor,
const int64_t  null_val 
)

Definition at line 212 of file RuntimeFunctions.cpp.

References floor_div_lhs().

214  {
215  return dividend == null_val ? null_val : floor_div_lhs(dividend, divisor);
216 }
ALWAYS_INLINE int64_t floor_div_lhs(const int64_t dividend, const int64_t divisor)
+ Here is the call graph for this function:

◆ force_sync()

GPU_RT_STUB void force_sync ( )

Definition at line 887 of file RuntimeFunctions.cpp.

887 {}

◆ get_block_index()

GPU_RT_STUB int64_t get_block_index ( )

Definition at line 927 of file RuntimeFunctions.cpp.

927  {
928  return 0;
929 }

◆ get_group_value_fast_keyless()

ALWAYS_INLINE int64_t* get_group_value_fast_keyless ( int64_t *  groups_buffer,
const int64_t  key,
const int64_t  min_key,
const int64_t  ,
const uint32_t  row_size_quad 
)

Definition at line 1176 of file RuntimeFunctions.cpp.

1181  {
1182  return groups_buffer + row_size_quad * (key - min_key);
1183 }

◆ get_group_value_fast_keyless_semiprivate()

ALWAYS_INLINE int64_t* get_group_value_fast_keyless_semiprivate ( int64_t *  groups_buffer,
const int64_t  key,
const int64_t  min_key,
const int64_t  ,
const uint32_t  row_size_quad,
const uint8_t  thread_warp_idx,
const uint8_t  warp_size 
)

Definition at line 1185 of file RuntimeFunctions.cpp.

1192  {
1193  return groups_buffer + row_size_quad * (warp_size * (key - min_key) + thread_warp_idx);
1194 }
const int64_t const uint32_t const uint32_t const uint32_t const bool const int8_t warp_size
GPU_RT_STUB int8_t thread_warp_idx(const int8_t warp_sz)

◆ get_matching_group_value() [1/2]

template<typename T >
ALWAYS_INLINE int64_t* get_matching_group_value ( int64_t *  groups_buffer,
const uint32_t  h,
const T *  key,
const uint32_t  key_count,
const uint32_t  row_size_quad 
)

Definition at line 1005 of file RuntimeFunctions.cpp.

References align_to_int64().

Referenced by get_group_value(), get_group_value_with_watchdog(), and get_matching_group_value().

1009  {
1010  auto off = h * row_size_quad;
1011  auto row_ptr = reinterpret_cast<T*>(groups_buffer + off);
1012  if (*row_ptr == get_empty_key<T>()) {
1013  memcpy(row_ptr, key, key_count * sizeof(T));
1014  auto row_ptr_i8 = reinterpret_cast<int8_t*>(row_ptr + key_count);
1015  return reinterpret_cast<int64_t*>(align_to_int64(row_ptr_i8));
1016  }
1017  if (memcmp(row_ptr, key, key_count * sizeof(T)) == 0) {
1018  auto row_ptr_i8 = reinterpret_cast<int8_t*>(row_ptr + key_count);
1019  return reinterpret_cast<int64_t*>(align_to_int64(row_ptr_i8));
1020  }
1021  return nullptr;
1022 }
FORCE_INLINE HOST DEVICE T align_to_int64(T addr)
+ Here is the call graph for this function:
+ Here is the caller graph for this function:

◆ get_matching_group_value() [2/2]

ALWAYS_INLINE int64_t* get_matching_group_value ( int64_t *  groups_buffer,
const uint32_t  h,
const int64_t *  key,
const uint32_t  key_count,
const uint32_t  key_width,
const uint32_t  row_size_quad,
const int64_t *  init_vals 
)

Definition at line 1024 of file RuntimeFunctions.cpp.

References get_matching_group_value().

1030  {
1031  switch (key_width) {
1032  case 4:
1033  return get_matching_group_value(groups_buffer,
1034  h,
1035  reinterpret_cast<const int32_t*>(key),
1036  key_count,
1037  row_size_quad);
1038  case 8:
1039  return get_matching_group_value(groups_buffer, h, key, key_count, row_size_quad);
1040  default:;
1041  }
1042  return nullptr;
1043 }
ALWAYS_INLINE int64_t * get_matching_group_value(int64_t *groups_buffer, const uint32_t h, const T *key, const uint32_t key_count, const uint32_t row_size_quad)
+ Here is the call graph for this function:

◆ get_matching_group_value_columnar()

ALWAYS_INLINE int64_t* get_matching_group_value_columnar ( int64_t *  groups_buffer,
const uint32_t  h,
const int64_t *  key,
const uint32_t  key_qw_count,
const size_t  entry_count 
)

Definition at line 1093 of file RuntimeFunctions.cpp.

References EMPTY_KEY_64, and key_qw_count.

Referenced by get_group_value_columnar(), and get_group_value_columnar_with_watchdog().

1098  {
1099  auto off = h;
1100  if (groups_buffer[off] == EMPTY_KEY_64) {
1101  for (size_t i = 0; i < key_qw_count; ++i) {
1102  groups_buffer[off] = key[i];
1103  off += entry_count;
1104  }
1105  return &groups_buffer[off];
1106  }
1107  off = h;
1108  for (size_t i = 0; i < key_qw_count; ++i) {
1109  if (groups_buffer[off] != key[i]) {
1110  return nullptr;
1111  }
1112  off += entry_count;
1113  }
1114  return &groups_buffer[off];
1115 }
#define EMPTY_KEY_64
const int64_t const uint32_t const uint32_t key_qw_count
+ Here is the caller graph for this function:

◆ get_matching_group_value_columnar_slot() [1/2]

template<typename T >
ALWAYS_INLINE int32_t get_matching_group_value_columnar_slot ( int64_t *  groups_buffer,
const uint32_t  entry_count,
const uint32_t  h,
const T *  key,
const uint32_t  key_count 
)

Definition at line 1046 of file RuntimeFunctions.cpp.

References ALWAYS_INLINE.

Referenced by get_group_value_columnar_slot(), get_group_value_columnar_slot_with_watchdog(), and get_matching_group_value_columnar_slot().

1050  {
1051  auto off = h;
1052  auto key_buffer = reinterpret_cast<T*>(groups_buffer);
1053  if (key_buffer[off] == get_empty_key<T>()) {
1054  for (size_t i = 0; i < key_count; ++i) {
1055  key_buffer[off] = key[i];
1056  off += entry_count;
1057  }
1058  return h;
1059  }
1060  off = h;
1061  for (size_t i = 0; i < key_count; ++i) {
1062  if (key_buffer[off] != key[i]) {
1063  return -1;
1064  }
1065  off += entry_count;
1066  }
1067  return h;
1068 }
+ Here is the caller graph for this function:

◆ get_matching_group_value_columnar_slot() [2/2]

ALWAYS_INLINE int32_t get_matching_group_value_columnar_slot ( int64_t *  groups_buffer,
const uint32_t  entry_count,
const uint32_t  h,
const int64_t *  key,
const uint32_t  key_count,
const uint32_t  key_width 
)

Definition at line 1071 of file RuntimeFunctions.cpp.

References get_matching_group_value_columnar_slot().

1076  {
1077  switch (key_width) {
1078  case 4:
1079  return get_matching_group_value_columnar_slot(groups_buffer,
1080  entry_count,
1081  h,
1082  reinterpret_cast<const int32_t*>(key),
1083  key_count);
1084  case 8:
1085  return get_matching_group_value_columnar_slot(
1086  groups_buffer, entry_count, h, key, key_count);
1087  default:
1088  return -1;
1089  }
1090  return -1;
1091 }
ALWAYS_INLINE int32_t get_matching_group_value_columnar_slot(int64_t *groups_buffer, const uint32_t entry_count, const uint32_t h, const T *key, const uint32_t key_count)
+ Here is the call graph for this function:

◆ get_matching_group_value_perfect_hash()

ALWAYS_INLINE int64_t* get_matching_group_value_perfect_hash ( int64_t *  groups_buffer,
const uint32_t  hashed_index,
const int64_t *  key,
const uint32_t  key_count,
const uint32_t  row_size_quad 
)

Definition at line 1128 of file RuntimeFunctions.cpp.

References EMPTY_KEY_64.

1133  {
1134  uint32_t off = hashed_index * row_size_quad;
1135  if (groups_buffer[off] == EMPTY_KEY_64) {
1136  for (uint32_t i = 0; i < key_count; ++i) {
1137  groups_buffer[off + i] = key[i];
1138  }
1139  }
1140  return groups_buffer + off + key_count;
1141 }
#define EMPTY_KEY_64

◆ get_matching_group_value_perfect_hash_keyless()

ALWAYS_INLINE int64_t* get_matching_group_value_perfect_hash_keyless ( int64_t *  groups_buffer,
const uint32_t  hashed_index,
const uint32_t  row_size_quad 
)

For a particular hashed index (only used with multi-column perfect hash group by) it returns the row-wise offset of the group in the output buffer. Since it is intended for keyless hash use, it assumes there is no group columns prepending the output buffer.

Definition at line 1149 of file RuntimeFunctions.cpp.

1152  {
1153  return groups_buffer + row_size_quad * hashed_index;
1154 }

◆ get_thread_index()

GPU_RT_STUB int64_t get_thread_index ( )

Definition at line 919 of file RuntimeFunctions.cpp.

919  {
920  return 0;
921 }

◆ init_shared_mem()

int64_t* init_shared_mem ( const int64_t *  global_groups_buffer,
const int32_t  groups_buffer_size 
)

Definition at line 962 of file RuntimeFunctions.cpp.

References __attribute__().

963  {
964  return nullptr;
965 }
+ Here is the call graph for this function:

◆ key_for_string_encoded()

ALWAYS_INLINE DEVICE int32_t key_for_string_encoded ( const int32_t  str_id)

Definition at line 1241 of file RuntimeFunctions.cpp.

1241  {
1242  return str_id;
1243 }

◆ linear_probabilistic_count()

NEVER_INLINE void linear_probabilistic_count ( uint8_t *  bitmap,
const uint32_t  bitmap_bytes,
const uint8_t *  key_bytes,
const uint32_t  key_len 
)

Definition at line 1296 of file RuntimeFunctions.cpp.

References __attribute__(), and MurmurHash1().

1299  {
1300  const uint32_t bit_pos = MurmurHash1(key_bytes, key_len, 0) % (bitmap_bytes * 8);
1301  const uint32_t word_idx = bit_pos / 32;
1302  const uint32_t bit_idx = bit_pos % 32;
1303  reinterpret_cast<uint32_t*>(bitmap)[word_idx] |= 1 << bit_idx;
1304 }
NEVER_INLINE DEVICE uint32_t MurmurHash1(const void *key, int len, const uint32_t seed)
Definition: MurmurHash.cpp:20
+ Here is the call graph for this function:

◆ load_avg_decimal()

ALWAYS_INLINE double load_avg_decimal ( const int64_t *  sum,
const int64_t *  count,
const double  null_val,
const uint32_t  scale 
)

Definition at line 1275 of file RuntimeFunctions.cpp.

1278  {
1279  return *count != 0 ? (static_cast<double>(*sum) / pow(10, scale)) / *count : null_val;
1280 }

◆ load_avg_double()

ALWAYS_INLINE double load_avg_double ( const int64_t *  agg,
const int64_t *  count,
const double  null_val 
)

Definition at line 1282 of file RuntimeFunctions.cpp.

1284  {
1285  return *count != 0 ? *reinterpret_cast<const double*>(may_alias_ptr(agg)) / *count
1286  : null_val;
1287 }

◆ load_avg_float()

ALWAYS_INLINE double load_avg_float ( const int32_t *  agg,
const int32_t *  count,
const double  null_val 
)

Definition at line 1289 of file RuntimeFunctions.cpp.

1291  {
1292  return *count != 0 ? *reinterpret_cast<const float*>(may_alias_ptr(agg)) / *count
1293  : null_val;
1294 }

◆ load_avg_int()

ALWAYS_INLINE double load_avg_int ( const int64_t *  sum,
const int64_t *  count,
const double  null_val 
)

Definition at line 1269 of file RuntimeFunctions.cpp.

1271  {
1272  return *count != 0 ? static_cast<double>(*sum) / *count : null_val;
1273 }

◆ load_double()

ALWAYS_INLINE double load_double ( const int64_t *  agg)

Definition at line 1261 of file RuntimeFunctions.cpp.

1261  {
1262  return *reinterpret_cast<const double*>(may_alias_ptr(agg));
1263 }

◆ load_float()

ALWAYS_INLINE float load_float ( const int32_t *  agg)

Definition at line 1265 of file RuntimeFunctions.cpp.

1265  {
1266  return *reinterpret_cast<const float*>(may_alias_ptr(agg));
1267 }

◆ logical_and()

ALWAYS_INLINE int8_t logical_and ( const int8_t  lhs,
const int8_t  rhs,
const int8_t  null_val 
)

Definition at line 270 of file RuntimeFunctions.cpp.

272  {
273  if (lhs == null_val) {
274  return rhs == 0 ? rhs : null_val;
275  }
276  if (rhs == null_val) {
277  return lhs == 0 ? lhs : null_val;
278  }
279  return (lhs && rhs) ? 1 : 0;
280 }

◆ logical_not()

ALWAYS_INLINE int8_t logical_not ( const int8_t  operand,
const int8_t  null_val 
)

Definition at line 266 of file RuntimeFunctions.cpp.

266  {
267  return operand == null_val ? operand : (operand ? 0 : 1);
268 }

◆ logical_or()

ALWAYS_INLINE int8_t logical_or ( const int8_t  lhs,
const int8_t  rhs,
const int8_t  null_val 
)

Definition at line 282 of file RuntimeFunctions.cpp.

284  {
285  if (lhs == null_val) {
286  return rhs == 0 ? null_val : rhs;
287  }
288  if (rhs == null_val) {
289  return lhs == 0 ? null_val : lhs;
290  }
291  return (lhs || rhs) ? 1 : 0;
292 }

◆ multifrag_query()

void multifrag_query ( const int8_t ***  col_buffers,
const uint64_t *  num_fragments,
const int64_t *  num_rows,
const uint64_t *  frag_row_offsets,
const int32_t *  max_matched,
int32_t *  total_matched,
const int64_t *  init_agg_value,
int64_t **  out,
int32_t *  error_code,
const uint32_t *  num_tables_ptr,
const int64_t *  join_hash_tables 
)

Definition at line 1364 of file RuntimeFunctions.cpp.

1374  {
1375  for (uint32_t i = 0; i < *num_fragments; ++i) {
1376  query_stub(col_buffers ? col_buffers[i] : nullptr,
1377  &num_rows[i * (*num_tables_ptr)],
1378  &frag_row_offsets[i * (*num_tables_ptr)],
1379  max_matched,
1380  init_agg_value,
1381  out,
1382  i,
1383  join_hash_tables,
1384  total_matched,
1385  error_code);
1386  }
1387 }
const int8_t const int64_t const uint64_t const int32_t const int64_t int64_t uint32_t const int64_t * join_hash_tables
const int8_t const int64_t * num_rows
const int8_t const int64_t const uint64_t const int32_t const int64_t int64_t uint32_t const int64_t int32_t int32_t * total_matched
const int8_t const int64_t const uint64_t const int32_t const int64_t int64_t uint32_t const int64_t int32_t * error_code
const int8_t const int64_t const uint64_t const int32_t * max_matched
const int8_t const int64_t const uint64_t const int32_t const int64_t * init_agg_value
const int8_t const int64_t const uint64_t * frag_row_offsets
const int8_t const int64_t const uint64_t const int32_t const int64_t int64_t ** out

◆ multifrag_query_hoisted_literals()

void multifrag_query_hoisted_literals ( const int8_t ***  col_buffers,
const uint64_t *  num_fragments,
const int8_t *  literals,
const int64_t *  num_rows,
const uint64_t *  frag_row_offsets,
const int32_t *  max_matched,
int32_t *  total_matched,
const int64_t *  init_agg_value,
int64_t **  out,
int32_t *  error_code,
const uint32_t *  num_tables_ptr,
const int64_t *  join_hash_tables 
)

Definition at line 1323 of file RuntimeFunctions.cpp.

References __attribute__(), error_code, frag_idx, frag_row_offsets, init_agg_value, join_hash_tables, max_matched, num_rows, out, and total_matched.

1334  {
1335  for (uint32_t i = 0; i < *num_fragments; ++i) {
1336  query_stub_hoisted_literals(col_buffers ? col_buffers[i] : nullptr,
1337  literals,
1338  &num_rows[i * (*num_tables_ptr)],
1339  &frag_row_offsets[i * (*num_tables_ptr)],
1340  max_matched,
1341  init_agg_value,
1342  out,
1343  i,
1344  join_hash_tables,
1345  total_matched,
1346  error_code);
1347  }
1348 }
const int8_t const int64_t const uint64_t const int32_t const int64_t int64_t uint32_t const int64_t * join_hash_tables
const int8_t const int64_t * num_rows
const int8_t const int64_t const uint64_t const int32_t const int64_t int64_t uint32_t const int64_t int32_t int32_t * total_matched
const int8_t const int64_t const uint64_t const int32_t const int64_t int64_t uint32_t const int64_t int32_t * error_code
const int8_t const int64_t const uint64_t const int32_t * max_matched
const int8_t const int64_t const uint64_t const int32_t const int64_t * init_agg_value
const int8_t * literals
const int8_t const int64_t const uint64_t * frag_row_offsets
const int8_t const int64_t const uint64_t const int32_t const int64_t int64_t ** out
+ Here is the call graph for this function:

◆ percent_window_func()

ALWAYS_INLINE double percent_window_func ( const int64_t  output_buff,
const int64_t  pos 
)

Definition at line 1256 of file RuntimeFunctions.cpp.

1257  {
1258  return reinterpret_cast<const double*>(output_buff)[pos];
1259 }

◆ record_error_code()

ALWAYS_INLINE int32_t record_error_code ( const int32_t  err_code,
int32_t *  error_codes 
)

Definition at line 933 of file RuntimeFunctions.cpp.

References __attribute__().

934  {
935  // NB: never override persistent error codes (with code greater than zero).
936  // On GPU, a projection query with a limit can run out of slots without it
937  // being an actual error if the limit has been hit. If a persistent error
938  // (division by zero, for example) occurs before running out of slots, we
939  // have to avoid overriding it, because there's a risk that the query would
940  // go through if we override with a potentially benign out-of-slots code.
941  if (err_code && error_codes[pos_start_impl(nullptr)] <= 0) {
942  error_codes[pos_start_impl(nullptr)] = err_code;
943  }
944  return err_code;
945 }
+ Here is the call graph for this function:

◆ row_number_window_func()

ALWAYS_INLINE int64_t row_number_window_func ( const int64_t  output_buff,
const int64_t  pos 
)

Definition at line 1251 of file RuntimeFunctions.cpp.

1252  {
1253  return reinterpret_cast<const int64_t*>(output_buff)[pos];
1254 }

◆ sample_ratio()

ALWAYS_INLINE DEVICE bool sample_ratio ( const double  proportion,
const int64_t  row_offset 
)

Definition at line 1245 of file RuntimeFunctions.cpp.

Referenced by ScalarExprVisitor< std::unordered_set< InputColDescriptor > >::visit().

1246  {
1247  const int64_t threshold = 4294967296 * proportion;
1248  return (row_offset * 2654435761) % 4294967296 < threshold;
1249 }
+ Here is the caller graph for this function:

◆ scale_decimal_down_not_nullable()

ALWAYS_INLINE int64_t scale_decimal_down_not_nullable ( const int64_t  operand,
const int64_t  scale,
const int64_t  null_val 
)

Definition at line 195 of file RuntimeFunctions.cpp.

197  {
198  int64_t tmp = scale >> 1;
199  tmp = operand >= 0 ? operand + tmp : operand - tmp;
200  return tmp / scale;
201 }

◆ scale_decimal_down_nullable()

ALWAYS_INLINE int64_t scale_decimal_down_nullable ( const int64_t  operand,
const int64_t  scale,
const int64_t  null_val 
)

Definition at line 182 of file RuntimeFunctions.cpp.

184  {
185  // rounded scale down of a decimal
186  if (operand == null_val) {
187  return null_val;
188  }
189 
190  int64_t tmp = scale >> 1;
191  tmp = operand >= 0 ? operand + tmp : operand - tmp;
192  return tmp / scale;
193 }

◆ scale_decimal_up()

ALWAYS_INLINE int64_t scale_decimal_up ( const int64_t  operand,
const uint64_t  scale,
const int64_t  operand_null_val,
const int64_t  result_null_val 
)

Definition at line 175 of file RuntimeFunctions.cpp.

178  {
179  return operand != operand_null_val ? operand * scale : result_null_val;
180 }

◆ set_matching_group_value_perfect_hash_columnar()

ALWAYS_INLINE void set_matching_group_value_perfect_hash_columnar ( int64_t *  groups_buffer,
const uint32_t  hashed_index,
const int64_t *  key,
const uint32_t  key_count,
const uint32_t  entry_count 
)

Definition at line 1160 of file RuntimeFunctions.cpp.

References EMPTY_KEY_64.

1165  {
1166  if (groups_buffer[hashed_index] == EMPTY_KEY_64) {
1167  for (uint32_t i = 0; i < key_count; i++) {
1168  groups_buffer[i * entry_count + hashed_index] = key[i];
1169  }
1170  }
1171 }
#define EMPTY_KEY_64

◆ string_pack()

// Packs a string pointer and its length into one 64-bit value: the low 48
// bits hold the pointer, the high 16 bits the length. Assumes the platform
// uses at most 48 significant virtual-address bits and that `len` fits in
// 16 bits — NOTE(review): confirm both invariants hold for all callers.
ALWAYS_INLINE uint64_t string_pack(const int8_t* ptr, const int32_t len) {
  // Drop the meaningless cv-qualifiers the original put on the cast targets
  // (`reinterpret_cast<const uint64_t>` / `static_cast<const uint64_t>`):
  // a prvalue of non-class type is never cv-qualified.
  const uint64_t addr_bits =
      reinterpret_cast<uint64_t>(ptr) & 0xffffffffffffULL;
  const uint64_t len_bits = static_cast<uint64_t>(len) << 48;
  return addr_bits | len_bits;
}

◆ sync_threadblock()

GPU_RT_STUB void sync_threadblock ( )

Definition at line 891 of file RuntimeFunctions.cpp.

Referenced by GpuSharedMemCodeBuilder::codegenInitialization(), and GpuSharedMemCodeBuilder::codegenReduction().

// CPU-build no-op stub; GPU_RT_STUB marks entry points whose real
// implementation exists only in the device runtime. NOTE(review): presumably
// a thread-block barrier on GPU — confirm against the CUDA runtime module.
891 {}
+ Here is the caller graph for this function:

◆ sync_warp()

GPU_RT_STUB void sync_warp ( )

Definition at line 889 of file RuntimeFunctions.cpp.

// CPU-build no-op stub: provides a linkable symbol so shared query code
// compiles on the host; the device runtime supplies the real warp sync.
889 {}

◆ sync_warp_protected()

GPU_RT_STUB void sync_warp_protected ( int64_t  thread_pos,
int64_t  row_count 
)

Definition at line 890 of file RuntimeFunctions.cpp.

// CPU-build no-op stub; the parameters exist only for signature parity with
// the device-side implementation and are ignored here.
890 {}

◆ thread_warp_idx()

GPU_RT_STUB int8_t thread_warp_idx ( const int8_t  warp_sz)

Definition at line 915 of file RuntimeFunctions.cpp.

// On the CPU there is no warp structure, so the stub always reports index 0;
// `warp_sz` is ignored.
915  {
916  return 0;
917 }

◆ write_back_non_grouped_agg()

GPU_RT_STUB void write_back_non_grouped_agg ( int64_t *  input_buffer,
int64_t *  output_buffer,
const int32_t  num_agg_cols 
)

Definition at line 893 of file RuntimeFunctions.cpp.

// CPU-build no-op stub; the device runtime provides the real copy-back of
// non-grouped aggregate values from the input to the output buffer.
895  {};

Variable Documentation

◆ agg_col_count

◆ blocks_share_memory

const int64_t const uint32_t const uint32_t const uint32_t const bool const bool blocks_share_memory

Definition at line 981 of file RuntimeFunctions.cpp.

◆ error_code

◆ frag_idx

const int64_t const uint64_t const int32_t const int64_t int64_t uint32_t frag_idx
Initial value: (none — the brace/`assert(groups_buffer)` fragment Doxygen
showed here appears to be mis-extracted from the body of the enclosing query
template function, not a real initializer for this parameter)

Definition at line 987 of file RuntimeFunctions.cpp.

Referenced by multifrag_query_hoisted_literals(), query_group_by_template_impl(), query_template_impl(), and Executor::skipFragment().

◆ frag_row_offsets

const int64_t const uint64_t * frag_row_offsets

Definition at line 1308 of file RuntimeFunctions.cpp.

Referenced by multifrag_query_hoisted_literals().

◆ groups_buffer_entry_count

◆ groups_buffer_size

const int32_t groups_buffer_size
Initial value: (none — the `return groups_buffer` fragment Doxygen showed
here appears to be mis-extracted from an adjacent function body, not a real
initializer for this parameter)

Definition at line 951 of file RuntimeFunctions.cpp.

Referenced by QueryMemoryInitializer::allocateCountDistinctSet(), copy_group_by_buffers_from_gpu(), and create_dev_group_by_buffers().

◆ init_agg_value

const int64_t const uint64_t const int32_t const int64_t * init_agg_value

Definition at line 1308 of file RuntimeFunctions.cpp.

Referenced by multifrag_query_hoisted_literals().

◆ init_vals

const int64_t * init_vals

Definition at line 969 of file RuntimeFunctions.cpp.

Referenced by QueryMemoryInitializer::getNumBuffers().

◆ join_hash_tables

const int64_t const uint64_t const int32_t const int64_t int64_t uint32_t const int64_t * join_hash_tables

◆ key_qw_count

◆ keyless

const int64_t const uint32_t const uint32_t const uint32_t const bool keyless

Definition at line 969 of file RuntimeFunctions.cpp.

Referenced by GroupByAndAggregate::getKeylessInfo().

◆ literals

const int8_t* literals

◆ max_matched

const int64_t const uint64_t const int32_t * max_matched

◆ num_rows

const int64_t * num_rows

Definition at line 1308 of file RuntimeFunctions.cpp.

Referenced by Catalog_Namespace::Catalog::addForeignTableDetails(), DictionaryValueConverter< TARGET_TYPE >::allocateColumnarBuffer(), StringValueConverter::allocateColumnarData(), ArrayValueConverter< ELEMENT_CONVERTER >::allocateColumnarData(), GeoPointValueConverter::allocateColumnarData(), GeoLinestringValueConverter::allocateColumnarData(), GeoPolygonValueConverter::allocateColumnarData(), GeoMultiPolygonValueConverter::allocateColumnarData(), import_export::DataStreamSink::archivePlumber(), SpeculativeTopNMap::asRows(), Catalog_Namespace::Catalog::buildForeignServerMap(), ColumnarResults::ColumnarResults(), Catalog_Namespace::Catalog::dropForeignServer(), Catalog_Namespace::Catalog::dropFsiSchemasAndTables(), Executor::executePlanWithGroupBy(), Executor::executePlanWithoutGroupBy(), Executor::fetchUnionChunks(), Fragmenter_Namespace::FixedLenArrayChunkConverter::FixedLenArrayChunkConverter(), get_num_allocated_rows_from_gpu(), Catalog_Namespace::Catalog::getForeignServersForUser(), Catalog_Namespace::SysCatalog::getGranteesOfSharedDashboards(), Executor::getRowCountAndOffsetForAllFrags(), foreign_storage::anonymous_namespace{LazyParquetImporter.cpp}::import_row_group(), QueryExecutionContext::launchCpuCode(), QueryExecutionContext::launchGpuCode(), multifrag_query_hoisted_literals(), Parser::InsertIntoTableAsSelectStmt::populateData(), QueryExecutionContext::QueryExecutionContext(), foreign_storage::anonymous_namespace{LazyParquetImporter.cpp}::read_parquet_metadata_into_import_buffer(), SqliteMemDatabase::runSelect(), com.mapd.tests.DistributedConcurrencyTest::runTest(), com.mapd.tests.SelectUpdateDeleteDifferentTables::runTest(), com.mapd.tests.AlterDropTruncateValidateConcurrencyTest::runTest(), com.mapd.tests.CtasItasSelectUpdelConcurrencyTest::runTest(), com.mapd.tests.UpdateDeleteInsertConcurrencyTest::runTest(), Catalog_Namespace::Catalog::setForeignServerProperty(), Fragmenter_Namespace::StringChunkConverter::StringChunkConverter(), 
Fragmenter_Namespace::InsertOrderFragmenter::updateColumns(), foreign_storage::anonymous_namespace{LazyParquetImporter.cpp}::validate_parquet_metadata(), anonymous_namespace{ExternalExecutor.cpp}::vt_column(), anonymous_namespace{ExternalExecutor.cpp}::vt_next(), and TargetValueConverter::~TargetValueConverter().

◆ out

◆ src

◆ sz

◆ total_matched

const int64_t const uint64_t const int32_t const int64_t int64_t uint32_t const int64_t int32_t int32_t * total_matched
Initial value:
{
assert(col_buffers || literals || num_rows || frag_row_offsets || max_matched ||
const int8_t const int64_t const uint64_t const int32_t const int64_t int64_t uint32_t const int64_t * join_hash_tables
const int8_t const int64_t * num_rows
const int8_t const int64_t const uint64_t const int32_t const int64_t int64_t uint32_t const int64_t int32_t int32_t * total_matched
const int8_t const int64_t const uint64_t const int32_t const int64_t int64_t uint32_t const int64_t int32_t * error_code
const int8_t const int64_t const uint64_t const int32_t * max_matched
const int64_t const uint32_t const uint32_t const uint32_t const bool const bool const int32_t frag_idx
const int8_t const int64_t const uint64_t const int32_t const int64_t * init_agg_value
const int8_t * literals
const int8_t const int64_t const uint64_t * frag_row_offsets
const int8_t const int64_t const uint64_t const int32_t const int64_t int64_t ** out

Definition at line 1317 of file RuntimeFunctions.cpp.

Referenced by QueryExecutionContext::launchCpuCode(), multifrag_query_hoisted_literals(), query_group_by_template_impl(), and query_template_impl().

◆ warp_size

const int64_t const uint32_t const uint32_t const uint32_t const bool const int8_t warp_size