OmniSciDB  d2f719934e
RuntimeFunctions.cpp File Reference
#include "RuntimeFunctions.h"
#include "../Shared/funcannotations.h"
#include "BufferCompaction.h"
#include "HyperLogLogRank.h"
#include "MurmurHash.h"
#include "Shared/quantile.h"
#include "TypePunning.h"
#include <algorithm>
#include <atomic>
#include <chrono>
#include <cmath>
#include <cstdio>
#include <cstring>
#include <thread>
#include <tuple>
#include "DecodersImpl.h"
#include "GeoOpsRuntime.cpp"
#include "GroupByRuntime.cpp"
#include "JoinHashTable/Runtime/JoinHashTableQueryRuntime.cpp"
#include "TopKRuntime.cpp"


Macros

#define DEF_ARITH_NULLABLE(type, null_type, opname, opsym)
 
#define DEF_ARITH_NULLABLE_LHS(type, null_type, opname, opsym)
 
#define DEF_ARITH_NULLABLE_RHS(type, null_type, opname, opsym)
 
#define DEF_CMP_NULLABLE(type, null_type, opname, opsym)
 
#define DEF_CMP_NULLABLE_LHS(type, null_type, opname, opsym)
 
#define DEF_CMP_NULLABLE_RHS(type, null_type, opname, opsym)
 
#define DEF_SAFE_DIV_NULLABLE(type, null_type, opname)
 
#define DEF_BINARY_NULLABLE_ALL_OPS(type, null_type)
 
#define DEF_UMINUS_NULLABLE(type, null_type)
 
#define DEF_CAST_NULLABLE(from_type, to_type)
 
#define DEF_CAST_SCALED_NULLABLE(from_type, to_type)
 
#define DEF_CAST_NULLABLE_BIDIR(type1, type2)
 
#define DEF_ROUND_NULLABLE(from_type, to_type)
 
#define GPU_RT_STUB   NEVER_INLINE __attribute__((optnone))
 
#define DEF_AGG_MAX_INT(n)
 
#define DEF_AGG_MIN_INT(n)
 
#define DEF_AGG_ID_INT(n)
 
#define DEF_CHECKED_SINGLE_AGG_ID_INT(n)
 
#define DEF_WRITE_PROJECTION_INT(n)
 
#define DEF_SKIP_AGG_ADD(base_agg_func)
 
#define DEF_SKIP_AGG(base_agg_func)
 
#define DATA_T   int64_t
 
#define DATA_T   int32_t
 
#define DATA_T   int16_t
 
#define DATA_T   int8_t
 
#define DEF_SKIP_AGG_ADD(base_agg_func)
 
#define DEF_SKIP_AGG(base_agg_func)
 
#define DATA_T   double
 
#define ADDR_T   int64_t
 
#define DATA_T   float
 
#define ADDR_T   int32_t
 
#define DEF_SHARED_AGG_RET_STUBS(base_agg_func)
 
#define DEF_SHARED_AGG_STUBS(base_agg_func)
 

Functions

ALWAYS_INLINE int64_t scale_decimal_up (const int64_t operand, const uint64_t scale, const int64_t operand_null_val, const int64_t result_null_val)
 
ALWAYS_INLINE int64_t scale_decimal_down_nullable (const int64_t operand, const int64_t scale, const int64_t null_val)
 
ALWAYS_INLINE int64_t scale_decimal_down_not_nullable (const int64_t operand, const int64_t scale, const int64_t null_val)
 
ALWAYS_INLINE int64_t floor_div_lhs (const int64_t dividend, const int64_t divisor)
 
ALWAYS_INLINE int64_t floor_div_nullable_lhs (const int64_t dividend, const int64_t divisor, const int64_t null_val)
 
ALWAYS_INLINE int8_t logical_not (const int8_t operand, const int8_t null_val)
 
ALWAYS_INLINE int8_t logical_and (const int8_t lhs, const int8_t rhs, const int8_t null_val)
 
ALWAYS_INLINE int8_t logical_or (const int8_t lhs, const int8_t rhs, const int8_t null_val)
 
ALWAYS_INLINE uint64_t agg_count (uint64_t *agg, const int64_t)
 
ALWAYS_INLINE void agg_count_distinct_bitmap (int64_t *agg, const int64_t val, const int64_t min_val)
 
GPU_RT_STUB void agg_count_distinct_bitmap_gpu (int64_t *, const int64_t, const int64_t, const int64_t, const int64_t, const uint64_t, const uint64_t)
 
NEVER_INLINE void agg_approximate_count_distinct (int64_t *agg, const int64_t key, const uint32_t b)
 
GPU_RT_STUB void agg_approximate_count_distinct_gpu (int64_t *, const int64_t, const uint32_t, const int64_t, const int64_t)
 
ALWAYS_INLINE int8_t bit_is_set (const int64_t bitset, const int64_t val, const int64_t min_val, const int64_t max_val, const int64_t null_val, const int8_t null_bool_val)
 
ALWAYS_INLINE int64_t agg_sum (int64_t *agg, const int64_t val)
 
ALWAYS_INLINE void agg_max (int64_t *agg, const int64_t val)
 
ALWAYS_INLINE void agg_min (int64_t *agg, const int64_t val)
 
ALWAYS_INLINE void agg_id (int64_t *agg, const int64_t val)
 
ALWAYS_INLINE int8_t * agg_id_varlen (int8_t *varlen_buffer, const int64_t offset, const int8_t *value, const int64_t size_bytes)
 
ALWAYS_INLINE int32_t checked_single_agg_id (int64_t *agg, const int64_t val, const int64_t null_val)
 
ALWAYS_INLINE void agg_count_distinct_bitmap_skip_val (int64_t *agg, const int64_t val, const int64_t min_val, const int64_t skip_val)
 
GPU_RT_STUB void agg_count_distinct_bitmap_skip_val_gpu (int64_t *, const int64_t, const int64_t, const int64_t, const int64_t, const int64_t, const uint64_t, const uint64_t)
 
ALWAYS_INLINE uint32_t agg_count_int32 (uint32_t *agg, const int32_t)
 
ALWAYS_INLINE int32_t agg_sum_int32 (int32_t *agg, const int32_t val)
 
ALWAYS_INLINE int64_t agg_sum_skip_val (int64_t *agg, const int64_t val, const int64_t skip_val)
 
ALWAYS_INLINE int32_t agg_sum_int32_skip_val (int32_t *agg, const int32_t val, const int32_t skip_val)
 
ALWAYS_INLINE uint64_t agg_count_skip_val (uint64_t *agg, const int64_t val, const int64_t skip_val)
 
ALWAYS_INLINE uint32_t agg_count_int32_skip_val (uint32_t *agg, const int32_t val, const int32_t skip_val)
 
ALWAYS_INLINE uint64_t agg_count_double (uint64_t *agg, const double val)
 
ALWAYS_INLINE void agg_sum_double (int64_t *agg, const double val)
 
ALWAYS_INLINE void agg_max_double (int64_t *agg, const double val)
 
ALWAYS_INLINE void agg_min_double (int64_t *agg, const double val)
 
ALWAYS_INLINE void agg_id_double (int64_t *agg, const double val)
 
ALWAYS_INLINE int32_t checked_single_agg_id_double (int64_t *agg, const double val, const double null_val)
 
ALWAYS_INLINE uint32_t agg_count_float (uint32_t *agg, const float val)
 
ALWAYS_INLINE void agg_sum_float (int32_t *agg, const float val)
 
ALWAYS_INLINE void agg_max_float (int32_t *agg, const float val)
 
ALWAYS_INLINE void agg_min_float (int32_t *agg, const float val)
 
ALWAYS_INLINE void agg_id_float (int32_t *agg, const float val)
 
ALWAYS_INLINE int32_t checked_single_agg_id_float (int32_t *agg, const float val, const float null_val)
 
ALWAYS_INLINE uint64_t agg_count_double_skip_val (uint64_t *agg, const double val, const double skip_val)
 
ALWAYS_INLINE uint32_t agg_count_float_skip_val (uint32_t *agg, const float val, const float skip_val)
 
ALWAYS_INLINE int64_t decimal_floor (const int64_t x, const int64_t scale)
 
ALWAYS_INLINE int64_t decimal_ceil (const int64_t x, const int64_t scale)
 
GPU_RT_STUB int8_t * agg_id_varlen_shared (int8_t *varlen_buffer, const int64_t offset, const int8_t *value, const int64_t size_bytes)
 
GPU_RT_STUB int32_t checked_single_agg_id_shared (int64_t *agg, const int64_t val, const int64_t null_val)
 
GPU_RT_STUB int32_t checked_single_agg_id_int32_shared (int32_t *agg, const int32_t val, const int32_t null_val)
 
GPU_RT_STUB int32_t checked_single_agg_id_int16_shared (int16_t *agg, const int16_t val, const int16_t null_val)
 
GPU_RT_STUB int32_t checked_single_agg_id_int8_shared (int8_t *agg, const int8_t val, const int8_t null_val)
 
GPU_RT_STUB int32_t checked_single_agg_id_double_shared (int64_t *agg, const double val, const double null_val)
 
GPU_RT_STUB int32_t checked_single_agg_id_float_shared (int32_t *agg, const float val, const float null_val)
 
GPU_RT_STUB void agg_max_int16_skip_val_shared (int16_t *agg, const int16_t val, const int16_t skip_val)
 
GPU_RT_STUB void agg_max_int8_skip_val_shared (int8_t *agg, const int8_t val, const int8_t skip_val)
 
GPU_RT_STUB void agg_min_int16_skip_val_shared (int16_t *agg, const int16_t val, const int16_t skip_val)
 
GPU_RT_STUB void agg_min_int8_skip_val_shared (int8_t *agg, const int8_t val, const int8_t skip_val)
 
GPU_RT_STUB void agg_id_double_shared_slow (int64_t *agg, const double *val)
 
GPU_RT_STUB int64_t agg_sum_shared (int64_t *agg, const int64_t val)
 
GPU_RT_STUB int64_t agg_sum_skip_val_shared (int64_t *agg, const int64_t val, const int64_t skip_val)
 
GPU_RT_STUB int32_t agg_sum_int32_shared (int32_t *agg, const int32_t val)
 
GPU_RT_STUB int32_t agg_sum_int32_skip_val_shared (int32_t *agg, const int32_t val, const int32_t skip_val)
 
GPU_RT_STUB void agg_sum_double_shared (int64_t *agg, const double val)
 
GPU_RT_STUB void agg_sum_double_skip_val_shared (int64_t *agg, const double val, const double skip_val)
 
GPU_RT_STUB void agg_sum_float_shared (int32_t *agg, const float val)
 
GPU_RT_STUB void agg_sum_float_skip_val_shared (int32_t *agg, const float val, const float skip_val)
 
GPU_RT_STUB void force_sync ()
 
GPU_RT_STUB void sync_warp ()
 
GPU_RT_STUB void sync_warp_protected (int64_t thread_pos, int64_t row_count)
 
GPU_RT_STUB void sync_threadblock ()
 
GPU_RT_STUB void write_back_non_grouped_agg (int64_t *input_buffer, int64_t *output_buffer, const int32_t num_agg_cols)
 
NEVER_INLINE int32_t pos_start_impl (int32_t *error_code)
 
NEVER_INLINE int32_t group_buff_idx_impl ()
 
NEVER_INLINE int32_t pos_step_impl ()
 
GPU_RT_STUB int8_t thread_warp_idx (const int8_t warp_sz)
 
GPU_RT_STUB int64_t get_thread_index ()
 
GPU_RT_STUB int64_t * declare_dynamic_shared_memory ()
 
GPU_RT_STUB int64_t get_block_index ()
 
ALWAYS_INLINE void record_error_code (const int32_t err_code, int32_t *error_codes)
 
ALWAYS_INLINE int32_t get_error_code (int32_t *error_codes)
 
NEVER_INLINE const int64_t * init_shared_mem_nop (const int64_t *groups_buffer, const int32_t groups_buffer_size)
 
NEVER_INLINE void write_back_nop (int64_t *dest, int64_t *src, const int32_t sz)
 
int64_t * init_shared_mem (const int64_t *global_groups_buffer, const int32_t groups_buffer_size)
 
NEVER_INLINE void init_group_by_buffer_gpu (int64_t *groups_buffer, const int64_t *init_vals, const uint32_t groups_buffer_entry_count, const uint32_t key_qw_count, const uint32_t agg_col_count, const bool keyless, const int8_t warp_size)
 
NEVER_INLINE void init_columnar_group_by_buffer_gpu (int64_t *groups_buffer, const int64_t *init_vals, const uint32_t groups_buffer_entry_count, const uint32_t key_qw_count, const uint32_t agg_col_count, const bool keyless, const bool blocks_share_memory, const int32_t frag_idx)
 
NEVER_INLINE void init_group_by_buffer_impl (int64_t *groups_buffer, const int64_t *init_vals, const uint32_t groups_buffer_entry_count, const uint32_t key_qw_count, const uint32_t agg_col_count, const bool keyless, const int8_t warp_size)
 
template<typename T >
ALWAYS_INLINE int64_t * get_matching_group_value (int64_t *groups_buffer, const uint32_t h, const T *key, const uint32_t key_count, const uint32_t row_size_quad)
 
ALWAYS_INLINE int64_t * get_matching_group_value (int64_t *groups_buffer, const uint32_t h, const int64_t *key, const uint32_t key_count, const uint32_t key_width, const uint32_t row_size_quad)
 
template<typename T >
ALWAYS_INLINE int32_t get_matching_group_value_columnar_slot (int64_t *groups_buffer, const uint32_t entry_count, const uint32_t h, const T *key, const uint32_t key_count)
 
ALWAYS_INLINE int32_t get_matching_group_value_columnar_slot (int64_t *groups_buffer, const uint32_t entry_count, const uint32_t h, const int64_t *key, const uint32_t key_count, const uint32_t key_width)
 
ALWAYS_INLINE int64_t * get_matching_group_value_columnar (int64_t *groups_buffer, const uint32_t h, const int64_t *key, const uint32_t key_qw_count, const size_t entry_count)
 
ALWAYS_INLINE int64_t * get_matching_group_value_perfect_hash (int64_t *groups_buffer, const uint32_t hashed_index, const int64_t *key, const uint32_t key_count, const uint32_t row_size_quad)
 
ALWAYS_INLINE int64_t * get_matching_group_value_perfect_hash_keyless (int64_t *groups_buffer, const uint32_t hashed_index, const uint32_t row_size_quad)
 
ALWAYS_INLINE void set_matching_group_value_perfect_hash_columnar (int64_t *groups_buffer, const uint32_t hashed_index, const int64_t *key, const uint32_t key_count, const uint32_t entry_count)
 
ALWAYS_INLINE int64_t * get_group_value_fast_keyless (int64_t *groups_buffer, const int64_t key, const int64_t min_key, const int64_t, const uint32_t row_size_quad)
 
ALWAYS_INLINE int64_t * get_group_value_fast_keyless_semiprivate (int64_t *groups_buffer, const int64_t key, const int64_t min_key, const int64_t, const uint32_t row_size_quad, const uint8_t thread_warp_idx, const uint8_t warp_size)
 
ALWAYS_INLINE int8_t * extract_str_ptr (const uint64_t str_and_len)
 
ALWAYS_INLINE int32_t extract_str_len (const uint64_t str_and_len)
 
NEVER_INLINE int8_t * extract_str_ptr_noinline (const uint64_t str_and_len)
 
NEVER_INLINE int32_t extract_str_len_noinline (const uint64_t str_and_len)
 
ALWAYS_INLINE uint64_t string_pack (const int8_t *ptr, const int32_t len)
 
ALWAYS_INLINE DEVICE int32_t char_length (const char *str, const int32_t str_len)
 
ALWAYS_INLINE DEVICE int32_t char_length_nullable (const char *str, const int32_t str_len, const int32_t int_null)
 
ALWAYS_INLINE DEVICE int32_t key_for_string_encoded (const int32_t str_id)
 
ALWAYS_INLINE DEVICE bool sample_ratio (const double proportion, const int64_t row_offset)
 
ALWAYS_INLINE DEVICE double width_bucket (const double target_value, const double lower_bound, const double upper_bound, const double scale_factor, const int32_t partition_count)
 
ALWAYS_INLINE DEVICE double width_bucket_reversed (const double target_value, const double lower_bound, const double upper_bound, const double scale_factor, const int32_t partition_count)
 
ALWAYS_INLINE double width_bucket_nullable (const double target_value, const double lower_bound, const double upper_bound, const double scale_factor, const int32_t partition_count, const double null_val)
 
ALWAYS_INLINE double width_bucket_reversed_nullable (const double target_value, const double lower_bound, const double upper_bound, const double scale_factor, const int32_t partition_count, const double null_val)
 
ALWAYS_INLINE DEVICE double width_bucket_no_oob_check (const double target_value, const double lower_bound, const double scale_factor)
 
ALWAYS_INLINE DEVICE double width_bucket_reversed_no_oob_check (const double target_value, const double lower_bound, const double scale_factor)
 
ALWAYS_INLINE DEVICE double width_bucket_expr (const double target_value, const bool reversed, const double lower_bound, const double upper_bound, const int32_t partition_count)
 
ALWAYS_INLINE DEVICE double width_bucket_expr_nullable (const double target_value, const bool reversed, const double lower_bound, const double upper_bound, const int32_t partition_count, const double null_val)
 
ALWAYS_INLINE DEVICE double width_bucket_expr_no_oob_check (const double target_value, const bool reversed, const double lower_bound, const double upper_bound, const int32_t partition_count)
 
ALWAYS_INLINE int64_t row_number_window_func (const int64_t output_buff, const int64_t pos)
 
ALWAYS_INLINE double percent_window_func (const int64_t output_buff, const int64_t pos)
 
ALWAYS_INLINE double load_double (const int64_t *agg)
 
ALWAYS_INLINE float load_float (const int32_t *agg)
 
ALWAYS_INLINE double load_avg_int (const int64_t *sum, const int64_t *count, const double null_val)
 
ALWAYS_INLINE double load_avg_decimal (const int64_t *sum, const int64_t *count, const double null_val, const uint32_t scale)
 
ALWAYS_INLINE double load_avg_double (const int64_t *agg, const int64_t *count, const double null_val)
 
ALWAYS_INLINE double load_avg_float (const int32_t *agg, const int32_t *count, const double null_val)
 
NEVER_INLINE void linear_probabilistic_count (uint8_t *bitmap, const uint32_t bitmap_bytes, const uint8_t *key_bytes, const uint32_t key_len)
 
NEVER_INLINE void query_stub_hoisted_literals (const int8_t **col_buffers, const int8_t *literals, const int64_t *num_rows, const uint64_t *frag_row_offsets, const int32_t *max_matched, const int64_t *init_agg_value, int64_t **out, uint32_t frag_idx, const int64_t *join_hash_tables, int32_t *error_code, int32_t *total_matched)
 
void multifrag_query_hoisted_literals (const int8_t ***col_buffers, const uint64_t *num_fragments, const int8_t *literals, const int64_t *num_rows, const uint64_t *frag_row_offsets, const int32_t *max_matched, int32_t *total_matched, const int64_t *init_agg_value, int64_t **out, int32_t *error_code, const uint32_t *num_tables_ptr, const int64_t *join_hash_tables)
 
NEVER_INLINE void query_stub (const int8_t **col_buffers, const int64_t *num_rows, const uint64_t *frag_row_offsets, const int32_t *max_matched, const int64_t *init_agg_value, int64_t **out, uint32_t frag_idx, const int64_t *join_hash_tables, int32_t *error_code, int32_t *total_matched)
 
void multifrag_query (const int8_t ***col_buffers, const uint64_t *num_fragments, const int64_t *num_rows, const uint64_t *frag_row_offsets, const int32_t *max_matched, int32_t *total_matched, const int64_t *init_agg_value, int64_t **out, int32_t *error_code, const uint32_t *num_tables_ptr, const int64_t *join_hash_tables)
 
ALWAYS_INLINE DEVICE bool check_interrupt ()
 
bool check_interrupt_init (unsigned command)
 

Macro Definition Documentation

#define ADDR_T   int64_t

Definition at line 758 of file RuntimeFunctions.cpp.

#define ADDR_T   int32_t

Definition at line 758 of file RuntimeFunctions.cpp.

#define DATA_T   int64_t

Definition at line 757 of file RuntimeFunctions.cpp.

#define DATA_T   int32_t

Definition at line 757 of file RuntimeFunctions.cpp.

#define DATA_T   int16_t

Definition at line 757 of file RuntimeFunctions.cpp.

#define DATA_T   int8_t

Definition at line 757 of file RuntimeFunctions.cpp.

#define DATA_T   double

Definition at line 757 of file RuntimeFunctions.cpp.

#define DATA_T   float

Definition at line 757 of file RuntimeFunctions.cpp.

#define DEF_AGG_ID_INT(n)
Value:
extern "C" ALWAYS_INLINE void agg_id_int##n(int##n##_t* agg, const int##n##_t val) { \
  *agg = val; \
}

Definition at line 491 of file RuntimeFunctions.cpp.

#define DEF_AGG_MAX_INT(n)
Value:
extern "C" ALWAYS_INLINE void agg_max_int##n(int##n##_t* agg, const int##n##_t val) { \
  *agg = std::max(*agg, val); \
}

Definition at line 471 of file RuntimeFunctions.cpp.

#define DEF_AGG_MIN_INT(n)
Value:
extern "C" ALWAYS_INLINE void agg_min_int##n(int##n##_t* agg, const int##n##_t val) { \
  *agg = std::min(*agg, val); \
}

Definition at line 481 of file RuntimeFunctions.cpp.

#define DEF_ARITH_NULLABLE(type, null_type, opname, opsym)
Value:
extern "C" ALWAYS_INLINE type opname##_##type##_nullable( \
    const type lhs, const type rhs, const null_type null_val) { \
  if (lhs != null_val && rhs != null_val) { \
    return lhs opsym rhs; \
  } \
  return null_val; \
}

Definition at line 44 of file RuntimeFunctions.cpp.
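
For illustration (this particular instantiation is hypothetical, though the file instantiates the macro for the standard integer and floating-point types), DEF_ARITH_NULLABLE(int32_t, int64_t, add, +) would paste together a null-propagating addition:

extern "C" ALWAYS_INLINE int32_t add_int32_t_nullable(const int32_t lhs,
                                                      const int32_t rhs,
                                                      const int64_t null_val) {
  // Either operand equal to the NULL sentinel makes the result NULL.
  if (lhs != null_val && rhs != null_val) {
    return lhs + rhs;
  }
  return null_val;
}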

#define DEF_ARITH_NULLABLE_LHS(type, null_type, opname, opsym)
Value:
extern "C" ALWAYS_INLINE type opname##_##type##_nullable_lhs( \
    const type lhs, const type rhs, const null_type null_val) { \
  if (lhs != null_val) { \
    return lhs opsym rhs; \
  } \
  return null_val; \
}

Definition at line 53 of file RuntimeFunctions.cpp.

#define DEF_ARITH_NULLABLE_RHS(type, null_type, opname, opsym)
Value:
extern "C" ALWAYS_INLINE type opname##_##type##_nullable_rhs( \
    const type lhs, const type rhs, const null_type null_val) { \
  if (rhs != null_val) { \
    return lhs opsym rhs; \
  } \
  return null_val; \
}

Definition at line 62 of file RuntimeFunctions.cpp.

#define DEF_BINARY_NULLABLE_ALL_OPS(type, null_type)

Definition at line 116 of file RuntimeFunctions.cpp.

#define DEF_CAST_NULLABLE(from_type, to_type)
Value:
extern "C" ALWAYS_INLINE to_type cast_##from_type##_to_##to_type##_nullable( \
    const from_type operand, \
    const from_type from_null_val, \
    const to_type to_null_val) { \
  return operand == from_null_val ? to_null_val : operand; \
}

Definition at line 235 of file RuntimeFunctions.cpp.

#define DEF_CAST_NULLABLE_BIDIR(type1, type2)
Value:
DEF_CAST_NULLABLE(type1, type2) \
DEF_CAST_NULLABLE(type2, type1)

Definition at line 252 of file RuntimeFunctions.cpp.

#define DEF_CAST_SCALED_NULLABLE(from_type, to_type)
Value:
extern "C" ALWAYS_INLINE to_type cast_##from_type##_to_##to_type##_scaled_nullable( \
    const from_type operand, \
    const from_type from_null_val, \
    const to_type to_null_val, \
    const to_type multiplier) { \
  return operand == from_null_val ? to_null_val : multiplier * operand; \
}

Definition at line 243 of file RuntimeFunctions.cpp.

#define DEF_CHECKED_SINGLE_AGG_ID_INT(n)
Value:
extern "C" ALWAYS_INLINE int32_t checked_single_agg_id_int##n( \
    int##n##_t* agg, const int##n##_t val, const int##n##_t null_val) { \
  if (val == null_val) { \
    return 0; \
  } \
  if (*agg == val) { \
    return 0; \
  } else if (*agg == null_val) { \
    *agg = val; \
    return 0; \
  } else { \
    /* see Execute::ERR_SINGLE_VALUE_FOUND_MULTIPLE_VALUES */ \
    return 15; \
  } \
}

Definition at line 496 of file RuntimeFunctions.cpp.

#define DEF_CMP_NULLABLE(type, null_type, opname, opsym)
Value:
extern "C" ALWAYS_INLINE int8_t opname##_##type##_nullable( \
    const type lhs, \
    const type rhs, \
    const null_type null_val, \
    const int8_t null_bool_val) { \
  if (lhs != null_val && rhs != null_val) { \
    return lhs opsym rhs; \
  } \
  return null_bool_val; \
}

Definition at line 71 of file RuntimeFunctions.cpp.

#define DEF_CMP_NULLABLE_LHS(type, null_type, opname, opsym)
Value:
extern "C" ALWAYS_INLINE int8_t opname##_##type##_nullable_lhs( \
    const type lhs, \
    const type rhs, \
    const null_type null_val, \
    const int8_t null_bool_val) { \
  if (lhs != null_val) { \
    return lhs opsym rhs; \
  } \
  return null_bool_val; \
}

Definition at line 83 of file RuntimeFunctions.cpp.

#define DEF_CMP_NULLABLE_RHS(type, null_type, opname, opsym)
Value:
extern "C" ALWAYS_INLINE int8_t opname##_##type##_nullable_rhs( \
    const type lhs, \
    const type rhs, \
    const null_type null_val, \
    const int8_t null_bool_val) { \
  if (rhs != null_val) { \
    return lhs opsym rhs; \
  } \
  return null_bool_val; \
}

Definition at line 95 of file RuntimeFunctions.cpp.

#define DEF_ROUND_NULLABLE(from_type, to_type)
Value:
extern "C" ALWAYS_INLINE to_type cast_##from_type##_to_##to_type##_nullable( \
    const from_type operand, \
    const from_type from_null_val, \
    const to_type to_null_val) { \
  return operand == from_null_val \
             ? to_null_val \
             : static_cast<to_type>(operand + (operand < from_type(0) \
                                                   ? from_type(-0.5) \
                                                   : from_type(0.5))); \
}

Definition at line 256 of file RuntimeFunctions.cpp.

#define DEF_SAFE_DIV_NULLABLE(type, null_type, opname)
Value:
extern "C" ALWAYS_INLINE type safe_div_##type( \
    const type lhs, const type rhs, const null_type null_val) { \
  if (lhs != null_val && rhs != null_val && rhs != 0) { \
    return lhs / rhs; \
  } \
  return null_val; \
}

Definition at line 107 of file RuntimeFunctions.cpp.

#define DEF_SHARED_AGG_RET_STUBS(base_agg_func)

Definition at line 784 of file RuntimeFunctions.cpp.

#define DEF_SHARED_AGG_STUBS(base_agg_func)
Value:
extern "C" GPU_RT_STUB void base_agg_func##_shared(int64_t* agg, const int64_t val) {} \
 \
extern "C" GPU_RT_STUB void base_agg_func##_skip_val_shared( \
    int64_t* agg, const int64_t val, const int64_t skip_val) {} \
extern "C" GPU_RT_STUB void base_agg_func##_int32_shared(int32_t* agg, \
                                                         const int32_t val) {} \
extern "C" GPU_RT_STUB void base_agg_func##_int16_shared(int16_t* agg, \
                                                         const int16_t val) {} \
extern "C" GPU_RT_STUB void base_agg_func##_int8_shared(int8_t* agg, \
                                                        const int8_t val) {} \
 \
extern "C" GPU_RT_STUB void base_agg_func##_int32_skip_val_shared( \
    int32_t* agg, const int32_t val, const int32_t skip_val) {} \
 \
extern "C" GPU_RT_STUB void base_agg_func##_double_shared(int64_t* agg, \
                                                          const double val) {} \
 \
extern "C" GPU_RT_STUB void base_agg_func##_double_skip_val_shared( \
    int64_t* agg, const double val, const double skip_val) {} \
extern "C" GPU_RT_STUB void base_agg_func##_float_shared(int32_t* agg, \
                                                         const float val) {} \
 \
extern "C" GPU_RT_STUB void base_agg_func##_float_skip_val_shared( \
    int32_t* agg, const float val, const float skip_val) {}

Definition at line 823 of file RuntimeFunctions.cpp.

#define DEF_SKIP_AGG(base_agg_func)
Value:
extern "C" ALWAYS_INLINE void base_agg_func##_skip_val( \
    DATA_T* agg, const DATA_T val, const DATA_T skip_val) { \
  if (val != skip_val) { \
    const DATA_T old_agg = *agg; \
    if (old_agg != skip_val) { \
      base_agg_func(agg, val); \
    } else { \
      *agg = val; \
    } \
  } \
}

Definition at line 736 of file RuntimeFunctions.cpp.

#define DEF_SKIP_AGG(base_agg_func)
Value:
extern "C" ALWAYS_INLINE void base_agg_func##_skip_val( \
    ADDR_T* agg, const DATA_T val, const DATA_T skip_val) { \
  if (val != skip_val) { \
    const ADDR_T old_agg = *agg; \
    if (old_agg != *reinterpret_cast<const ADDR_T*>(may_alias_ptr(&skip_val))) { \
      base_agg_func(agg, val); \
    } else { \
      *agg = *reinterpret_cast<const ADDR_T*>(may_alias_ptr(&val)); \
    } \
  } \
}

Definition at line 736 of file RuntimeFunctions.cpp.

#define DEF_SKIP_AGG_ADD(base_agg_func)
Value:
extern "C" ALWAYS_INLINE void base_agg_func##_skip_val( \
    DATA_T* agg, const DATA_T val, const DATA_T skip_val) { \
  if (val != skip_val) { \
    base_agg_func(agg, val); \
  } \
}

Definition at line 728 of file RuntimeFunctions.cpp.

#define DEF_SKIP_AGG_ADD(base_agg_func)
Value:
extern "C" ALWAYS_INLINE void base_agg_func##_skip_val( \
    ADDR_T* agg, const DATA_T val, const DATA_T skip_val) { \
  if (val != skip_val) { \
    base_agg_func(agg, val); \
  } \
}

Definition at line 728 of file RuntimeFunctions.cpp.
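
DATA_T (and ADDR_T for the float/double variants) is redefined before each expansion of these macros, which is why the same macro and type names appear repeatedly in the list above: each redefinition stamps out the skip-val wrappers for another element type. As an illustration, with DATA_T defined as int32_t, DEF_SKIP_AGG(agg_max_int32) expands to:

extern "C" ALWAYS_INLINE void agg_max_int32_skip_val(int32_t* agg,
                                                     const int32_t val,
                                                     const int32_t skip_val) {
  if (val != skip_val) {                 // NULL inputs are ignored
    const int32_t old_agg = *agg;
    if (old_agg != skip_val) {
      agg_max_int32(agg, val);           // normal accumulate path
    } else {
      *agg = val;                        // first non-NULL value initializes the slot
    }
  }
}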

#define DEF_UMINUS_NULLABLE(type, null_type)
Value:
extern "C" ALWAYS_INLINE type uminus_##type##_nullable(const type operand, \
                                                       const null_type null_val) { \
  return operand == null_val ? null_val : -operand; \
}

Definition at line 220 of file RuntimeFunctions.cpp.

#define DEF_WRITE_PROJECTION_INT(n)
Value:
extern "C" ALWAYS_INLINE void write_projection_int##n( \
    int8_t* slot_ptr, const int##n##_t val, const int64_t init_val) { \
  if (val != init_val) { \
    *reinterpret_cast<int##n##_t*>(slot_ptr) = val; \
  } \
}

Definition at line 524 of file RuntimeFunctions.cpp.

#define GPU_RT_STUB   NEVER_INLINE __attribute__((optnone))

Definition at line 348 of file RuntimeFunctions.cpp.

Function Documentation

NEVER_INLINE void agg_approximate_count_distinct (int64_t *agg, const int64_t key, const uint32_t b)

Definition at line 359 of file RuntimeFunctions.cpp.

References get_rank(), and MurmurHash64A().

{
  const uint64_t hash = MurmurHash64A(&key, sizeof(key), 0);
  const uint32_t index = hash >> (64 - b);
  const uint8_t rank = get_rank(hash << b, 64 - b);
  uint8_t* M = reinterpret_cast<uint8_t*>(*agg);
  M[index] = std::max(M[index], rank);
}
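
The body is the standard HyperLogLog register update: the top b bits of the 64-bit hash select one of 2^b registers, and each register keeps the maximum rank (1-based position of the first set bit among the remaining bits) it has seen. A minimal self-contained sketch of the same update, with an illustrative rank helper standing in for get_rank():

#include <algorithm>
#include <cstdint>
#include <vector>

// Rank = 1-based position of the first set bit among the (64 - b)
// remaining hash bits; (64 - b) + 1 if they are all zero.
static uint8_t rank_of(uint64_t bits, uint32_t valid_bits) {
  uint8_t r = 1;
  while (r <= valid_bits && !(bits & (1ULL << 63))) {
    bits <<= 1;
    ++r;
  }
  return r;
}

// M must hold (1 << b) registers, all initialized to zero.
void hll_update(std::vector<uint8_t>& M, const uint64_t hash, const uint32_t b) {
  const uint32_t index = hash >> (64 - b);          // top b bits pick the register
  const uint8_t rank = rank_of(hash << b, 64 - b);  // rank of the remaining bits
  M[index] = std::max(M[index], rank);
}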

GPU_RT_STUB void agg_approximate_count_distinct_gpu (int64_t *, const int64_t, const uint32_t, const int64_t, const int64_t)

Definition at line 369 of file RuntimeFunctions.cpp.

{}

ALWAYS_INLINE uint64_t agg_count (uint64_t *agg, const int64_t)

Definition at line 334 of file RuntimeFunctions.cpp.

Referenced by agg_count_skip_val(), and anonymous_namespace{GroupByAndAggregate.cpp}::get_agg_count().

{
  return (*agg)++;
}

ALWAYS_INLINE void agg_count_distinct_bitmap (int64_t *agg, const int64_t val, const int64_t min_val)

Definition at line 338 of file RuntimeFunctions.cpp.

Referenced by agg_count_distinct_bitmap_skip_val(), WindowFunctionContext::fillPartitionEnd(), WindowFunctionContext::fillPartitionStart(), anonymous_namespace{WindowContext.cpp}::index_to_partition_end(), and InValuesBitmap::InValuesBitmap().

{
  const uint64_t bitmap_idx = val - min_val;
  reinterpret_cast<int8_t*>(*agg)[bitmap_idx >> 3] |= (1 << (bitmap_idx & 7));
}
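
The agg slot holds a pointer to a caller-allocated bitmap sized for the value range, and val - min_val is the bit index. A sketch of the same bit arithmetic against an explicit buffer:

#include <cstdint>
#include <vector>

// Set bit (val - min_val) in a byte-addressed bitmap, mirroring
// agg_count_distinct_bitmap; the bitmap must span the full value range.
void set_distinct_bit(std::vector<int8_t>& bitmap,
                      const int64_t val,
                      const int64_t min_val) {
  const uint64_t bitmap_idx = val - min_val;
  bitmap[bitmap_idx >> 3] |= (1 << (bitmap_idx & 7));  // byte = idx/8, bit = idx%8
}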

GPU_RT_STUB void agg_count_distinct_bitmap_gpu (int64_t *, const int64_t, const int64_t, const int64_t, const int64_t, const uint64_t, const uint64_t)

Definition at line 351 of file RuntimeFunctions.cpp.

{}

ALWAYS_INLINE void agg_count_distinct_bitmap_skip_val (int64_t *agg, const int64_t val, const int64_t min_val, const int64_t skip_val)

Definition at line 443 of file RuntimeFunctions.cpp.

References agg_count_distinct_bitmap().

{
  if (val != skip_val) {
    agg_count_distinct_bitmap(agg, val, min_val);
  }
}

GPU_RT_STUB void agg_count_distinct_bitmap_skip_val_gpu (int64_t *, const int64_t, const int64_t, const int64_t, const int64_t, const int64_t, const uint64_t, const uint64_t)

Definition at line 452 of file RuntimeFunctions.cpp.

{}

ALWAYS_INLINE uint64_t agg_count_double (uint64_t *agg, const double val)

Definition at line 628 of file RuntimeFunctions.cpp.

Referenced by agg_count_double_skip_val().

{
  return (*agg)++;
}

ALWAYS_INLINE uint64_t agg_count_double_skip_val (uint64_t *agg, const double val, const double skip_val)

Definition at line 710 of file RuntimeFunctions.cpp.

References agg_count_double().

{
  if (val != skip_val) {
    return agg_count_double(agg, val);
  }
  return *agg;
}

ALWAYS_INLINE uint32_t agg_count_float (uint32_t *agg, const float val)

Definition at line 669 of file RuntimeFunctions.cpp.

Referenced by agg_count_float_skip_val().

{
  return (*agg)++;
}

ALWAYS_INLINE uint32_t agg_count_float_skip_val (uint32_t *agg, const float val, const float skip_val)

Definition at line 719 of file RuntimeFunctions.cpp.

References agg_count_float().

{
  if (val != skip_val) {
    return agg_count_float(agg, val);
  }
  return *agg;
}

ALWAYS_INLINE uint32_t agg_count_int32 (uint32_t *agg, const int32_t)

Definition at line 461 of file RuntimeFunctions.cpp.

Referenced by agg_count_int32_skip_val().

{
  return (*agg)++;
}

ALWAYS_INLINE uint32_t agg_count_int32_skip_val (uint32_t *agg, const int32_t val, const int32_t skip_val)

Definition at line 573 of file RuntimeFunctions.cpp.

References agg_count_int32().

{
  if (val != skip_val) {
    return agg_count_int32(agg, val);
  }
  return *agg;
}

ALWAYS_INLINE uint64_t agg_count_skip_val (uint64_t *agg, const int64_t val, const int64_t skip_val)

Definition at line 564 of file RuntimeFunctions.cpp.

References agg_count().

{
  if (val != skip_val) {
    return agg_count(agg, val);
  }
  return *agg;
}

ALWAYS_INLINE void agg_id (int64_t *agg, const int64_t val)

Definition at line 411 of file RuntimeFunctions.cpp.

{
  *agg = val;
}

ALWAYS_INLINE void agg_id_double (int64_t *agg, const double val)

Definition at line 647 of file RuntimeFunctions.cpp.

{
  *agg = *(reinterpret_cast<const int64_t*>(may_alias_ptr(&val)));
}
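
agg_id_double stores the raw bit pattern of a double in a 64-bit integer slot; may_alias_ptr makes the cast safe under strict aliasing. A portable sketch of the same round trip using std::memcpy:

#include <cstdint>
#include <cstring>

// Store the bits of a double into an int64_t slot and read them back.
void store_double_bits(int64_t* agg, const double val) {
  std::memcpy(agg, &val, sizeof(double));
}

double load_double_bits(const int64_t* agg) {
  double out;
  std::memcpy(&out, agg, sizeof(double));
  return out;
}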
GPU_RT_STUB void agg_id_double_shared_slow (int64_t *agg, const double *val)

Definition at line 914 of file RuntimeFunctions.cpp.

{}

ALWAYS_INLINE void agg_id_float (int32_t *agg, const float val)

Definition at line 688 of file RuntimeFunctions.cpp.

{
  *agg = *(reinterpret_cast<const int32_t*>(may_alias_ptr(&val)));
}

ALWAYS_INLINE int8_t* agg_id_varlen (int8_t *varlen_buffer, const int64_t offset, const int8_t *value, const int64_t size_bytes)

Definition at line 415 of file RuntimeFunctions.cpp.

{
  for (auto i = 0; i < size_bytes; i++) {
    varlen_buffer[offset + i] = value[i];
  }
  return &varlen_buffer[offset];
}

GPU_RT_STUB int8_t* agg_id_varlen_shared (int8_t *varlen_buffer, const int64_t offset, const int8_t *value, const int64_t size_bytes)

Definition at line 854 of file RuntimeFunctions.cpp.

{
  return nullptr;
}

ALWAYS_INLINE void agg_max (int64_t *agg, const int64_t val)

Definition at line 403 of file RuntimeFunctions.cpp.

{
  *agg = std::max(*agg, val);
}

ALWAYS_INLINE void agg_max_double (int64_t *agg, const double val)

Definition at line 637 of file RuntimeFunctions.cpp.

{
  const auto r = std::max(*reinterpret_cast<const double*>(agg), val);
  *agg = *(reinterpret_cast<const int64_t*>(may_alias_ptr(&r)));
}

ALWAYS_INLINE void agg_max_float (int32_t *agg, const float val)

Definition at line 678 of file RuntimeFunctions.cpp.

{
  const auto r = std::max(*reinterpret_cast<const float*>(agg), val);
  *agg = *(reinterpret_cast<const int32_t*>(may_alias_ptr(&r)));
}

GPU_RT_STUB void agg_max_int16_skip_val_shared (int16_t *agg, const int16_t val, const int16_t skip_val)

Definition at line 898 of file RuntimeFunctions.cpp.

{}

GPU_RT_STUB void agg_max_int8_skip_val_shared (int8_t *agg, const int8_t val, const int8_t skip_val)

Definition at line 902 of file RuntimeFunctions.cpp.

{}

ALWAYS_INLINE void agg_min (int64_t *agg, const int64_t val)

Definition at line 407 of file RuntimeFunctions.cpp.

{
  *agg = std::min(*agg, val);
}

ALWAYS_INLINE void agg_min_double (int64_t *agg, const double val)

Definition at line 642 of file RuntimeFunctions.cpp.

{
  const auto r = std::min(*reinterpret_cast<const double*>(agg), val);
  *agg = *(reinterpret_cast<const int64_t*>(may_alias_ptr(&r)));
}

ALWAYS_INLINE void agg_min_float (int32_t *agg, const float val)

Definition at line 683 of file RuntimeFunctions.cpp.

{
  const auto r = std::min(*reinterpret_cast<const float*>(agg), val);
  *agg = *(reinterpret_cast<const int32_t*>(may_alias_ptr(&r)));
}

GPU_RT_STUB void agg_min_int16_skip_val_shared (int16_t *agg, const int16_t val, const int16_t skip_val)

Definition at line 906 of file RuntimeFunctions.cpp.

{}

GPU_RT_STUB void agg_min_int8_skip_val_shared (int8_t *agg, const int8_t val, const int8_t skip_val)

Definition at line 910 of file RuntimeFunctions.cpp.

{}

ALWAYS_INLINE int64_t agg_sum (int64_t *agg, const int64_t val)

Definition at line 397 of file RuntimeFunctions.cpp.

Referenced by agg_sum_skip_val().

{
  const auto old = *agg;
  *agg += val;
  return old;
}

ALWAYS_INLINE void agg_sum_double (int64_t *agg, const double val)

Definition at line 632 of file RuntimeFunctions.cpp.

{
  const auto r = *reinterpret_cast<const double*>(agg) + val;
  *agg = *reinterpret_cast<const int64_t*>(may_alias_ptr(&r));
}

GPU_RT_STUB void agg_sum_double_shared (int64_t *agg, const double val)

Definition at line 935 of file RuntimeFunctions.cpp.

{}

GPU_RT_STUB void agg_sum_double_skip_val_shared (int64_t *agg, const double val, const double skip_val)

Definition at line 937 of file RuntimeFunctions.cpp.

{}

ALWAYS_INLINE void agg_sum_float (int32_t *agg, const float val)

Definition at line 673 of file RuntimeFunctions.cpp.

{
  const auto r = *reinterpret_cast<const float*>(agg) + val;
  *agg = *reinterpret_cast<const int32_t*>(may_alias_ptr(&r));
}

GPU_RT_STUB void agg_sum_float_shared (int32_t *agg, const float val)

Definition at line 940 of file RuntimeFunctions.cpp.

{}

GPU_RT_STUB void agg_sum_float_skip_val_shared (int32_t *agg, const float val, const float skip_val)

Definition at line 942 of file RuntimeFunctions.cpp.

{}

ALWAYS_INLINE int32_t agg_sum_int32 (int32_t *agg, const int32_t val)

Definition at line 465 of file RuntimeFunctions.cpp.

Referenced by agg_sum_int32_skip_val().

{
  const auto old = *agg;
  *agg += val;
  return old;
}

GPU_RT_STUB int32_t agg_sum_int32_shared (int32_t *agg, const int32_t val)

Definition at line 925 of file RuntimeFunctions.cpp.

{
  return 0;
}

ALWAYS_INLINE int32_t agg_sum_int32_skip_val (int32_t *agg, const int32_t val, const int32_t skip_val)

Definition at line 550 of file RuntimeFunctions.cpp.

References agg_sum_int32().

{
  const auto old = *agg;
  if (val != skip_val) {
    if (old != skip_val) {
      return agg_sum_int32(agg, val);
    } else {
      *agg = val;
    }
  }
  return old;
}

GPU_RT_STUB int32_t agg_sum_int32_skip_val_shared (int32_t *agg, const int32_t val, const int32_t skip_val)

Definition at line 929 of file RuntimeFunctions.cpp.

{
  return 0;
}

GPU_RT_STUB int64_t agg_sum_shared (int64_t *agg, const int64_t val)

Definition at line 916 of file RuntimeFunctions.cpp.

{
  return 0;
}

ALWAYS_INLINE int64_t agg_sum_skip_val (int64_t *agg, const int64_t val, const int64_t skip_val)

Definition at line 536 of file RuntimeFunctions.cpp.

References agg_sum().

Referenced by Executor::reduceResults().

{
  const auto old = *agg;
  if (val != skip_val) {
    if (old != skip_val) {
      return agg_sum(agg, val);
    } else {
      *agg = val;
    }
  }
  return old;
}
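
Here skip_val is the column's NULL sentinel: NULL inputs are ignored, and a still-NULL accumulator is replaced by the first non-NULL value rather than summed into. A small self-contained illustration (the sentinel of -1 is assumed for this sketch, not mandated by the API):

#include <cstdint>
#include <cstdio>

// Same logic as agg_sum_skip_val above, inlined for a standalone demo.
int64_t sum_skip_val(int64_t* agg, int64_t val, int64_t skip_val) {
  const int64_t old = *agg;
  if (val != skip_val) {
    if (old != skip_val) {
      *agg += val;
    } else {
      *agg = val;  // first non-NULL value initializes the accumulator
    }
  }
  return old;
}

int main() {
  const int64_t skip_val = -1;  // assumed NULL sentinel
  int64_t acc = skip_val;       // accumulator starts out NULL
  for (int64_t v : {int64_t{3}, skip_val, int64_t{4}}) {
    sum_skip_val(&acc, v, skip_val);
  }
  std::printf("%lld\n", static_cast<long long>(acc));  // prints 7; NULL was skipped
}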

GPU_RT_STUB int64_t agg_sum_skip_val_shared (int64_t *agg, const int64_t val, const int64_t skip_val)

Definition at line 920 of file RuntimeFunctions.cpp.

{
  return 0;
}

ALWAYS_INLINE int8_t bit_is_set (const int64_t bitset, const int64_t val, const int64_t min_val, const int64_t max_val, const int64_t null_val, const int8_t null_bool_val)

Definition at line 375 of file RuntimeFunctions.cpp.

{
  if (val == null_val) {
    return null_bool_val;
  }
  if (val < min_val || val > max_val) {
    return 0;
  }
  if (!bitset) {
    return 0;
  }
  const uint64_t bitmap_idx = val - min_val;
  return (reinterpret_cast<const int8_t*>(bitset))[bitmap_idx >> 3] &
                 (1 << (bitmap_idx & 7))
             ? 1
             : 0;
}
ALWAYS_INLINE DEVICE int32_t char_length (const char *str, const int32_t str_len)

Definition at line 1295 of file RuntimeFunctions.cpp.

Referenced by ScalarExprVisitor< std::unordered_set< InputColDescriptor > >::visit().

{
  return str_len;
}

ALWAYS_INLINE DEVICE int32_t char_length_nullable (const char *str, const int32_t str_len, const int32_t int_null)

Definition at line 1300 of file RuntimeFunctions.cpp.

{
  if (!str) {
    return int_null;
  }
  return str_len;
}

ALWAYS_INLINE DEVICE bool check_interrupt ()

Definition at line 1578 of file RuntimeFunctions.cpp.

References check_interrupt_init(), INT_CHECK, and runtime_interrupt_flag.

{
  if (check_interrupt_init(static_cast<unsigned>(INT_CHECK))) {
    return true;
  }
  return false;
}

bool check_interrupt_init (unsigned command)

Definition at line 1585 of file RuntimeFunctions.cpp.

References INT_ABORT, INT_CHECK, INT_RESET, and runtime_interrupt_flag.

Referenced by check_interrupt(), Executor::interrupt(), and Executor::resetInterrupt().

{
  static std::atomic_bool runtime_interrupt_flag{false};

  if (command == static_cast<unsigned>(INT_CHECK)) {
    if (runtime_interrupt_flag.load()) {
      return true;
    }
    return false;
  }
  if (command == static_cast<unsigned>(INT_ABORT)) {
    runtime_interrupt_flag.store(true);
    return false;
  }
  if (command == static_cast<unsigned>(INT_RESET)) {
    runtime_interrupt_flag.store(false);
    return false;
  }
  return false;
}
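
The function-local static std::atomic_bool gives a process-wide interrupt flag shared by every caller: INT_ABORT sets it, INT_RESET clears it, and INT_CHECK polls it from generated query code. A minimal sketch of the same command-dispatch pattern (the enum values here are illustrative, not the real INT_* constants):

#include <atomic>

enum Command : unsigned { CHECK = 0, ABORT = 1, RESET = 2 };  // illustrative values

bool interrupt_ctl(unsigned command) {
  static std::atomic_bool flag{false};  // one flag shared across all callers
  switch (command) {
    case CHECK:
      return flag.load();               // true => the running query should stop
    case ABORT:
      flag.store(true);                 // request interruption
      return false;
    case RESET:
      flag.store(false);                // clear before the next query
      return false;
    default:
      return false;
  }
}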

ALWAYS_INLINE int32_t checked_single_agg_id (int64_t *agg, const int64_t val, const int64_t null_val)

Definition at line 425 of file RuntimeFunctions.cpp.

{
  if (val == null_val) {
    return 0;
  }

  if (*agg == val) {
    return 0;
  } else if (*agg == null_val) {
    *agg = val;
    return 0;
  } else {
    // see Execute::ERR_SINGLE_VALUE_FOUND_MULTIPLE_VALUES
    return 15;
  }
}
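
The return code 15 corresponds to Execute::ERR_SINGLE_VALUE_FOUND_MULTIPLE_VALUES: the slot may be written once, and any later, different value is an error. A quick standalone check (the NULL sentinel is illustrative):

#include <cassert>
#include <cstdint>

// Same logic as checked_single_agg_id above.
int32_t single_agg(int64_t* agg, int64_t val, int64_t null_val) {
  if (val == null_val) return 0;
  if (*agg == val) return 0;
  if (*agg == null_val) {
    *agg = val;
    return 0;
  }
  return 15;  // Execute::ERR_SINGLE_VALUE_FOUND_MULTIPLE_VALUES
}

int main() {
  const int64_t NULL_VAL = INT64_MIN;  // illustrative sentinel
  int64_t slot = NULL_VAL;
  assert(single_agg(&slot, 42, NULL_VAL) == 0);   // first value is accepted
  assert(single_agg(&slot, 42, NULL_VAL) == 0);   // the same value again is fine
  assert(single_agg(&slot, 43, NULL_VAL) == 15);  // a second distinct value errors
}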
ALWAYS_INLINE int32_t checked_single_agg_id_double (int64_t *agg, const double val, const double null_val)

Definition at line 651 of file RuntimeFunctions.cpp.

{
  if (val == null_val) {
    return 0;
  }

  if (*agg == *(reinterpret_cast<const int64_t*>(may_alias_ptr(&val)))) {
    return 0;
  } else if (*agg == *(reinterpret_cast<const int64_t*>(may_alias_ptr(&null_val)))) {
    *agg = *(reinterpret_cast<const int64_t*>(may_alias_ptr(&val)));
    return 0;
  } else {
    // see Execute::ERR_SINGLE_VALUE_FOUND_MULTIPLE_VALUES
    return 15;
  }
}

GPU_RT_STUB int32_t checked_single_agg_id_double_shared (int64_t *agg, const double val, const double null_val)

Definition at line 886 of file RuntimeFunctions.cpp.

{
  return 0;
}

ALWAYS_INLINE int32_t checked_single_agg_id_float (int32_t *agg, const float val, const float null_val)

Definition at line 692 of file RuntimeFunctions.cpp.

{
  if (val == null_val) {
    return 0;
  }

  if (*agg == *(reinterpret_cast<const int32_t*>(may_alias_ptr(&val)))) {
    return 0;
  } else if (*agg == *(reinterpret_cast<const int32_t*>(may_alias_ptr(&null_val)))) {
    *agg = *(reinterpret_cast<const int32_t*>(may_alias_ptr(&val)));
    return 0;
  } else {
    // see Execute::ERR_SINGLE_VALUE_FOUND_MULTIPLE_VALUES
    return 15;
  }
}

GPU_RT_STUB int32_t checked_single_agg_id_float_shared (int32_t *agg, const float val, const float null_val)

Definition at line 892 of file RuntimeFunctions.cpp.

{
  return 0;
}

GPU_RT_STUB int32_t checked_single_agg_id_int16_shared (int16_t *agg, const int16_t val, const int16_t null_val)

Definition at line 874 of file RuntimeFunctions.cpp.

{
  return 0;
}

GPU_RT_STUB int32_t checked_single_agg_id_int32_shared (int32_t *agg, const int32_t val, const int32_t null_val)

Definition at line 868 of file RuntimeFunctions.cpp.

{
  return 0;
}

GPU_RT_STUB int32_t checked_single_agg_id_int8_shared (int8_t *agg, const int8_t val, const int8_t null_val)

Definition at line 879 of file RuntimeFunctions.cpp.

{
  return 0;
}

GPU_RT_STUB int32_t checked_single_agg_id_shared (int64_t *agg, const int64_t val, const int64_t null_val)

Definition at line 861 of file RuntimeFunctions.cpp.

{
  return 0;
}
ALWAYS_INLINE int64_t decimal_ceil (const int64_t x, const int64_t scale)

Definition at line 778 of file RuntimeFunctions.cpp.

References decimal_floor().

{
  return decimal_floor(x, scale) + (x % scale ? scale : 0);
}

ALWAYS_INLINE int64_t decimal_floor (const int64_t x, const int64_t scale)

Definition at line 768 of file RuntimeFunctions.cpp.

Referenced by decimal_ceil().

{
  if (x >= 0) {
    return x / scale * scale;
  }
  if (!(x % scale)) {
    return x;
  }
  return x / scale * scale - scale;
}
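
Fixed-point DECIMAL values are stored scaled by a power of ten, so with scale = 100 (two fractional digits) these round a stored value down or up to a whole unit; the negative branch corrects C++'s truncation toward zero. A quick standalone check of the definitions above:

#include <cassert>
#include <cstdint>

// Copies of the definitions above, for an isolated test.
int64_t dec_floor(int64_t x, int64_t scale) {
  if (x >= 0) return x / scale * scale;
  if (!(x % scale)) return x;
  return x / scale * scale - scale;
}
int64_t dec_ceil(int64_t x, int64_t scale) {
  return dec_floor(x, scale) + (x % scale ? scale : 0);
}

int main() {
  // DECIMAL(., 2) stores 12.34 as 1234.
  assert(dec_floor(1234, 100) == 1200);    // FLOOR(12.34)  = 12.00
  assert(dec_floor(-1234, 100) == -1300);  // FLOOR(-12.34) = -13.00
  assert(dec_ceil(1234, 100) == 1300);     // CEIL(12.34)   = 13.00
  assert(dec_ceil(-1234, 100) == -1200);   // CEIL(-12.34)  = -12.00
}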

GPU_RT_STUB int64_t* declare_dynamic_shared_memory ()

Definition at line 982 of file RuntimeFunctions.cpp.

{
  return nullptr;
}

ALWAYS_INLINE int32_t extract_str_len (const uint64_t str_and_len)

Definition at line 1270 of file RuntimeFunctions.cpp.

Referenced by extract_str_len_noinline().

{
  return static_cast<int64_t>(str_and_len) >> 48;
}

NEVER_INLINE int32_t extract_str_len_noinline (const uint64_t str_and_len)

Definition at line 1278 of file RuntimeFunctions.cpp.

References extract_str_len().

Referenced by string_compress().

{
  return extract_str_len(str_and_len);
}

ALWAYS_INLINE int8_t* extract_str_ptr (const uint64_t str_and_len)

Definition at line 1266 of file RuntimeFunctions.cpp.

Referenced by extract_str_ptr_noinline().

{
  return reinterpret_cast<int8_t*>(str_and_len & 0xffffffffffff);
}

NEVER_INLINE int8_t* extract_str_ptr_noinline (const uint64_t str_and_len)

Definition at line 1274 of file RuntimeFunctions.cpp.

References extract_str_ptr().

Referenced by string_compress().

{
  return extract_str_ptr(str_and_len);
}
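
Together with string_pack() (listed above), these helpers encode a string as a 48-bit pointer plus a 16-bit length in a single uint64_t. A sketch of the round trip, assuming user-space pointers fit in 48 bits as on current x86-64/AArch64:

#include <cassert>
#include <cstdint>

uint64_t pack(const int8_t* ptr, int32_t len) {
  return (reinterpret_cast<uint64_t>(ptr) & 0xffffffffffffULL) |
         (static_cast<uint64_t>(len) << 48);  // length in the top 16 bits
}

int8_t* unpack_ptr(uint64_t str_and_len) {
  return reinterpret_cast<int8_t*>(str_and_len & 0xffffffffffffULL);
}

int32_t unpack_len(uint64_t str_and_len) {
  return static_cast<int64_t>(str_and_len) >> 48;  // arithmetic shift, as above
}

int main() {
  int8_t buf[5] = {'h', 'e', 'l', 'l', 'o'};
  const uint64_t packed = pack(buf, 5);
  assert(unpack_ptr(packed) == buf);
  assert(unpack_len(packed) == 5);
}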

ALWAYS_INLINE int64_t floor_div_lhs (const int64_t dividend, const int64_t divisor)

Definition at line 207 of file RuntimeFunctions.cpp.

Referenced by floor_div_nullable_lhs().

{
  return (dividend < 0 ? dividend - (divisor - 1) : dividend) / divisor;
}
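
For a positive divisor this rounds toward negative infinity (SQL FLOOR division), whereas C++ integer division truncates toward zero. A quick comparison:

#include <cassert>
#include <cstdint>

int64_t floor_div(int64_t dividend, int64_t divisor) {  // same expression as above
  return (dividend < 0 ? dividend - (divisor - 1) : dividend) / divisor;
}

int main() {
  assert(-7 / 2 == -3);            // C++ truncates toward zero
  assert(floor_div(-7, 2) == -4);  // floor division rounds toward -infinity
  assert(floor_div(7, 2) == 3);    // identical for a non-negative dividend
}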

ALWAYS_INLINE int64_t floor_div_nullable_lhs (const int64_t dividend, const int64_t divisor, const int64_t null_val)

Definition at line 214 of file RuntimeFunctions.cpp.

References floor_div_lhs().

{
  return dividend == null_val ? null_val : floor_div_lhs(dividend, divisor);
}

GPU_RT_STUB void force_sync ()

Definition at line 946 of file RuntimeFunctions.cpp.

{}

GPU_RT_STUB int64_t get_block_index ()

Definition at line 986 of file RuntimeFunctions.cpp.

{
  return 0;
}

ALWAYS_INLINE int32_t get_error_code (int32_t *error_codes)

Definition at line 1005 of file RuntimeFunctions.cpp.

References pos_start_impl().

{
  return error_codes[pos_start_impl(nullptr)];
}

ALWAYS_INLINE int64_t* get_group_value_fast_keyless (int64_t *groups_buffer, const int64_t key, const int64_t min_key, const int64_t, const uint32_t row_size_quad)

Definition at line 1246 of file RuntimeFunctions.cpp.

{
  return groups_buffer + row_size_quad * (key - min_key);
}

ALWAYS_INLINE int64_t* get_group_value_fast_keyless_semiprivate (int64_t *groups_buffer, const int64_t key, const int64_t min_key, const int64_t, const uint32_t row_size_quad, const uint8_t thread_warp_idx, const uint8_t warp_size)

Definition at line 1255 of file RuntimeFunctions.cpp.

{
  return groups_buffer + row_size_quad * (warp_size * (key - min_key) + thread_warp_idx);
}

template<typename T >
ALWAYS_INLINE int64_t* get_matching_group_value (int64_t *groups_buffer, const uint32_t h, const T *key, const uint32_t key_count, const uint32_t row_size_quad)

Definition at line 1075 of file RuntimeFunctions.cpp.

References align_to_int64().

{
  auto off = h * row_size_quad;
  auto row_ptr = reinterpret_cast<T*>(groups_buffer + off);
  if (*row_ptr == get_empty_key<T>()) {
    memcpy(row_ptr, key, key_count * sizeof(T));
    auto row_ptr_i8 = reinterpret_cast<int8_t*>(row_ptr + key_count);
    return reinterpret_cast<int64_t*>(align_to_int64(row_ptr_i8));
  }
  if (memcmp(row_ptr, key, key_count * sizeof(T)) == 0) {
    auto row_ptr_i8 = reinterpret_cast<int8_t*>(row_ptr + key_count);
    return reinterpret_cast<int64_t*>(align_to_int64(row_ptr_i8));
  }
  return nullptr;
}

ALWAYS_INLINE int64_t* get_matching_group_value (int64_t *groups_buffer, const uint32_t h, const int64_t *key, const uint32_t key_count, const uint32_t key_width, const uint32_t row_size_quad)

Definition at line 1094 of file RuntimeFunctions.cpp.

References get_matching_group_value().

{
  switch (key_width) {
    case 4:
      return get_matching_group_value(groups_buffer,
                                      h,
                                      reinterpret_cast<const int32_t*>(key),
                                      key_count,
                                      row_size_quad);
    case 8:
      return get_matching_group_value(groups_buffer, h, key, key_count, row_size_quad);
    default:;
  }
  return nullptr;
}
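
get_matching_group_value resolves a single hash slot: it claims an empty slot for the key, matches an existing identical key, or reports a collision with nullptr. A caller then probes successive slots until it gets a non-null aggregate pointer. A simplified linear-probing driver, for illustration only (the production probing loop lives in GroupByRuntime.cpp, included above), assuming the definitions on this page are in scope:

#include <cstdint>

int64_t* get_group_value_sketch(int64_t* groups_buffer,
                                const uint32_t entry_count,
                                const int64_t* key,
                                const uint32_t key_count,
                                const uint32_t row_size_quad,
                                const uint32_t h /* initial hash bucket */) {
  uint32_t probe = h;
  do {
    // Non-null on an empty-slot claim or an exact key match.
    if (int64_t* slots = get_matching_group_value(
            groups_buffer, probe, key, key_count, row_size_quad)) {
      return slots;
    }
    probe = (probe + 1) % entry_count;  // linear probing on collision
  } while (probe != h);
  return nullptr;  // table full
}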

ALWAYS_INLINE int64_t* get_matching_group_value_columnar (int64_t *groups_buffer, const uint32_t h, const int64_t *key, const uint32_t key_qw_count, const size_t entry_count)

Definition at line 1162 of file RuntimeFunctions.cpp.

References EMPTY_KEY_64.

{
  auto off = h;
  if (groups_buffer[off] == EMPTY_KEY_64) {
    for (size_t i = 0; i < key_qw_count; ++i) {
      groups_buffer[off] = key[i];
      off += entry_count;
    }
    return &groups_buffer[off];
  }
  off = h;
  for (size_t i = 0; i < key_qw_count; ++i) {
    if (groups_buffer[off] != key[i]) {
      return nullptr;
    }
    off += entry_count;
  }
  return &groups_buffer[off];
}
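
In the columnar layout, key component i of entry h lives at groups_buffer[h + i * entry_count], so each key column occupies a contiguous stripe and per-entry access strides by entry_count. A sketch of the write path in isolation:

#include <cstdint>
#include <vector>

// Columnar key layout for entry_count = 4 and a 2-part key:
//   [k0 of e0][k0 of e1][k0 of e2][k0 of e3][k1 of e0][k1 of e1]...
// Writing the key of entry h therefore strides by entry_count.
void write_columnar_key(std::vector<int64_t>& groups_buffer,
                        const uint32_t h,
                        const int64_t* key,
                        const uint32_t key_qw_count,
                        const size_t entry_count) {
  size_t off = h;
  for (uint32_t i = 0; i < key_qw_count; ++i) {
    groups_buffer[off] = key[i];
    off += entry_count;
  }
}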
template<typename T >
ALWAYS_INLINE int32_t get_matching_group_value_columnar_slot (int64_t *groups_buffer, const uint32_t entry_count, const uint32_t h, const T *key, const uint32_t key_count)

Definition at line 1115 of file RuntimeFunctions.cpp.

{
  auto off = h;
  auto key_buffer = reinterpret_cast<T*>(groups_buffer);
  if (key_buffer[off] == get_empty_key<T>()) {
    for (size_t i = 0; i < key_count; ++i) {
      key_buffer[off] = key[i];
      off += entry_count;
    }
    return h;
  }
  off = h;
  for (size_t i = 0; i < key_count; ++i) {
    if (key_buffer[off] != key[i]) {
      return -1;
    }
    off += entry_count;
  }
  return h;
}

ALWAYS_INLINE int32_t get_matching_group_value_columnar_slot (int64_t *groups_buffer, const uint32_t entry_count, const uint32_t h, const int64_t *key, const uint32_t key_count, const uint32_t key_width)

Definition at line 1140 of file RuntimeFunctions.cpp.

References get_matching_group_value_columnar_slot().

{
  switch (key_width) {
    case 4:
      return get_matching_group_value_columnar_slot(groups_buffer,
                                                    entry_count,
                                                    h,
                                                    reinterpret_cast<const int32_t*>(key),
                                                    key_count);
    case 8:
      return get_matching_group_value_columnar_slot(
          groups_buffer, entry_count, h, key, key_count);
    default:
      return -1;
  }
  return -1;
}

ALWAYS_INLINE int64_t* get_matching_group_value_perfect_hash (int64_t *groups_buffer, const uint32_t hashed_index, const int64_t *key, const uint32_t key_count, const uint32_t row_size_quad)

Definition at line 1197 of file RuntimeFunctions.cpp.

References EMPTY_KEY_64.

{
  uint32_t off = hashed_index * row_size_quad;
  if (groups_buffer[off] == EMPTY_KEY_64) {
    for (uint32_t i = 0; i < key_count; ++i) {
      groups_buffer[off + i] = key[i];
    }
  }
  return groups_buffer + off + key_count;
}

ALWAYS_INLINE int64_t* get_matching_group_value_perfect_hash_keyless (int64_t *groups_buffer, const uint32_t hashed_index, const uint32_t row_size_quad)

For a particular hashed index (only used with multi-column perfect hash group by), it returns the row-wise offset of the group in the output buffer. Since it is intended for keyless hash use, it assumes there are no group columns prepending the output buffer.

Definition at line 1218 of file RuntimeFunctions.cpp.

{
  return groups_buffer + row_size_quad * hashed_index;
}
GPU_RT_STUB int64_t get_thread_index ()

Definition at line 978 of file RuntimeFunctions.cpp.

{
  return 0;
}

NEVER_INLINE int32_t group_buff_idx_impl ()

Definition at line 966 of file RuntimeFunctions.cpp.

References pos_start_impl().

{
  return pos_start_impl(nullptr);
}

NEVER_INLINE void init_columnar_group_by_buffer_gpu (int64_t *groups_buffer, const int64_t *init_vals, const uint32_t groups_buffer_entry_count, const uint32_t key_qw_count, const uint32_t agg_col_count, const bool keyless, const bool blocks_share_memory, const int32_t frag_idx)

Definition at line 1045 of file RuntimeFunctions.cpp.

{
#ifndef _WIN32
  // the body is not really needed, just make sure the call is not optimized away
  assert(groups_buffer);
#endif
}

NEVER_INLINE void init_group_by_buffer_gpu (int64_t *groups_buffer, const int64_t *init_vals, const uint32_t groups_buffer_entry_count, const uint32_t key_qw_count, const uint32_t agg_col_count, const bool keyless, const int8_t warp_size)

Definition at line 1031 of file RuntimeFunctions.cpp.

{
#ifndef _WIN32
  // the body is not really needed, just make sure the call is not optimized away
  assert(groups_buffer);
#endif
}

NEVER_INLINE void init_group_by_buffer_impl (int64_t *groups_buffer, const int64_t *init_vals, const uint32_t groups_buffer_entry_count, const uint32_t key_qw_count, const uint32_t agg_col_count, const bool keyless, const int8_t warp_size)

Definition at line 1060 of file RuntimeFunctions.cpp.

{
#ifndef _WIN32
  // the body is not really needed, just make sure the call is not optimized away
  assert(groups_buffer);
#endif
}

int64_t* init_shared_mem (const int64_t *global_groups_buffer, const int32_t groups_buffer_size)

Definition at line 1026 of file RuntimeFunctions.cpp.

{
  return nullptr;
}

NEVER_INLINE const int64_t* init_shared_mem_nop (const int64_t *groups_buffer, const int32_t groups_buffer_size)

Definition at line 1011 of file RuntimeFunctions.cpp.

{
  return groups_buffer;
}
ALWAYS_INLINE DEVICE int32_t key_for_string_encoded ( const int32_t  str_id)

Definition at line 1309 of file RuntimeFunctions.cpp.

1309  {
1310  return str_id;
1311 }
NEVER_INLINE void linear_probabilistic_count ( uint8_t *  bitmap,
const uint32_t  bitmap_bytes,
const uint8_t *  key_bytes,
const uint32_t  key_len 
)

Definition at line 1482 of file RuntimeFunctions.cpp.

References MurmurHash3().

1485  {
1486  const uint32_t bit_pos = MurmurHash3(key_bytes, key_len, 0) % (bitmap_bytes * 8);
1487  const uint32_t word_idx = bit_pos / 32;
1488  const uint32_t bit_idx = bit_pos % 32;
1489  reinterpret_cast<uint32_t*>(bitmap)[word_idx] |= 1 << bit_idx;
1490 }
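
linear_probabilistic_count implements the insert step of linear (probabilistic) counting: every key sets one hash-addressed bit, and the number of distinct keys can later be estimated from the fraction of bits still unset. The estimator is not part of this file; a minimal sketch of the standard formula n ≈ -m * ln(Z/m), where m is the bitmap size in bits and Z the count of zero bits:

  #include <cmath>
  #include <cstdint>

  // Sketch only; the engine's actual estimator lives elsewhere and may differ.
  double linear_counting_estimate(const uint8_t* bitmap, const uint32_t bitmap_bytes) {
    const uint64_t m = static_cast<uint64_t>(bitmap_bytes) * 8;  // total bits
    uint64_t zero_bits = 0;
    for (uint32_t i = 0; i < bitmap_bytes; ++i) {
      zero_bits += 8 - __builtin_popcount(bitmap[i]);
    }
    // A saturated bitmap (zero_bits == 0) would need special handling.
    return -static_cast<double>(m) * std::log(static_cast<double>(zero_bits) / m);
  }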

ALWAYS_INLINE double load_avg_decimal ( const int64_t *  sum,
const int64_t *  count,
const double  null_val,
const uint32_t  scale 
)

Definition at line 1461 of file RuntimeFunctions.cpp.

1464  {
1465  return *count != 0 ? (static_cast<double>(*sum) / pow(10, scale)) / *count : null_val;
1466 }
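
For example, for a column with scale 2, *sum = 12345 (representing 123.45) and *count = 10 give (12345 / 10^2) / 10 = 12.345.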
ALWAYS_INLINE double load_avg_double ( const int64_t *  agg,
const int64_t *  count,
const double  null_val 
)

Definition at line 1468 of file RuntimeFunctions.cpp.

1470  {
1471  return *count != 0 ? *reinterpret_cast<const double*>(may_alias_ptr(agg)) / *count
1472  : null_val;
1473 }
ALWAYS_INLINE double load_avg_float ( const int32_t *  agg,
const int32_t *  count,
const double  null_val 
)

Definition at line 1475 of file RuntimeFunctions.cpp.

1477  {
1478  return *count != 0 ? *reinterpret_cast<const float*>(may_alias_ptr(agg)) / *count
1479  : null_val;
1480 }
ALWAYS_INLINE double load_avg_int ( const int64_t *  sum,
const int64_t *  count,
const double  null_val 
)

Definition at line 1455 of file RuntimeFunctions.cpp.

1457  {
1458  return *count != 0 ? static_cast<double>(*sum) / *count : null_val;
1459 }
ALWAYS_INLINE double load_double ( const int64_t *  agg)

Definition at line 1447 of file RuntimeFunctions.cpp.

1447  {
1448  return *reinterpret_cast<const double*>(may_alias_ptr(agg));
1449 }
ALWAYS_INLINE float load_float ( const int32_t *  agg)

Definition at line 1451 of file RuntimeFunctions.cpp.

1451  {
1452  return *reinterpret_cast<const float*>(may_alias_ptr(agg));
1453 }
ALWAYS_INLINE int8_t logical_and ( const int8_t  lhs,
const int8_t  rhs,
const int8_t  null_val 
)

Definition at line 308 of file RuntimeFunctions.cpp.

310  {
311  if (lhs == null_val) {
312  return rhs == 0 ? rhs : null_val;
313  }
314  if (rhs == null_val) {
315  return lhs == 0 ? lhs : null_val;
316  }
317  return (lhs && rhs) ? 1 : 0;
318 }
ALWAYS_INLINE int8_t logical_not ( const int8_t  operand,
const int8_t  null_val 
)

Definition at line 304 of file RuntimeFunctions.cpp.

304  {
305  return operand == null_val ? operand : (operand ? 0 : 1);
306 }
ALWAYS_INLINE int8_t logical_or ( const int8_t  lhs,
const int8_t  rhs,
const int8_t  null_val 
)

Definition at line 320 of file RuntimeFunctions.cpp.

322  {
323  if (lhs == null_val) {
324  return rhs == 0 ? null_val : rhs;
325  }
326  if (rhs == null_val) {
327  return lhs == 0 ? null_val : lhs;
328  }
329  return (lhs || rhs) ? 1 : 0;
330 }
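
Together, logical_not, logical_and, and logical_or implement SQL three-valued logic with null_val standing in for NULL: AND short-circuits to FALSE when either side is FALSE even if the other is NULL, OR short-circuits to TRUE symmetrically, and anything else involving NULL stays NULL. A small illustrative check (the sentinel value below is arbitrary):

  #include <cassert>
  #include <cstdint>

  int main() {
    const int8_t kNull = -128;  // illustrative null sentinel
    assert(logical_and(0, kNull, kNull) == 0);      // FALSE AND NULL -> FALSE
    assert(logical_and(1, kNull, kNull) == kNull);  // TRUE  AND NULL -> NULL
    assert(logical_or(1, kNull, kNull) == 1);       // TRUE  OR  NULL -> TRUE
    assert(logical_or(0, kNull, kNull) == kNull);   // FALSE OR  NULL -> NULL
    assert(logical_not(kNull, kNull) == kNull);     // NOT NULL      -> NULL
    return 0;
  }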
void multifrag_query ( const int8_t ***  col_buffers,
const uint64_t *  num_fragments,
const int64_t *  num_rows,
const uint64_t *  frag_row_offsets,
const int32_t *  max_matched,
int32_t *  total_matched,
const int64_t *  init_agg_value,
int64_t **  out,
int32_t *  error_code,
const uint32_t *  num_tables_ptr,
const int64_t *  join_hash_tables 
)

Definition at line 1553 of file RuntimeFunctions.cpp.

References query_stub().

1563  {
1564  for (uint32_t i = 0; i < *num_fragments; ++i) {
1565  query_stub(col_buffers ? col_buffers[i] : nullptr,
1566  &num_rows[i * (*num_tables_ptr)],
1567  &frag_row_offsets[i * (*num_tables_ptr)],
1568  max_matched,
1569  init_agg_value,
1570  out,
1571  i,
1572  join_hash_tables,
1573  total_matched,
1574  error_code);
1575  }
1576 }
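
Note that num_rows and frag_row_offsets carry one entry per table for every fragment, laid out fragment-major, so the slice for fragment i starts at index i * (*num_tables_ptr); that is what the pointer arithmetic inside the loop computes. The same layout applies to multifrag_query_hoisted_literals below.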

void multifrag_query_hoisted_literals ( const int8_t ***  col_buffers,
const uint64_t *  num_fragments,
const int8_t *  literals,
const int64_t *  num_rows,
const uint64_t *  frag_row_offsets,
const int32_t *  max_matched,
int32_t *  total_matched,
const int64_t *  init_agg_value,
int64_t **  out,
int32_t *  error_code,
const uint32_t *  num_tables_ptr,
const int64_t *  join_hash_tables 
)

Definition at line 1510 of file RuntimeFunctions.cpp.

References query_stub_hoisted_literals().

1521  {
1522  for (uint32_t i = 0; i < *num_fragments; ++i) {
1523  query_stub_hoisted_literals(col_buffers ? col_buffers[i] : nullptr,
1524  literals,
1525  &num_rows[i * (*num_tables_ptr)],
1526  &frag_row_offsets[i * (*num_tables_ptr)],
1527  max_matched,
1528  init_agg_value,
1529  out,
1530  i,
1531  join_hash_tables,
1532  total_matched,
1533  error_code);
1534  }
1535 }

ALWAYS_INLINE double percent_window_func ( const int64_t  output_buff,
const int64_t  pos 
)

Definition at line 1442 of file RuntimeFunctions.cpp.

1443  {
1444  return reinterpret_cast<const double*>(output_buff)[pos];
1445 }
NEVER_INLINE int32_t pos_start_impl ( int32_t *  error_code)

Definition at line 957 of file RuntimeFunctions.cpp.

957  {
958  int32_t row_index_resume{0};
959  if (error_code) {
960  row_index_resume = error_code[0];
961  error_code[0] = 0;
962  }
963  return row_index_resume;
964 }
NEVER_INLINE int32_t pos_step_impl ( )

Definition at line 970 of file RuntimeFunctions.cpp.

970  {
971  return 1;
972 }
NEVER_INLINE void query_stub ( const int8_t **  col_buffers,
const int64_t *  num_rows,
const uint64_t *  frag_row_offsets,
const int32_t *  max_matched,
const int64_t *  init_agg_value,
int64_t **  out,
uint32_t  frag_idx,
const int64_t *  join_hash_tables,
int32_t *  error_code,
int32_t *  total_matched 
)

Definition at line 1537 of file RuntimeFunctions.cpp.

Referenced by multifrag_query().

1546  {
1547 #ifndef _WIN32
1548  assert(col_buffers || num_rows || frag_row_offsets || max_matched || init_agg_value ||
1549  out || frag_idx || error_code || join_hash_tables || total_matched);
1550 #endif
1551 }


NEVER_INLINE void query_stub_hoisted_literals ( const int8_t **  col_buffers,
const int8_t *  literals,
const int64_t *  num_rows,
const uint64_t *  frag_row_offsets,
const int32_t *  max_matched,
const int64_t *  init_agg_value,
int64_t **  out,
uint32_t  frag_idx,
const int64_t *  join_hash_tables,
int32_t *  error_code,
int32_t *  total_matched 
)

Definition at line 1492 of file RuntimeFunctions.cpp.

Referenced by multifrag_query_hoisted_literals().

1502  {
1503 #ifndef _WIN32
1504  assert(col_buffers || literals || num_rows || frag_row_offsets || max_matched ||
1505  init_agg_value || out || frag_idx || error_code || join_hash_tables ||
1506  total_matched);
1507 #endif
1508 }


ALWAYS_INLINE void record_error_code ( const int32_t  err_code,
int32_t *  error_codes 
)

Definition at line 992 of file RuntimeFunctions.cpp.

References pos_start_impl().

993  {
994  // NB: never override persistent error codes (with code greater than zero).
995  // On GPU, a projection query with a limit can run out of slots without it
996  // being an actual error if the limit has been hit. If a persistent error
997  // (division by zero, for example) occurs before running out of slots, we
998  // have to avoid overriding it, because there's a risk that the query would
999  // go through if we override with a potentially benign out-of-slots code.
1000  if (err_code && error_codes[pos_start_impl(nullptr)] <= 0) {
1001  error_codes[pos_start_impl(nullptr)] = err_code;
1002  }
1003 }

ALWAYS_INLINE int64_t row_number_window_func ( const int64_t  output_buff,
const int64_t  pos 
)

Definition at line 1437 of file RuntimeFunctions.cpp.

1438  {
1439  return reinterpret_cast<const int64_t*>(output_buff)[pos];
1440 }
ALWAYS_INLINE DEVICE bool sample_ratio ( const double  proportion,
const int64_t  row_offset 
)

Definition at line 1313 of file RuntimeFunctions.cpp.

Referenced by ScalarExprVisitor< std::unordered_set< InputColDescriptor > >::visit().

1314  {
1315  const int64_t threshold = 4294967296 * proportion;
1316  return (row_offset * 2654435761) % 4294967296 < threshold;
1317 }
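
The multiplier 2654435761 is the prime commonly used for Knuth-style multiplicative hashing (approximately 2^32/phi), and 4294967296 is 2^32, so the function effectively tests whether hash(row_offset) / 2^32 < proportion, yielding a deterministic pseudo-random sample. For example, with proportion = 0.5 the threshold is 2^31 = 2147483648: row_offset 1 maps to 2654435761 and is not sampled, while row_offset 2 maps to 5308871522 mod 2^32 = 1013904226 and is sampled.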


ALWAYS_INLINE int64_t scale_decimal_down_not_nullable ( const int64_t  operand,
const int64_t  scale,
const int64_t  null_val 
)

Definition at line 197 of file RuntimeFunctions.cpp.

199  {
200  int64_t tmp = scale >> 1;
201  tmp = operand >= 0 ? operand + tmp : operand - tmp;
202  return tmp / scale;
203 }
ALWAYS_INLINE int64_t scale_decimal_down_nullable ( const int64_t  operand,
const int64_t  scale,
const int64_t  null_val 
)

Definition at line 184 of file RuntimeFunctions.cpp.

186  {
187  // rounded scale down of a decimal
188  if (operand == null_val) {
189  return null_val;
190  }
191 
192  int64_t tmp = scale >> 1;
193  tmp = operand >= 0 ? operand + tmp : operand - tmp;
194  return tmp / scale;
195 }
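
Adding scale >> 1 (half the scale) before dividing makes the truncating integer division round half away from zero: with scale = 100, operand 12345 (i.e. 123.45) gives (12345 + 50) / 100 = 123, while operand -12355 (i.e. -123.55) gives (-12355 - 50) / 100 = -124.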
ALWAYS_INLINE int64_t scale_decimal_up ( const int64_t  operand,
const uint64_t  scale,
const int64_t  operand_null_val,
const int64_t  result_null_val 
)

Definition at line 177 of file RuntimeFunctions.cpp.

180  {
181  return operand != operand_null_val ? operand * scale : result_null_val;
182 }
ALWAYS_INLINE void set_matching_group_value_perfect_hash_columnar ( int64_t *  groups_buffer,
const uint32_t  hashed_index,
const int64_t *  key,
const uint32_t  key_count,
const uint32_t  entry_count 
)

Definition at line 1229 of file RuntimeFunctions.cpp.

References EMPTY_KEY_64.

1234  {
1235  if (groups_buffer[hashed_index] == EMPTY_KEY_64) {
1236  for (uint32_t i = 0; i < key_count; i++) {
1237  groups_buffer[i * entry_count + hashed_index] = key[i];
1238  }
1239  }
1240 }
ALWAYS_INLINE uint64_t string_pack ( const int8_t *  ptr,
const int32_t  len 
)

Definition at line 1282 of file RuntimeFunctions.cpp.

1282  {
1283  return (reinterpret_cast<const uint64_t>(ptr) & 0xffffffffffff) |
1284  (static_cast<const uint64_t>(len) << 48);
1285 }
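
string_pack stores the string pointer in the low 48 bits of the result and the length in the high 16 bits. A minimal sketch of the inverse operation, with illustrative names (the engine's actual unpacking helpers are defined elsewhere):

  #include <cstdint>

  // Illustrative only: recover the components packed by string_pack.
  inline const int8_t* unpack_str_ptr(const uint64_t packed) {
    // Low 48 bits hold the pointer (canonical x86-64 addresses fit).
    return reinterpret_cast<const int8_t*>(packed & 0xffffffffffffULL);
  }

  inline int32_t unpack_str_len(const uint64_t packed) {
    return static_cast<int32_t>(packed >> 48);  // high 16 bits hold the length
  }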
GPU_RT_STUB void sync_threadblock ( )

Definition at line 950 of file RuntimeFunctions.cpp.

950 {}
GPU_RT_STUB void sync_warp ( )

Definition at line 948 of file RuntimeFunctions.cpp.

948 {}
GPU_RT_STUB void sync_warp_protected ( int64_t  thread_pos,
int64_t  row_count 
)

Definition at line 949 of file RuntimeFunctions.cpp.

949 {}
GPU_RT_STUB int8_t thread_warp_idx ( const int8_t  warp_sz)

Definition at line 974 of file RuntimeFunctions.cpp.

974  {
975  return 0;
976 }
ALWAYS_INLINE DEVICE double width_bucket ( const double  target_value,
const double  lower_bound,
const double  upper_bound,
const double  scale_factor,
const int32_t  partition_count 
)

Definition at line 1319 of file RuntimeFunctions.cpp.

Referenced by ScalarExprVisitor< std::unordered_set< InputColDescriptor > >::visit(), width_bucket_expr(), and width_bucket_nullable().

1323  {
1324  if (target_value < lower_bound) {
1325  return 0;
1326  } else if (target_value >= upper_bound) {
1327  return partition_count + 1;
1328  }
1329  return ((target_value - lower_bound) * scale_factor) + 1;
1330 }
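
The scale_factor argument is expected to be partition_count / (upper_bound - lower_bound), as width_bucket_expr below computes. For example, with lower_bound = 0, upper_bound = 100 and partition_count = 4 (so scale_factor = 0.04), a target of 25 lands in bucket (25 - 0) * 0.04 + 1 = 2; targets below the lower bound map to bucket 0 and targets at or above the upper bound map to the overflow bucket partition_count + 1 = 5.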

ALWAYS_INLINE DEVICE double width_bucket_expr ( const double  target_value,
const bool  reversed,
const double  lower_bound,
const double  upper_bound,
const int32_t  partition_count 
)

Definition at line 1390 of file RuntimeFunctions.cpp.

References width_bucket(), and width_bucket_reversed().

Referenced by CodeGenerator::codegen(), getExpressionRange(), ScalarExprVisitor< std::unordered_set< InputColDescriptor > >::visit(), and width_bucket_expr_nullable().

1394  {
1395  if (reversed) {
1396  return width_bucket_reversed(target_value,
1397  lower_bound,
1398  upper_bound,
1399  partition_count / (lower_bound - upper_bound),
1400  partition_count);
1401  }
1402  return width_bucket(target_value,
1403  lower_bound,
1404  upper_bound,
1405  partition_count / (upper_bound - lower_bound),
1406  partition_count);
1407 }

ALWAYS_INLINE DEVICE double width_bucket_expr_no_oob_check ( const double  target_value,
const bool  reversed,
const double  lower_bound,
const double  upper_bound,
const int32_t  partition_count 
)

Definition at line 1423 of file RuntimeFunctions.cpp.

References width_bucket_no_oob_check(), and width_bucket_reversed_no_oob_check().

1428  {
1429  if (reversed) {
1430  return width_bucket_reversed_no_oob_check(
1431  target_value, lower_bound, partition_count / (lower_bound - upper_bound));
1432  }
1433  return width_bucket_no_oob_check(
1434  target_value, lower_bound, partition_count / (upper_bound - lower_bound));
1435 }

ALWAYS_INLINE DEVICE double width_bucket_expr_nullable ( const double  target_value,
const bool  reversed,
const double  lower_bound,
const double  upper_bound,
const int32_t  partition_count,
const double  null_val 
)

Definition at line 1409 of file RuntimeFunctions.cpp.

References INT32_MIN, and width_bucket_expr().

1415  {
1416  if (target_value == null_val) {
1417  return INT32_MIN;
1418  }
1419  return width_bucket_expr(
1420  target_value, reversed, lower_bound, upper_bound, partition_count);
1421 }

ALWAYS_INLINE DEVICE double width_bucket_no_oob_check ( const double  target_value,
const double  lower_bound,
const double  scale_factor 
)

Definition at line 1376 of file RuntimeFunctions.cpp.

Referenced by width_bucket_expr_no_oob_check().

1379  {
1380  return ((target_value - lower_bound) * scale_factor) + 1;
1381 }

ALWAYS_INLINE double width_bucket_nullable ( const double  target_value,
const double  lower_bound,
const double  upper_bound,
const double  scale_factor,
const int32_t  partition_count,
const double  null_val 
)

Definition at line 1346 of file RuntimeFunctions.cpp.

References INT32_MIN, and width_bucket().

1351  {
1352  if (target_value == null_val) {
1353  return INT32_MIN;
1354  }
1355  return width_bucket(
1356  target_value, lower_bound, upper_bound, scale_factor, partition_count);
1357 }

ALWAYS_INLINE DEVICE double width_bucket_reversed ( const double  target_value,
const double  lower_bound,
const double  upper_bound,
const double  scale_factor,
const int32_t  partition_count 
)

Definition at line 1332 of file RuntimeFunctions.cpp.

Referenced by width_bucket_expr(), and width_bucket_reversed_nullable().

1337  {
1338  if (target_value > lower_bound) {
1339  return 0;
1340  } else if (target_value <= upper_bound) {
1341  return partition_count + 1;
1342  }
1343  return ((lower_bound - target_value) * scale_factor) + 1;
1344 }

ALWAYS_INLINE DEVICE double width_bucket_reversed_no_oob_check ( const double  target_value,
const double  lower_bound,
const double  scale_factor 
)

Definition at line 1383 of file RuntimeFunctions.cpp.

Referenced by width_bucket_expr_no_oob_check().

1386  {
1387  return ((lower_bound - target_value) * scale_factor) + 1;
1388 }

ALWAYS_INLINE double width_bucket_reversed_nullable ( const double  target_value,
const double  lower_bound,
const double  upper_bound,
const double  scale_factor,
const int32_t  partition_count,
const double  null_val 
)

Definition at line 1359 of file RuntimeFunctions.cpp.

References INT32_MIN, and width_bucket_reversed().

1365  {
1366  if (target_value == null_val) {
1367  return INT32_MIN;
1368  }
1369  return width_bucket_reversed(
1370  target_value, lower_bound, upper_bound, scale_factor, partition_count);
1371 }

GPU_RT_STUB void write_back_non_grouped_agg ( int64_t *  input_buffer,
int64_t *  output_buffer,
const int32_t  num_agg_cols 
)

Definition at line 952 of file RuntimeFunctions.cpp.

954  {};
NEVER_INLINE void write_back_nop ( int64_t *  dest,
int64_t *  src,
const int32_t  sz 
)

Definition at line 1017 of file RuntimeFunctions.cpp.

1019  {
1020 #ifndef _WIN32
1021  // the body is not really needed, just make sure the call is not optimized away
1022  assert(dest);
1023 #endif
1024 }