OmniSciDB  1dac507f6e
 All Classes Namespaces Files Functions Variables Typedefs Enumerations Enumerator Friends Macros Pages
RuntimeFunctions.cpp File Reference
#include "RuntimeFunctions.h"
#include "../Shared/funcannotations.h"
#include "BufferCompaction.h"
#include "HyperLogLogRank.h"
#include "MurmurHash.h"
#include "TypePunning.h"
#include <algorithm>
#include <chrono>
#include <cmath>
#include <cstring>
#include <thread>
#include <tuple>
#include "DecodersImpl.h"
#include "GroupByRuntime.cpp"
#include "JoinHashTableQueryRuntime.cpp"
#include "TopKRuntime.cpp"
+ Include dependency graph for RuntimeFunctions.cpp:

Go to the source code of this file.

Macros

#define DEF_ARITH_NULLABLE(type, null_type, opname, opsym)
 
#define DEF_ARITH_NULLABLE_LHS(type, null_type, opname, opsym)
 
#define DEF_ARITH_NULLABLE_RHS(type, null_type, opname, opsym)
 
#define DEF_CMP_NULLABLE(type, null_type, opname, opsym)
 
#define DEF_CMP_NULLABLE_LHS(type, null_type, opname, opsym)
 
#define DEF_CMP_NULLABLE_RHS(type, null_type, opname, opsym)
 
#define DEF_SAFE_DIV_NULLABLE(type, null_type, opname)
 
#define DEF_BINARY_NULLABLE_ALL_OPS(type, null_type)
 
#define DEF_UMINUS_NULLABLE(type, null_type)
 
#define DEF_CAST_NULLABLE(from_type, to_type)
 
#define DEF_CAST_NULLABLE_BIDIR(type1, type2)
 
#define GPU_RT_STUB   NEVER_INLINE __attribute__((optnone))
 
#define DEF_AGG_MAX_INT(n)
 
#define DEF_AGG_MIN_INT(n)
 
#define DEF_AGG_ID_INT(n)
 
#define DEF_WRITE_PROJECTION_INT(n)
 
#define DEF_SKIP_AGG_ADD(base_agg_func)
 
#define DEF_SKIP_AGG(base_agg_func)
 
#define DATA_T   int64_t
 
#define DATA_T   int32_t
 
#define DATA_T   int16_t
 
#define DATA_T   int8_t
 
#define DEF_SKIP_AGG_ADD(base_agg_func)
 
#define DEF_SKIP_AGG(base_agg_func)
 
#define DATA_T   double
 
#define ADDR_T   int64_t
 
#define DATA_T   float
 
#define ADDR_T   int32_t
 
#define DEF_SHARED_AGG_RET_STUBS(base_agg_func)
 
#define DEF_SHARED_AGG_STUBS(base_agg_func)
 

Functions

ALWAYS_INLINE int64_t scale_decimal_up (const int64_t operand, const uint64_t scale, const int64_t operand_null_val, const int64_t result_null_val)
 
ALWAYS_INLINE int64_t scale_decimal_down_nullable (const int64_t operand, const int64_t scale, const int64_t null_val)
 
ALWAYS_INLINE int64_t scale_decimal_down_not_nullable (const int64_t operand, const int64_t scale, const int64_t null_val)
 
ALWAYS_INLINE int8_t logical_not (const int8_t operand, const int8_t null_val)
 
ALWAYS_INLINE int8_t logical_and (const int8_t lhs, const int8_t rhs, const int8_t null_val)
 
ALWAYS_INLINE int8_t logical_or (const int8_t lhs, const int8_t rhs, const int8_t null_val)
 
ALWAYS_INLINE uint64_t agg_count (uint64_t *agg, const int64_t)
 
ALWAYS_INLINE void agg_count_distinct_bitmap (int64_t *agg, const int64_t val, const int64_t min_val)
 
GPU_RT_STUB void agg_count_distinct_bitmap_gpu (int64_t *, const int64_t, const int64_t, const int64_t, const int64_t, const uint64_t, const uint64_t)
 
NEVER_INLINE void agg_approximate_count_distinct (int64_t *agg, const int64_t key, const uint32_t b)
 
GPU_RT_STUB void agg_approximate_count_distinct_gpu (int64_t *, const int64_t, const uint32_t, const int64_t, const int64_t)
 
ALWAYS_INLINE int8_t bit_is_set (const int64_t bitset, const int64_t val, const int64_t min_val, const int64_t max_val, const int64_t null_val, const int8_t null_bool_val)
 
ALWAYS_INLINE int64_t agg_sum (int64_t *agg, const int64_t val)
 
ALWAYS_INLINE void agg_max (int64_t *agg, const int64_t val)
 
ALWAYS_INLINE void agg_min (int64_t *agg, const int64_t val)
 
ALWAYS_INLINE void agg_id (int64_t *agg, const int64_t val)
 
ALWAYS_INLINE void agg_count_distinct_bitmap_skip_val (int64_t *agg, const int64_t val, const int64_t min_val, const int64_t skip_val)
 
GPU_RT_STUB void agg_count_distinct_bitmap_skip_val_gpu (int64_t *, const int64_t, const int64_t, const int64_t, const int64_t, const int64_t, const uint64_t, const uint64_t)
 
ALWAYS_INLINE uint32_t agg_count_int32 (uint32_t *agg, const int32_t)
 
ALWAYS_INLINE int32_t agg_sum_int32 (int32_t *agg, const int32_t val)
 
ALWAYS_INLINE int64_t agg_sum_skip_val (int64_t *agg, const int64_t val, const int64_t skip_val)
 
ALWAYS_INLINE int32_t agg_sum_int32_skip_val (int32_t *agg, const int32_t val, const int32_t skip_val)
 
ALWAYS_INLINE uint64_t agg_count_skip_val (uint64_t *agg, const int64_t val, const int64_t skip_val)
 
ALWAYS_INLINE uint32_t agg_count_int32_skip_val (uint32_t *agg, const int32_t val, const int32_t skip_val)
 
ALWAYS_INLINE uint64_t agg_count_double (uint64_t *agg, const double val)
 
ALWAYS_INLINE void agg_sum_double (int64_t *agg, const double val)
 
ALWAYS_INLINE void agg_max_double (int64_t *agg, const double val)
 
ALWAYS_INLINE void agg_min_double (int64_t *agg, const double val)
 
ALWAYS_INLINE void agg_id_double (int64_t *agg, const double val)
 
ALWAYS_INLINE uint32_t agg_count_float (uint32_t *agg, const float val)
 
ALWAYS_INLINE void agg_sum_float (int32_t *agg, const float val)
 
ALWAYS_INLINE void agg_max_float (int32_t *agg, const float val)
 
ALWAYS_INLINE void agg_min_float (int32_t *agg, const float val)
 
ALWAYS_INLINE void agg_id_float (int32_t *agg, const float val)
 
ALWAYS_INLINE uint64_t agg_count_double_skip_val (uint64_t *agg, const double val, const double skip_val)
 
ALWAYS_INLINE uint32_t agg_count_float_skip_val (uint32_t *agg, const float val, const float skip_val)
 
ALWAYS_INLINE int64_t decimal_floor (const int64_t x, const int64_t scale)
 
ALWAYS_INLINE int64_t decimal_ceil (const int64_t x, const int64_t scale)
 
GPU_RT_STUB void agg_max_int16_skip_val_shared (int16_t *agg, const int16_t val, const int16_t skip_val)
 
GPU_RT_STUB void agg_max_int8_skip_val_shared (int8_t *agg, const int8_t val, const int8_t skip_val)
 
GPU_RT_STUB void agg_min_int16_skip_val_shared (int16_t *agg, const int16_t val, const int16_t skip_val)
 
GPU_RT_STUB void agg_min_int8_skip_val_shared (int8_t *agg, const int8_t val, const int8_t skip_val)
 
GPU_RT_STUB void agg_id_double_shared_slow (int64_t *agg, const double *val)
 
GPU_RT_STUB int64_t agg_sum_shared (int64_t *agg, const int64_t val)
 
GPU_RT_STUB int64_t agg_sum_skip_val_shared (int64_t *agg, const int64_t val, const int64_t skip_val)
 
GPU_RT_STUB int32_t agg_sum_int32_shared (int32_t *agg, const int32_t val)
 
GPU_RT_STUB int32_t agg_sum_int32_skip_val_shared (int32_t *agg, const int32_t val, const int32_t skip_val)
 
GPU_RT_STUB void agg_sum_double_shared (int64_t *agg, const double val)
 
GPU_RT_STUB void agg_sum_double_skip_val_shared (int64_t *agg, const double val, const double skip_val)
 
GPU_RT_STUB void agg_sum_float_shared (int32_t *agg, const float val)
 
GPU_RT_STUB void agg_sum_float_skip_val_shared (int32_t *agg, const float val, const float skip_val)
 
GPU_RT_STUB void force_sync ()
 
GPU_RT_STUB void sync_warp ()
 
GPU_RT_STUB void sync_warp_protected (int64_t thread_pos, int64_t row_count)
 
 __attribute__ ((noinline)) int32_t pos_start_impl(int32_t *error_code)
 
GPU_RT_STUB int8_t thread_warp_idx (const int8_t warp_sz)
 
ALWAYS_INLINE int32_t record_error_code (const int32_t err_code, int32_t *error_codes)
 
assert(dest) — precondition emitted inside write_back_nop(int64_t *dest, int64_t *src, const int32_t sz); the declaration above was mangled by the documentation extractor
 
init_shared_mem_nop(int64_t *groups_buffer, const int32_t groups_buffer_size) — no-op stub; returns groups_buffer (declaration reassembled; original rendering was split across lines by the extractor)
 
write_back_nop(int64_t *dest, int64_t *src, const int32_t sz) — no-op stub (parameter types reassembled from the mangled rendering)
 
agg_from_smem_to_gmem_nop(int64_t *dest, int64_t *src, const int32_t sz) — no-op stub (parameter types reassembled from the mangled rendering)
 
assert(groups_buffer) — precondition inside a group-by buffer initialization routine taking (int64_t *groups_buffer, const int64_t *init_vals, const uint32_t groups_buffer_entry_count, const uint32_t key_qw_count, const uint32_t agg_col_count, const bool keyless, const bool blocks_share_memory, const int8_t warp_size); the declaration was mangled by the extractor — confirm the exact function name against the source
 
template<typename T >
ALWAYS_INLINE int64_t * get_matching_group_value (int64_t *groups_buffer, const uint32_t h, const T *key, const uint32_t key_count, const uint32_t row_size_quad)
 
ALWAYS_INLINE int64_t * get_matching_group_value (int64_t *groups_buffer, const uint32_t h, const int64_t *key, const uint32_t key_count, const uint32_t key_width, const uint32_t row_size_quad, const int64_t *init_vals)
 
template<typename T >
ALWAYS_INLINE int32_t get_matching_group_value_columnar_slot (int64_t *groups_buffer, const uint32_t entry_count, const uint32_t h, const T *key, const uint32_t key_count)
 
ALWAYS_INLINE int32_t get_matching_group_value_columnar_slot (int64_t *groups_buffer, const uint32_t entry_count, const uint32_t h, const int64_t *key, const uint32_t key_count, const uint32_t key_width)
 
ALWAYS_INLINE int64_t * get_matching_group_value_columnar (int64_t *groups_buffer, const uint32_t h, const int64_t *key, const uint32_t key_qw_count, const size_t entry_count)
 
ALWAYS_INLINE int64_t * get_matching_group_value_perfect_hash (int64_t *groups_buffer, const uint32_t hashed_index, const int64_t *key, const uint32_t key_count, const uint32_t row_size_quad)
 
ALWAYS_INLINE void set_matching_group_value_perfect_hash_columnar (int64_t *groups_buffer, const uint32_t hashed_index, const int64_t *key, const uint32_t key_count, const uint32_t entry_count)
 
ALWAYS_INLINE int64_t * get_group_value_fast_keyless (int64_t *groups_buffer, const int64_t key, const int64_t min_key, const int64_t, const uint32_t row_size_quad)
 
ALWAYS_INLINE int64_t * get_group_value_fast_keyless_semiprivate (int64_t *groups_buffer, const int64_t key, const int64_t min_key, const int64_t, const uint32_t row_size_quad, const uint8_t thread_warp_idx, const uint8_t warp_size)
 
ALWAYS_INLINE int8_t * extract_str_ptr (const uint64_t str_and_len)
 
ALWAYS_INLINE int32_t extract_str_len (const uint64_t str_and_len)
 
ALWAYS_INLINE uint64_t string_pack (const int8_t *ptr, const int32_t len)
 
ALWAYS_INLINE DEVICE int32_t char_length (const char *str, const int32_t str_len)
 
ALWAYS_INLINE DEVICE int32_t char_length_nullable (const char *str, const int32_t str_len, const int32_t int_null)
 
ALWAYS_INLINE DEVICE int32_t key_for_string_encoded (const int32_t str_id)
 
ALWAYS_INLINE int64_t row_number_window_func (const int64_t output_buff, const int64_t pos)
 
ALWAYS_INLINE double percent_window_func (const int64_t output_buff, const int64_t pos)
 
ALWAYS_INLINE double load_double (const int64_t *agg)
 
ALWAYS_INLINE float load_float (const int32_t *agg)
 
ALWAYS_INLINE double load_avg_int (const int64_t *sum, const int64_t *count, const double null_val)
 
ALWAYS_INLINE double load_avg_decimal (const int64_t *sum, const int64_t *count, const double null_val, const uint32_t scale)
 
ALWAYS_INLINE double load_avg_double (const int64_t *agg, const int64_t *count, const double null_val)
 
ALWAYS_INLINE double load_avg_float (const int32_t *agg, const int32_t *count, const double null_val)
 
NEVER_INLINE void linear_probabilistic_count (uint8_t *bitmap, const uint32_t bitmap_bytes, const uint8_t *key_bytes, const uint32_t key_len)
 
assert(col_buffers || literals || num_rows || frag_row_offsets || max_matched || init_agg_value || out || frag_idx || error_code || join_hash_tables || total_matched) — precondition inside the hoisted-literals query entry point (parameter list of int32_t *total_matched and the const-qualified pointer arguments was split across lines by the extractor)
 
void multifrag_query_hoisted_literals (const int8_t ***col_buffers, const uint64_t *num_fragments, const int8_t *literals, const int64_t *num_rows, const uint64_t *frag_row_offsets, const int32_t *max_matched, int32_t *total_matched, const int64_t *init_agg_value, int64_t **out, int32_t *error_code, const uint32_t *num_tables_ptr, const int64_t *join_hash_tables)
 
assert(col_buffers || num_rows || frag_row_offsets || max_matched || init_agg_value || out || frag_idx || error_code || join_hash_tables || total_matched) — precondition inside the non-hoisted query entry point (parameter list of int32_t *total_matched and the const-qualified pointer arguments was split across lines by the extractor)
 
void multifrag_query (const int8_t ***col_buffers, const uint64_t *num_fragments, const int64_t *num_rows, const uint64_t *frag_row_offsets, const int32_t *max_matched, int32_t *total_matched, const int64_t *init_agg_value, int64_t **out, int32_t *error_code, const uint32_t *num_tables_ptr, const int64_t *join_hash_tables)
 

Variables

const int32_t
groups_buffer_size return 
groups_buffer
 
int64_t * src
 
const int32_t
groups_buffer_size return 
nullptr
 
const int64_t * init_vals
 
const int64_t const uint32_t groups_buffer_entry_count
 
const int64_t const uint32_t
const uint32_t 
key_qw_count
 
const int64_t const uint32_t
const uint32_t const uint32_t 
agg_col_count
 
const int64_t const uint32_t
const uint32_t const uint32_t
const bool 
keyless
 
const int64_t const uint32_t
const uint32_t const uint32_t
const bool const bool 
blocks_share_memory
 
const int8_t * literals
 
const int8_t const int64_t * num_rows
 
const int8_t const int64_t
const uint64_t * 
frag_row_offsets
 
const int8_t const int64_t
const uint64_t const int32_t * 
max_matched
 
const int8_t const int64_t
const uint64_t const int32_t
const int64_t * 
init_agg_value
 
const int8_t const int64_t
const uint64_t const int32_t
const int64_t int64_t ** 
out
 
const int8_t const int64_t
const uint64_t const int32_t
const int64_t int64_t uint32_t 
frag_idx
 
const int8_t const int64_t
const uint64_t const int32_t
const int64_t int64_t uint32_t
const int64_t * 
join_hash_tables
 
const int8_t const int64_t
const uint64_t const int32_t
const int64_t int64_t uint32_t
const int64_t int32_t * 
error_code
 

Macro Definition Documentation

#define ADDR_T   int64_t

Definition at line 612 of file RuntimeFunctions.cpp.

#define ADDR_T   int32_t

Definition at line 612 of file RuntimeFunctions.cpp.

#define DATA_T   int64_t

Definition at line 611 of file RuntimeFunctions.cpp.

#define DATA_T   int32_t

Definition at line 611 of file RuntimeFunctions.cpp.

#define DATA_T   int16_t

Definition at line 611 of file RuntimeFunctions.cpp.

#define DATA_T   int8_t

Definition at line 611 of file RuntimeFunctions.cpp.

#define DATA_T   double

Definition at line 611 of file RuntimeFunctions.cpp.

#define DATA_T   float

Definition at line 611 of file RuntimeFunctions.cpp.

#define DEF_AGG_ID_INT (   n)
Value:
extern "C" ALWAYS_INLINE void agg_id_int##n(int##n##_t* agg, const int##n##_t val) { \
*agg = val; \
}
#define ALWAYS_INLINE

Definition at line 404 of file RuntimeFunctions.cpp.

#define DEF_AGG_MAX_INT (   n)
Value:
extern "C" ALWAYS_INLINE void agg_max_int##n(int##n##_t* agg, const int##n##_t val) { \
*agg = std::max(*agg, val); \
}
#define ALWAYS_INLINE

Definition at line 384 of file RuntimeFunctions.cpp.

#define DEF_AGG_MIN_INT (   n)
Value:
extern "C" ALWAYS_INLINE void agg_min_int##n(int##n##_t* agg, const int##n##_t val) { \
*agg = std::min(*agg, val); \
}
#define ALWAYS_INLINE

Definition at line 394 of file RuntimeFunctions.cpp.

#define DEF_ARITH_NULLABLE (   type,
  null_type,
  opname,
  opsym 
)
Value:
extern "C" ALWAYS_INLINE type opname##_##type##_nullable( \
const type lhs, const type rhs, const null_type null_val) { \
if (lhs != null_val && rhs != null_val) { \
return lhs opsym rhs; \
} \
return null_val; \
}
#define ALWAYS_INLINE

Definition at line 41 of file RuntimeFunctions.cpp.

#define DEF_ARITH_NULLABLE_LHS (   type,
  null_type,
  opname,
  opsym 
)
Value:
extern "C" ALWAYS_INLINE type opname##_##type##_nullable_lhs( \
const type lhs, const type rhs, const null_type null_val) { \
if (lhs != null_val) { \
return lhs opsym rhs; \
} \
return null_val; \
}
#define ALWAYS_INLINE

Definition at line 50 of file RuntimeFunctions.cpp.

#define DEF_ARITH_NULLABLE_RHS (   type,
  null_type,
  opname,
  opsym 
)
Value:
extern "C" ALWAYS_INLINE type opname##_##type##_nullable_rhs( \
const type lhs, const type rhs, const null_type null_val) { \
if (rhs != null_val) { \
return lhs opsym rhs; \
} \
return null_val; \
}
#define ALWAYS_INLINE

Definition at line 59 of file RuntimeFunctions.cpp.

#define DEF_BINARY_NULLABLE_ALL_OPS (   type,
  null_type 
)

Definition at line 113 of file RuntimeFunctions.cpp.

#define DEF_CAST_NULLABLE (   from_type,
  to_type 
)
Value:
extern "C" ALWAYS_INLINE to_type cast_##from_type##_to_##to_type##_nullable( \
const from_type operand, \
const from_type from_null_val, \
const to_type to_null_val) { \
return operand == from_null_val ? to_null_val : operand; \
}
#define ALWAYS_INLINE

Definition at line 216 of file RuntimeFunctions.cpp.

#define DEF_CAST_NULLABLE_BIDIR (   type1,
  type2 
)
Value:
DEF_CAST_NULLABLE(type1, type2) \
DEF_CAST_NULLABLE(type2, type1)
#define DEF_CAST_NULLABLE(from_type, to_type)

Definition at line 224 of file RuntimeFunctions.cpp.

#define DEF_CMP_NULLABLE (   type,
  null_type,
  opname,
  opsym 
)
Value:
extern "C" ALWAYS_INLINE int8_t opname##_##type##_nullable( \
const type lhs, \
const type rhs, \
const null_type null_val, \
const int8_t null_bool_val) { \
if (lhs != null_val && rhs != null_val) { \
return lhs opsym rhs; \
} \
return null_bool_val; \
}
#define ALWAYS_INLINE

Definition at line 68 of file RuntimeFunctions.cpp.

#define DEF_CMP_NULLABLE_LHS (   type,
  null_type,
  opname,
  opsym 
)
Value:
extern "C" ALWAYS_INLINE int8_t opname##_##type##_nullable_lhs( \
const type lhs, \
const type rhs, \
const null_type null_val, \
const int8_t null_bool_val) { \
if (lhs != null_val) { \
return lhs opsym rhs; \
} \
return null_bool_val; \
}
#define ALWAYS_INLINE

Definition at line 80 of file RuntimeFunctions.cpp.

#define DEF_CMP_NULLABLE_RHS (   type,
  null_type,
  opname,
  opsym 
)
Value:
extern "C" ALWAYS_INLINE int8_t opname##_##type##_nullable_rhs( \
const type lhs, \
const type rhs, \
const null_type null_val, \
const int8_t null_bool_val) { \
if (rhs != null_val) { \
return lhs opsym rhs; \
} \
return null_bool_val; \
}
#define ALWAYS_INLINE

Definition at line 92 of file RuntimeFunctions.cpp.

#define DEF_SAFE_DIV_NULLABLE (   type,
  null_type,
  opname 
)
Value:
extern "C" ALWAYS_INLINE type safe_div_##type( \
const type lhs, const type rhs, const null_type null_val) { \
if (lhs != null_val && rhs != null_val && rhs != 0) { \
return lhs / rhs; \
} \
return null_val; \
}
#define ALWAYS_INLINE

Definition at line 104 of file RuntimeFunctions.cpp.

#define DEF_SHARED_AGG_RET_STUBS (   base_agg_func)

Definition at line 638 of file RuntimeFunctions.cpp.

#define DEF_SHARED_AGG_STUBS (   base_agg_func)
Value:
extern "C" GPU_RT_STUB void base_agg_func##_shared(int64_t* agg, const int64_t val) {} \
\
extern "C" GPU_RT_STUB void base_agg_func##_skip_val_shared( \
int64_t* agg, const int64_t val, const int64_t skip_val) {} \
extern "C" GPU_RT_STUB void base_agg_func##_int32_shared(int32_t* agg, \
const int32_t val) {} \
extern "C" GPU_RT_STUB void base_agg_func##_int16_shared(int16_t* agg, \
const int16_t val) {} \
extern "C" GPU_RT_STUB void base_agg_func##_int8_shared(int8_t* agg, \
const int8_t val) {} \
\
extern "C" GPU_RT_STUB void base_agg_func##_int32_skip_val_shared( \
int32_t* agg, const int32_t val, const int32_t skip_val) {} \
\
extern "C" GPU_RT_STUB void base_agg_func##_double_shared(int64_t* agg, \
const double val) {} \
\
extern "C" GPU_RT_STUB void base_agg_func##_double_skip_val_shared( \
int64_t* agg, const double val, const double skip_val) {} \
extern "C" GPU_RT_STUB void base_agg_func##_float_shared(int32_t* agg, \
const float val) {} \
\
extern "C" GPU_RT_STUB void base_agg_func##_float_skip_val_shared( \
int32_t* agg, const float val, const float skip_val) {}
#define GPU_RT_STUB

Definition at line 677 of file RuntimeFunctions.cpp.

#define DEF_SKIP_AGG (   base_agg_func)
Value:
extern "C" ALWAYS_INLINE void base_agg_func##_skip_val( \
DATA_T* agg, const DATA_T val, const DATA_T skip_val) { \
if (val != skip_val) { \
const DATA_T old_agg = *agg; \
if (old_agg != skip_val) { \
base_agg_func(agg, val); \
} else { \
*agg = val; \
} \
} \
}
#define DATA_T
#define ALWAYS_INLINE

Definition at line 590 of file RuntimeFunctions.cpp.

#define DEF_SKIP_AGG (   base_agg_func)
Value:
extern "C" ALWAYS_INLINE void base_agg_func##_skip_val( \
ADDR_T* agg, const DATA_T val, const DATA_T skip_val) { \
if (val != skip_val) { \
const ADDR_T old_agg = *agg; \
if (old_agg != *reinterpret_cast<const ADDR_T*>(may_alias_ptr(&skip_val))) { \
base_agg_func(agg, val); \
} else { \
*agg = *reinterpret_cast<const ADDR_T*>(may_alias_ptr(&val)); \
} \
} \
}
#define DATA_T
#define ADDR_T
#define ALWAYS_INLINE

Definition at line 590 of file RuntimeFunctions.cpp.

#define DEF_SKIP_AGG_ADD (   base_agg_func)
Value:
extern "C" ALWAYS_INLINE void base_agg_func##_skip_val( \
DATA_T* agg, const DATA_T val, const DATA_T skip_val) { \
if (val != skip_val) { \
base_agg_func(agg, val); \
} \
}
#define DATA_T
#define ALWAYS_INLINE

Definition at line 582 of file RuntimeFunctions.cpp.

#define DEF_SKIP_AGG_ADD (   base_agg_func)
Value:
extern "C" ALWAYS_INLINE void base_agg_func##_skip_val( \
ADDR_T* agg, const DATA_T val, const DATA_T skip_val) { \
if (val != skip_val) { \
base_agg_func(agg, val); \
} \
}
#define DATA_T
#define ADDR_T
#define ALWAYS_INLINE

Definition at line 582 of file RuntimeFunctions.cpp.

#define DEF_UMINUS_NULLABLE (   type,
  null_type 
)
Value:
extern "C" ALWAYS_INLINE type uminus_##type##_nullable(const type operand, \
const null_type null_val) { \
return operand == null_val ? null_val : -operand; \
}
#define ALWAYS_INLINE

Definition at line 202 of file RuntimeFunctions.cpp.

#define DEF_WRITE_PROJECTION_INT (   n)
Value:
extern "C" ALWAYS_INLINE void write_projection_int##n( \
int8_t* slot_ptr, const int##n##_t val, const int64_t init_val) { \
if (val != init_val) { \
*reinterpret_cast<int##n##_t*>(slot_ptr) = val; \
} \
}
#define ALWAYS_INLINE

Definition at line 414 of file RuntimeFunctions.cpp.

#define GPU_RT_STUB   NEVER_INLINE __attribute__((optnone))

Definition at line 290 of file RuntimeFunctions.cpp.

Function Documentation

__attribute__ ( (noinline)  )

Definition at line 763 of file RuntimeFunctions.cpp.

763  {
764  int32_t row_index_resume{0};
765  if (error_code) {
766  row_index_resume = error_code[0];
767  error_code[0] = 0;
768  }
769  return row_index_resume;
770 }
const int8_t const int64_t const uint64_t const int32_t const int64_t int64_t uint32_t const int64_t int32_t * error_code
NEVER_INLINE void agg_approximate_count_distinct ( int64_t *  agg,
const int64_t  key,
const uint32_t  b 
)

Definition at line 300 of file RuntimeFunctions.cpp.

References get_rank(), and MurmurHash64A().

302  {
303  const uint64_t hash = MurmurHash64A(&key, sizeof(key), 0);
304  const uint32_t index = hash >> (64 - b);
305  const uint8_t rank = get_rank(hash << b, 64 - b);
306  uint8_t* M = reinterpret_cast<uint8_t*>(*agg);
307  M[index] = std::max(M[index], rank);
308 }
FORCE_INLINE uint8_t get_rank(uint64_t x, uint32_t b)
NEVER_INLINE DEVICE uint64_t MurmurHash64A(const void *key, int len, uint64_t seed)
Definition: MurmurHash.cpp:26

+ Here is the call graph for this function:

GPU_RT_STUB void agg_approximate_count_distinct_gpu ( int64_t *  ,
const int64_t  ,
const uint32_t  ,
const int64_t  ,
const int64_t   
)

Definition at line 310 of file RuntimeFunctions.cpp.

314  {}
ALWAYS_INLINE uint64_t agg_count ( uint64_t *  agg,
const int64_t   
)

Definition at line 279 of file RuntimeFunctions.cpp.

Referenced by agg_count_skip_val(), and anonymous_namespace{GroupByAndAggregate.cpp}::get_agg_count().

279  {
280  return (*agg)++;
281 }

+ Here is the caller graph for this function:

ALWAYS_INLINE void agg_count_distinct_bitmap ( int64_t *  agg,
const int64_t  val,
const int64_t  min_val 
)

Definition at line 283 of file RuntimeFunctions.cpp.

Referenced by agg_count_distinct_bitmap_skip_val(), WindowFunctionContext::fillPartitionEnd(), WindowFunctionContext::fillPartitionStart(), anonymous_namespace{WindowContext.cpp}::index_to_partition_end(), and InValuesBitmap::InValuesBitmap().

285  {
286  const uint64_t bitmap_idx = val - min_val;
287  reinterpret_cast<int8_t*>(*agg)[bitmap_idx >> 3] |= (1 << (bitmap_idx & 7));
288 }

+ Here is the caller graph for this function:

GPU_RT_STUB void agg_count_distinct_bitmap_gpu ( int64_t *  ,
const int64_t  ,
const int64_t  ,
const int64_t  ,
const int64_t  ,
const uint64_t  ,
const uint64_t   
)

Definition at line 292 of file RuntimeFunctions.cpp.

298  {}
ALWAYS_INLINE void agg_count_distinct_bitmap_skip_val ( int64_t *  agg,
const int64_t  val,
const int64_t  min_val,
const int64_t  skip_val 
)

Definition at line 356 of file RuntimeFunctions.cpp.

References agg_count_distinct_bitmap().

359  {
360  if (val != skip_val) {
361  agg_count_distinct_bitmap(agg, val, min_val);
362  }
363 }
ALWAYS_INLINE void agg_count_distinct_bitmap(int64_t *agg, const int64_t val, const int64_t min_val)

+ Here is the call graph for this function:

GPU_RT_STUB void agg_count_distinct_bitmap_skip_val_gpu ( int64_t *  ,
const int64_t  ,
const int64_t  ,
const int64_t  ,
const int64_t  ,
const int64_t  ,
const uint64_t  ,
const uint64_t   
)

Definition at line 365 of file RuntimeFunctions.cpp.

372  {}
ALWAYS_INLINE uint64_t agg_count_double ( uint64_t *  agg,
const double  val 
)

Definition at line 518 of file RuntimeFunctions.cpp.

Referenced by agg_count_double_skip_val().

518  {
519  return (*agg)++;
520 }

+ Here is the caller graph for this function:

ALWAYS_INLINE uint64_t agg_count_double_skip_val ( uint64_t *  agg,
const double  val,
const double  skip_val 
)

Definition at line 564 of file RuntimeFunctions.cpp.

References agg_count_double().

566  {
567  if (val != skip_val) {
568  return agg_count_double(agg, val);
569  }
570  return *agg;
571 }
ALWAYS_INLINE uint64_t agg_count_double(uint64_t *agg, const double val)

+ Here is the call graph for this function:

ALWAYS_INLINE uint32_t agg_count_float ( uint32_t *  agg,
const float  val 
)

Definition at line 541 of file RuntimeFunctions.cpp.

Referenced by agg_count_float_skip_val().

541  {
542  return (*agg)++;
543 }

+ Here is the caller graph for this function:

ALWAYS_INLINE uint32_t agg_count_float_skip_val ( uint32_t *  agg,
const float  val,
const float  skip_val 
)

Definition at line 573 of file RuntimeFunctions.cpp.

References agg_count_float().

575  {
576  if (val != skip_val) {
577  return agg_count_float(agg, val);
578  }
579  return *agg;
580 }
ALWAYS_INLINE uint32_t agg_count_float(uint32_t *agg, const float val)

+ Here is the call graph for this function:

ALWAYS_INLINE uint32_t agg_count_int32 ( uint32_t *  agg,
const int32_t   
)

Definition at line 374 of file RuntimeFunctions.cpp.

Referenced by agg_count_int32_skip_val().

374  {
375  return (*agg)++;
376 }

+ Here is the caller graph for this function:

ALWAYS_INLINE uint32_t agg_count_int32_skip_val ( uint32_t *  agg,
const int32_t  val,
const int32_t  skip_val 
)

Definition at line 463 of file RuntimeFunctions.cpp.

References agg_count_int32().

465  {
466  if (val != skip_val) {
467  return agg_count_int32(agg, val);
468  }
469  return *agg;
470 }
ALWAYS_INLINE uint32_t agg_count_int32(uint32_t *agg, const int32_t)

+ Here is the call graph for this function:

ALWAYS_INLINE uint64_t agg_count_skip_val ( uint64_t *  agg,
const int64_t  val,
const int64_t  skip_val 
)

Definition at line 454 of file RuntimeFunctions.cpp.

References agg_count().

456  {
457  if (val != skip_val) {
458  return agg_count(agg, val);
459  }
460  return *agg;
461 }
ALWAYS_INLINE uint64_t agg_count(uint64_t *agg, const int64_t)

+ Here is the call graph for this function:

int64_t const int32_t sz return agg_from_smem_to_gmem_nop ( dest  ,
src  ,
sz   
)
ALWAYS_INLINE void agg_id ( int64_t *  agg,
const int64_t  val 
)

Definition at line 352 of file RuntimeFunctions.cpp.

352  {
353  *agg = val;
354 }
ALWAYS_INLINE void agg_id_double ( int64_t *  agg,
const double  val 
)

Definition at line 537 of file RuntimeFunctions.cpp.

537  {
538  *agg = *(reinterpret_cast<const int64_t*>(may_alias_ptr(&val)));
539 }
GPU_RT_STUB void agg_id_double_shared_slow ( int64_t *  agg,
const double *  val 
)

Definition at line 724 of file RuntimeFunctions.cpp.

724 {}
ALWAYS_INLINE void agg_id_float ( int32_t *  agg,
const float  val 
)

Definition at line 560 of file RuntimeFunctions.cpp.

560  {
561  *agg = *(reinterpret_cast<const int32_t*>(may_alias_ptr(&val)));
562 }
ALWAYS_INLINE void agg_max ( int64_t *  agg,
const int64_t  val 
)

Definition at line 344 of file RuntimeFunctions.cpp.

344  {
345  *agg = std::max(*agg, val);
346 }
ALWAYS_INLINE void agg_max_double ( int64_t *  agg,
const double  val 
)

Definition at line 527 of file RuntimeFunctions.cpp.

527  {
528  const auto r = std::max(*reinterpret_cast<const double*>(agg), val);
529  *agg = *(reinterpret_cast<const int64_t*>(may_alias_ptr(&r)));
530 }
ALWAYS_INLINE void agg_max_float ( int32_t *  agg,
const float  val 
)

Definition at line 550 of file RuntimeFunctions.cpp.

550  {
551  const auto r = std::max(*reinterpret_cast<const float*>(agg), val);
552  *agg = *(reinterpret_cast<const int32_t*>(may_alias_ptr(&r)));
553 }
GPU_RT_STUB void agg_max_int16_skip_val_shared ( int16_t *  agg,
const int16_t  val,
const int16_t  skip_val 
)

Definition at line 708 of file RuntimeFunctions.cpp.

710  {}
GPU_RT_STUB void agg_max_int8_skip_val_shared ( int8_t *  agg,
const int8_t  val,
const int8_t  skip_val 
)

Definition at line 712 of file RuntimeFunctions.cpp.

714  {}
ALWAYS_INLINE void agg_min ( int64_t *  agg,
const int64_t  val 
)

Definition at line 348 of file RuntimeFunctions.cpp.

348  {
349  *agg = std::min(*agg, val);
350 }
ALWAYS_INLINE void agg_min_double ( int64_t *  agg,
const double  val 
)

Definition at line 532 of file RuntimeFunctions.cpp.

532  {
533  const auto r = std::min(*reinterpret_cast<const double*>(agg), val);
534  *agg = *(reinterpret_cast<const int64_t*>(may_alias_ptr(&r)));
535 }
ALWAYS_INLINE void agg_min_float ( int32_t *  agg,
const float  val 
)

Definition at line 555 of file RuntimeFunctions.cpp.

555  {
556  const auto r = std::min(*reinterpret_cast<const float*>(agg), val);
557  *agg = *(reinterpret_cast<const int32_t*>(may_alias_ptr(&r)));
558 }
GPU_RT_STUB void agg_min_int16_skip_val_shared ( int16_t *  agg,
const int16_t  val,
const int16_t  skip_val 
)

Definition at line 716 of file RuntimeFunctions.cpp.

718  {}
GPU_RT_STUB void agg_min_int8_skip_val_shared ( int8_t *  agg,
const int8_t  val,
const int8_t  skip_val 
)

Definition at line 720 of file RuntimeFunctions.cpp.

722  {}
ALWAYS_INLINE int64_t agg_sum ( int64_t *  agg,
const int64_t  val 
)

Definition at line 338 of file RuntimeFunctions.cpp.

Referenced by agg_sum_skip_val().

338  {
339  const auto old = *agg;
340  *agg += val;
341  return old;
342 }

+ Here is the caller graph for this function:

ALWAYS_INLINE void agg_sum_double ( int64_t *  agg,
const double  val 
)

Definition at line 522 of file RuntimeFunctions.cpp.

522  {
523  const auto r = *reinterpret_cast<const double*>(agg) + val;
524  *agg = *reinterpret_cast<const int64_t*>(may_alias_ptr(&r));
525 }
GPU_RT_STUB void agg_sum_double_shared ( int64_t *  agg,
const double  val 
)

Definition at line 745 of file RuntimeFunctions.cpp.

745 {}
GPU_RT_STUB void agg_sum_double_skip_val_shared ( int64_t *  agg,
const double  val,
const double  skip_val 
)

Definition at line 747 of file RuntimeFunctions.cpp.

749  {}
ALWAYS_INLINE void agg_sum_float ( int32_t *  agg,
const float  val 
)

Definition at line 545 of file RuntimeFunctions.cpp.

545  {
546  const auto r = *reinterpret_cast<const float*>(agg) + val;
547  *agg = *reinterpret_cast<const int32_t*>(may_alias_ptr(&r));
548 }
GPU_RT_STUB void agg_sum_float_shared ( int32_t *  agg,
const float  val 
)

Definition at line 750 of file RuntimeFunctions.cpp.

750 {}
GPU_RT_STUB void agg_sum_float_skip_val_shared ( int32_t *  agg,
const float  val,
const float  skip_val 
)

Definition at line 752 of file RuntimeFunctions.cpp.

754  {}
ALWAYS_INLINE int32_t agg_sum_int32 ( int32_t *  agg,
const int32_t  val 
)

Definition at line 378 of file RuntimeFunctions.cpp.

Referenced by agg_sum_int32_skip_val().

378  {
379  const auto old = *agg;
380  *agg += val;
381  return old;
382 }

+ Here is the caller graph for this function:

GPU_RT_STUB int32_t agg_sum_int32_shared ( int32_t *  agg,
const int32_t  val 
)

Definition at line 735 of file RuntimeFunctions.cpp.

735  {
736  return 0;
737 }
ALWAYS_INLINE int32_t agg_sum_int32_skip_val ( int32_t *  agg,
const int32_t  val,
const int32_t  skip_val 
)

Definition at line 440 of file RuntimeFunctions.cpp.

References agg_sum_int32().

442  {
443  const auto old = *agg;
444  if (val != skip_val) {
445  if (old != skip_val) {
446  return agg_sum_int32(agg, val);
447  } else {
448  *agg = val;
449  }
450  }
451  return old;
452 }
ALWAYS_INLINE int32_t agg_sum_int32(int32_t *agg, const int32_t val)

+ Here is the call graph for this function:

GPU_RT_STUB int32_t agg_sum_int32_skip_val_shared ( int32_t *  agg,
const int32_t  val,
const int32_t  skip_val 
)

Definition at line 739 of file RuntimeFunctions.cpp.

741  {
742  return 0;
743 }
GPU_RT_STUB int64_t agg_sum_shared ( int64_t *  agg,
const int64_t  val 
)

Definition at line 726 of file RuntimeFunctions.cpp.

726  {
727  return 0;
728 }
ALWAYS_INLINE int64_t agg_sum_skip_val ( int64_t *  agg,
const int64_t  val,
const int64_t  skip_val 
)

Definition at line 426 of file RuntimeFunctions.cpp.

References agg_sum().

Referenced by Executor::reduceResults().

428  {
429  const auto old = *agg;
430  if (val != skip_val) {
431  if (old != skip_val) {
432  return agg_sum(agg, val);
433  } else {
434  *agg = val;
435  }
436  }
437  return old;
438 }
ALWAYS_INLINE int64_t agg_sum(int64_t *agg, const int64_t val)

+ Here is the call graph for this function:

+ Here is the caller graph for this function:

GPU_RT_STUB int64_t agg_sum_skip_val_shared ( int64_t *  agg,
const int64_t  val,
const int64_t  skip_val 
)

Definition at line 730 of file RuntimeFunctions.cpp.

732  {
733  return 0;
734 }
int64_t const int32_t sz assert ( dest  )

Referenced by org.apache.calcite.prepare.MapDSqlAdvisor::applyPermissionsToTableHints(), checkCudaErrors(), org.apache.calcite.sql2rel.SqlToRelConverter::collectInsertTargets(), org.apache.calcite.sql2rel.SqlToRelConverter::convertAgg(), org.apache.calcite.sql2rel.SqlToRelConverter::convertColumnList(), org.apache.calcite.sql2rel.SqlToRelConverter::convertCursor(), org.apache.calcite.sql2rel.SqlToRelConverter.Blackboard::convertExpression(), org.apache.calcite.sql2rel.SqlToRelConverter::convertFrom(), org.apache.calcite.sql2rel.SqlToRelConverter::convertIdentifier(), org.apache.calcite.sql2rel.SqlToRelConverter::convertInsert(), org.apache.calcite.sql2rel.SqlToRelConverter::convertInToOr(), org.apache.calcite.sql2rel.SqlToRelConverter::convertLiteralInValuesList(), org.apache.calcite.sql2rel.SqlToRelConverter::convertMatchRecognize(), org.apache.calcite.sql2rel.SqlToRelConverter::convertMerge(), org.apache.calcite.sql2rel.SqlToRelConverter::convertOrder(), org.apache.calcite.sql2rel.SqlToRelConverter::convertOrderItem(), org.apache.calcite.sql2rel.SqlToRelConverter::convertSelectList(), org.apache.calcite.sql2rel.SqlToRelConverter::convertUpdate(), org.apache.calcite.sql2rel.SqlToRelConverter::convertValues(), org.apache.calcite.sql2rel.SqlToRelConverter::convertWhere(), count_matches_baseline(), org.apache.calcite.sql2rel.SqlToRelConverter::createAggImpl(), org.apache.calcite.sql2rel.SqlToRelConverter::createJoin(), org.apache.calcite.sql2rel.SqlToRelConverter::createSource(), decompress(), com.mapd.parser.server.ExtensionFunctionSignatureParser::deserializeType(), com.mapd.calcite.parser.MapDSqlOperatorTable::dropSuffix(), fill_row_ids_baseline(), fixed_width_double_decode(), fixed_width_float_decode(), fixed_width_int_decode(), fixed_width_unsigned_decode(), org.apache.calcite.sql2rel.SqlToRelConverter::gatherOrderExprs(), get_join_column_element_value(), get_matching_baseline_hash_slot_readonly(), SQLTypeInfoCore< ArrayContextTypeSizer, ExecutorTypePackaging, 
DateTimeFacilities >::get_storage_size(), org.apache.calcite.sql2rel.SqlToRelConverter::getCorrelationUse(), SqliteConnector::getData(), org.apache.calcite.sql2rel.SqlToRelConverter.Blackboard::getSubQueryExpr(), com.mapd.calcite.parser.MapDSqlOperatorTable.ExtFunction::getValueType(), CartesianProductIterator< T >::increment(), com.mapd.calcite.parser.MapDSqlOperatorTable.RowCopier::inferReturnType(), com.mapd.calcite.parser.MapDSqlOperatorTable.PgUnnest::inferReturnType(), com.mapd.calcite.parser.MapDSqlOperatorTable.Any::inferReturnType(), com.mapd.calcite.parser.MapDSqlOperatorTable.All::inferReturnType(), com.mapd.calcite.parser.MapDSqlOperatorTable.Now::inferReturnType(), com.mapd.calcite.parser.MapDSqlOperatorTable.Datetime::inferReturnType(), com.mapd.calcite.parser.MapDSqlOperatorTable.Truncate::inferReturnType(), com.mapd.calcite.parser.MapDSqlOperatorTable.ST_Contains::inferReturnType(), com.mapd.calcite.parser.MapDSqlOperatorTable.ST_Intersects::inferReturnType(), com.mapd.calcite.parser.MapDSqlOperatorTable.ST_Disjoint::inferReturnType(), com.mapd.calcite.parser.MapDSqlOperatorTable.ST_Within::inferReturnType(), com.mapd.calcite.parser.MapDSqlOperatorTable.ST_DWithin::inferReturnType(), com.mapd.calcite.parser.MapDSqlOperatorTable.ST_DFullyWithin::inferReturnType(), com.mapd.calcite.parser.MapDSqlOperatorTable.ST_Distance::inferReturnType(), com.mapd.calcite.parser.MapDSqlOperatorTable.ST_MaxDistance::inferReturnType(), com.mapd.calcite.parser.MapDSqlOperatorTable.ST_GeogFromText::inferReturnType(), com.mapd.calcite.parser.MapDSqlOperatorTable.ST_GeomFromText::inferReturnType(), com.mapd.calcite.parser.MapDSqlOperatorTable.ST_Transform::inferReturnType(), com.mapd.calcite.parser.MapDSqlOperatorTable.ST_X::inferReturnType(), com.mapd.calcite.parser.MapDSqlOperatorTable.ST_Y::inferReturnType(), com.mapd.calcite.parser.MapDSqlOperatorTable.ST_XMin::inferReturnType(), com.mapd.calcite.parser.MapDSqlOperatorTable.ST_XMax::inferReturnType(), 
com.mapd.calcite.parser.MapDSqlOperatorTable.ST_YMin::inferReturnType(), com.mapd.calcite.parser.MapDSqlOperatorTable.ST_YMax::inferReturnType(), com.mapd.calcite.parser.MapDSqlOperatorTable.ST_PointN::inferReturnType(), com.mapd.calcite.parser.MapDSqlOperatorTable.ST_EndPoint::inferReturnType(), com.mapd.calcite.parser.MapDSqlOperatorTable.ST_StartPoint::inferReturnType(), com.mapd.calcite.parser.MapDSqlOperatorTable.ST_Length::inferReturnType(), com.mapd.calcite.parser.MapDSqlOperatorTable.ST_Perimeter::inferReturnType(), com.mapd.calcite.parser.MapDSqlOperatorTable.ST_Area::inferReturnType(), com.mapd.calcite.parser.MapDSqlOperatorTable.ST_NPoints::inferReturnType(), com.mapd.calcite.parser.MapDSqlOperatorTable.ST_NRings::inferReturnType(), com.mapd.calcite.parser.MapDSqlOperatorTable.ST_SRID::inferReturnType(), com.mapd.calcite.parser.MapDSqlOperatorTable.ST_SetSRID::inferReturnType(), com.mapd.calcite.parser.MapDSqlOperatorTable.ST_Point::inferReturnType(), com.mapd.calcite.parser.MapDSqlOperatorTable.CastToGeography::inferReturnType(), com.mapd.calcite.parser.MapDSqlOperatorTable.OffsetInFragment::inferReturnType(), com.mapd.calcite.parser.MapDSqlOperatorTable.ExtFunction::inferReturnType(), com.mapd.calcite.parser.MapDSqlOperatorTable.MapD_GeoPolyBoundsPtr::inferReturnType(), com.mapd.calcite.parser.MapDSqlOperatorTable.MapD_GeoPolyRenderGroup::inferReturnType(), com.mapd.calcite.parser.MapDSqlOperatorTable.OmniSci_Geo_PolyBoundsPtr::inferReturnType(), com.mapd.calcite.parser.MapDSqlOperatorTable.OmniSci_Geo_PolyRenderGroup::inferReturnType(), com.mapd.calcite.parser.MapDSqlOperatorTable.convert_meters_to_pixel_width::inferReturnType(), com.mapd.calcite.parser.MapDSqlOperatorTable.convert_meters_to_pixel_height::inferReturnType(), com.mapd.calcite.parser.MapDSqlOperatorTable.is_point_in_view::inferReturnType(), com.mapd.calcite.parser.MapDSqlOperatorTable.is_point_size_in_view::inferReturnType(), 
com.mapd.calcite.parser.MapDSqlOperatorTable.usTimestamp::inferReturnType(), com.mapd.calcite.parser.MapDSqlOperatorTable.nsTimestamp::inferReturnType(), SqliteConnector::isNull(), org.apache.calcite.sql2rel.SqlToRelConverter.AggConverter::lookupAggregates(), com.mapd.tests.SelectCopyFromDeleteConcurrencyTest::main(), org.apache.calcite.sql2rel.SqlToRelConverter::negate(), parse_numeric(), com.mapd.parser.server.ExtensionFunctionSignatureParser::pointerType(), org.apache.calcite.sql2rel.SqlToRelConverter::pushDownNotForIn(), SqliteConnector::query_with_text_params(), org.apache.calcite.sql2rel.SqlToRelConverter.Blackboard::register(), org.apache.calcite.sql2rel.SqlToRelConverter::setDynamicParamCountInExplain(), org.apache.calcite.sql2rel.SqlToRelConverter::substituteSubQuery(), Parser::SQLType::to_string(), com.mapd.calcite.parser.MapDSqlOperatorTable.ExtFunction::toSqlTypeName(), org.apache.calcite.sql2rel.SqlToRelConverter.AggConverter::translateAgg(), org.apache.calcite.sql2rel.SqlToRelConverter::translateIn(), com.mapd.parser.server.ExtensionFunction::typeName(), and org.apache.calcite.sql2rel.SqlToRelConverter.AggConverter::visit().

const int64_t const uint32_t const uint32_t const uint32_t const bool const int8_t warp_size assert ( groups_buffer  )
const int8_t const int64_t const uint64_t const int32_t const int64_t int64_t uint32_t const int64_t int32_t int32_t* total_matched assert ( col_buffers||literals||num_rows||frag_row_offsets||max_matched||init_agg_value||out||frag_idx||error_code||join_hash_tables||  total_matched)
const int64_t const uint64_t const int32_t const int64_t int64_t uint32_t const int64_t int32_t int32_t* total_matched assert ( col_buffers||num_rows||frag_row_offsets||max_matched||init_agg_value||out||frag_idx||error_code||join_hash_tables||  total_matched)
ALWAYS_INLINE int8_t bit_is_set ( const int64_t  bitset,
const int64_t  val,
const int64_t  min_val,
const int64_t  max_val,
const int64_t  null_val,
const int8_t  null_bool_val 
)

Definition at line 316 of file RuntimeFunctions.cpp.

321  {
322  if (val == null_val) {
323  return null_bool_val;
324  }
325  if (val < min_val || val > max_val) {
326  return 0;
327  }
328  if (!bitset) {
329  return 0;
330  }
331  const uint64_t bitmap_idx = val - min_val;
332  return (reinterpret_cast<const int8_t*>(bitset))[bitmap_idx >> 3] &
333  (1 << (bitmap_idx & 7))
334  ? 1
335  : 0;
336 }
ALWAYS_INLINE DEVICE int32_t char_length ( const char *  str,
const int32_t  str_len 
)

Definition at line 1102 of file RuntimeFunctions.cpp.

Referenced by ScalarExprVisitor< std::unordered_set< InputColDescriptor > >::visit().

1103  {
1104  return str_len;
1105 }

+ Here is the caller graph for this function:

ALWAYS_INLINE DEVICE int32_t char_length_nullable ( const char *  str,
const int32_t  str_len,
const int32_t  int_null 
)

Definition at line 1107 of file RuntimeFunctions.cpp.

1109  {
1110  if (!str) {
1111  return int_null;
1112  }
1113  return str_len;
1114 }
ALWAYS_INLINE int64_t decimal_ceil ( const int64_t  x,
const int64_t  scale 
)

Definition at line 632 of file RuntimeFunctions.cpp.

References decimal_floor().

632  {
633  return decimal_floor(x, scale) + (x % scale ? scale : 0);
634 }
ALWAYS_INLINE int64_t decimal_floor(const int64_t x, const int64_t scale)

+ Here is the call graph for this function:

ALWAYS_INLINE int64_t decimal_floor ( const int64_t  x,
const int64_t  scale 
)

Definition at line 622 of file RuntimeFunctions.cpp.

Referenced by decimal_ceil().

622  {
623  if (x >= 0) {
624  return x / scale * scale;
625  }
626  if (!(x % scale)) {
627  return x;
628  }
629  return x / scale * scale - scale;
630 }

+ Here is the caller graph for this function:

ALWAYS_INLINE int32_t extract_str_len ( const uint64_t  str_and_len)

Definition at line 1075 of file RuntimeFunctions.cpp.

1075  {
1076  return static_cast<int64_t>(str_and_len) >> 48;
1077 }
ALWAYS_INLINE int8_t* extract_str_ptr ( const uint64_t  str_and_len)

Definition at line 1071 of file RuntimeFunctions.cpp.

1071  {
1072  return reinterpret_cast<int8_t*>(str_and_len & 0xffffffffffff);
1073 }
GPU_RT_STUB void force_sync ( )

Definition at line 756 of file RuntimeFunctions.cpp.

756 {}
ALWAYS_INLINE int64_t* get_group_value_fast_keyless ( int64_t *  groups_buffer,
const int64_t  key,
const int64_t  min_key,
const int64_t  ,
const uint32_t  row_size_quad 
)

Definition at line 1051 of file RuntimeFunctions.cpp.

1056  {
1057  return groups_buffer + row_size_quad * (key - min_key);
1058 }
const int32_t groups_buffer_size return groups_buffer
ALWAYS_INLINE int64_t* get_group_value_fast_keyless_semiprivate ( int64_t *  groups_buffer,
const int64_t  key,
const int64_t  min_key,
const int64_t  ,
const uint32_t  row_size_quad,
const uint8_t  thread_warp_idx,
const uint8_t  warp_size 
)

Definition at line 1060 of file RuntimeFunctions.cpp.

1067  {
1068  return groups_buffer + row_size_quad * (warp_size * (key - min_key) + thread_warp_idx);
1069 }
const int32_t groups_buffer_size return groups_buffer
__device__ int8_t thread_warp_idx(const int8_t warp_sz)
Definition: cuda_mapd_rt.cu:23
template<typename T >
ALWAYS_INLINE int64_t* get_matching_group_value ( int64_t *  groups_buffer,
const uint32_t  h,
const T *  key,
const uint32_t  key_count,
const uint32_t  row_size_quad 
)

Definition at line 893 of file RuntimeFunctions.cpp.

References align_to_int64().

897  {
898  auto off = h * row_size_quad;
899  auto row_ptr = reinterpret_cast<T*>(groups_buffer + off);
900  if (*row_ptr == get_empty_key<T>()) {
901  memcpy(row_ptr, key, key_count * sizeof(T));
902  auto row_ptr_i8 = reinterpret_cast<int8_t*>(row_ptr + key_count);
903  return reinterpret_cast<int64_t*>(align_to_int64(row_ptr_i8));
904  }
905  if (memcmp(row_ptr, key, key_count * sizeof(T)) == 0) {
906  auto row_ptr_i8 = reinterpret_cast<int8_t*>(row_ptr + key_count);
907  return reinterpret_cast<int64_t*>(align_to_int64(row_ptr_i8));
908  }
909  return nullptr;
910 }
const int32_t groups_buffer_size return groups_buffer
FORCE_INLINE HOST DEVICE T align_to_int64(T addr)

+ Here is the call graph for this function:

ALWAYS_INLINE int64_t* get_matching_group_value ( int64_t *  groups_buffer,
const uint32_t  h,
const int64_t *  key,
const uint32_t  key_count,
const uint32_t  key_width,
const uint32_t  row_size_quad,
const int64_t *  init_vals 
)

Definition at line 912 of file RuntimeFunctions.cpp.

References get_matching_group_value().

918  {
919  switch (key_width) {
920  case 4:
922  h,
923  reinterpret_cast<const int32_t*>(key),
924  key_count,
925  row_size_quad);
926  case 8:
927  return get_matching_group_value(groups_buffer, h, key, key_count, row_size_quad);
928  default:;
929  }
930  return nullptr;
931 }
const int32_t groups_buffer_size return groups_buffer
__device__ int64_t * get_matching_group_value(int64_t *groups_buffer, const uint32_t h, const T *key, const uint32_t key_count, const uint32_t row_size_quad)

+ Here is the call graph for this function:

ALWAYS_INLINE int64_t* get_matching_group_value_columnar ( int64_t *  groups_buffer,
const uint32_t  h,
const int64_t *  key,
const uint32_t  key_qw_count,
const size_t  entry_count 
)

Definition at line 981 of file RuntimeFunctions.cpp.

References EMPTY_KEY_64, and key_qw_count.

986  {
987  auto off = h;
988  if (groups_buffer[off] == EMPTY_KEY_64) {
989  for (size_t i = 0; i < key_qw_count; ++i) {
990  groups_buffer[off] = key[i];
991  off += entry_count;
992  }
993  return &groups_buffer[off];
994  }
995  off = h;
996  for (size_t i = 0; i < key_qw_count; ++i) {
997  if (groups_buffer[off] != key[i]) {
998  return nullptr;
999  }
1000  off += entry_count;
1001  }
1002  return &groups_buffer[off];
1003 }
const int32_t groups_buffer_size return groups_buffer
#define EMPTY_KEY_64
const int64_t const uint32_t const uint32_t key_qw_count
template<typename T >
ALWAYS_INLINE int32_t get_matching_group_value_columnar_slot ( int64_t *  groups_buffer,
const uint32_t  entry_count,
const uint32_t  h,
const T *  key,
const uint32_t  key_count 
)

Definition at line 934 of file RuntimeFunctions.cpp.

References groups_buffer.

938  {
939  auto off = h;
940  auto key_buffer = reinterpret_cast<T*>(groups_buffer);
941  if (key_buffer[off] == get_empty_key<T>()) {
942  for (size_t i = 0; i < key_count; ++i) {
943  key_buffer[off] = key[i];
944  off += entry_count;
945  }
946  return h;
947  }
948  off = h;
949  for (size_t i = 0; i < key_count; ++i) {
950  if (key_buffer[off] != key[i]) {
951  return -1;
952  }
953  off += entry_count;
954  }
955  return h;
956 }
const int32_t groups_buffer_size return groups_buffer
ALWAYS_INLINE int32_t get_matching_group_value_columnar_slot ( int64_t *  groups_buffer,
const uint32_t  entry_count,
const uint32_t  h,
const int64_t *  key,
const uint32_t  key_count,
const uint32_t  key_width 
)

Definition at line 959 of file RuntimeFunctions.cpp.

References get_matching_group_value_columnar_slot().

964  {
965  switch (key_width) {
966  case 4:
968  entry_count,
969  h,
970  reinterpret_cast<const int32_t*>(key),
971  key_count);
972  case 8:
974  groups_buffer, entry_count, h, key, key_count);
975  default:
976  return -1;
977  }
978  return -1;
979 }
const int32_t groups_buffer_size return groups_buffer
__device__ int32_t get_matching_group_value_columnar_slot(int64_t *groups_buffer, const uint32_t entry_count, const uint32_t h, const T *key, const uint32_t key_count)

+ Here is the call graph for this function:

ALWAYS_INLINE int64_t* get_matching_group_value_perfect_hash ( int64_t *  groups_buffer,
const uint32_t  hashed_index,
const int64_t *  key,
const uint32_t  key_count,
const uint32_t  row_size_quad 
)

Definition at line 1016 of file RuntimeFunctions.cpp.

References EMPTY_KEY_64.

1021  {
1022  uint32_t off = hashed_index * row_size_quad;
1023  if (groups_buffer[off] == EMPTY_KEY_64) {
1024  for (uint32_t i = 0; i < key_count; ++i) {
1025  groups_buffer[off + i] = key[i];
1026  }
1027  }
1028  return groups_buffer + off + key_count;
1029 }
const int32_t groups_buffer_size return groups_buffer
#define EMPTY_KEY_64
const int32_t groups_buffer_size return init_shared_mem_nop ( groups_buffer  ,
groups_buffer_size   
)
ALWAYS_INLINE DEVICE int32_t key_for_string_encoded ( const int32_t  str_id)

Definition at line 1116 of file RuntimeFunctions.cpp.

1116  {
1117  return str_id;
1118 }
NEVER_INLINE void linear_probabilistic_count ( uint8_t *  bitmap,
const uint32_t  bitmap_bytes,
const uint8_t *  key_bytes,
const uint32_t  key_len 
)

Definition at line 1165 of file RuntimeFunctions.cpp.

References MurmurHash1().

1168  {
1169  const uint32_t bit_pos = MurmurHash1(key_bytes, key_len, 0) % (bitmap_bytes * 8);
1170  const uint32_t word_idx = bit_pos / 32;
1171  const uint32_t bit_idx = bit_pos % 32;
1172  reinterpret_cast<uint32_t*>(bitmap)[word_idx] |= 1 << bit_idx;
1173 }
NEVER_INLINE DEVICE uint32_t MurmurHash1(const void *key, int len, const uint32_t seed)
Definition: MurmurHash.cpp:20

+ Here is the call graph for this function:

ALWAYS_INLINE double load_avg_decimal ( const int64_t *  sum,
const int64_t *  count,
const double  null_val,
const uint32_t  scale 
)

Definition at line 1144 of file RuntimeFunctions.cpp.

1147  {
1148  return *count != 0 ? (static_cast<double>(*sum) / pow(10, scale)) / *count : null_val;
1149 }
ALWAYS_INLINE double load_avg_double ( const int64_t *  agg,
const int64_t *  count,
const double  null_val 
)

Definition at line 1151 of file RuntimeFunctions.cpp.

1153  {
1154  return *count != 0 ? *reinterpret_cast<const double*>(may_alias_ptr(agg)) / *count
1155  : null_val;
1156 }
ALWAYS_INLINE double load_avg_float ( const int32_t *  agg,
const int32_t *  count,
const double  null_val 
)

Definition at line 1158 of file RuntimeFunctions.cpp.

1160  {
1161  return *count != 0 ? *reinterpret_cast<const float*>(may_alias_ptr(agg)) / *count
1162  : null_val;
1163 }
ALWAYS_INLINE double load_avg_int ( const int64_t *  sum,
const int64_t *  count,
const double  null_val 
)

Definition at line 1138 of file RuntimeFunctions.cpp.

1140  {
1141  return *count != 0 ? static_cast<double>(*sum) / *count : null_val;
1142 }
ALWAYS_INLINE double load_double ( const int64_t *  agg)

Definition at line 1130 of file RuntimeFunctions.cpp.

1130  {
1131  return *reinterpret_cast<const double*>(may_alias_ptr(agg));
1132 }
ALWAYS_INLINE float load_float ( const int32_t *  agg)

Definition at line 1134 of file RuntimeFunctions.cpp.

1134  {
1135  return *reinterpret_cast<const float*>(may_alias_ptr(agg));
1136 }
ALWAYS_INLINE int8_t logical_and ( const int8_t  lhs,
const int8_t  rhs,
const int8_t  null_val 
)

Definition at line 253 of file RuntimeFunctions.cpp.

255  {
256  if (lhs == null_val) {
257  return rhs == 0 ? rhs : null_val;
258  }
259  if (rhs == null_val) {
260  return lhs == 0 ? lhs : null_val;
261  }
262  return (lhs && rhs) ? 1 : 0;
263 }
ALWAYS_INLINE int8_t logical_not ( const int8_t  operand,
const int8_t  null_val 
)

Definition at line 249 of file RuntimeFunctions.cpp.

249  {
250  return operand == null_val ? operand : (operand ? 0 : 1);
251 }
ALWAYS_INLINE int8_t logical_or ( const int8_t  lhs,
const int8_t  rhs,
const int8_t  null_val 
)

Definition at line 265 of file RuntimeFunctions.cpp.

267  {
268  if (lhs == null_val) {
269  return rhs == 0 ? null_val : rhs;
270  }
271  if (rhs == null_val) {
272  return lhs == 0 ? null_val : lhs;
273  }
274  return (lhs || rhs) ? 1 : 0;
275 }
void multifrag_query ( const int8_t ***  col_buffers,
const uint64_t *  num_fragments,
const int64_t *  num_rows,
const uint64_t *  frag_row_offsets,
const int32_t *  max_matched,
int32_t *  total_matched,
const int64_t *  init_agg_value,
int64_t **  out,
int32_t *  error_code,
const uint32_t *  num_tables_ptr,
const int64_t *  join_hash_tables 
)

Definition at line 1233 of file RuntimeFunctions.cpp.

1243  {
1244  for (uint32_t i = 0; i < *num_fragments; ++i) {
1245  query_stub(col_buffers ? col_buffers[i] : nullptr,
1246  &num_rows[i * (*num_tables_ptr)],
1247  &frag_row_offsets[i * (*num_tables_ptr)],
1248  max_matched,
1250  out,
1251  i,
1253  total_matched,
1254  error_code);
1255  }
1256 }
const int8_t const int64_t const uint64_t const int32_t const int64_t int64_t uint32_t const int64_t * join_hash_tables
const int8_t const int64_t * num_rows
const int8_t const int64_t const uint64_t const int32_t const int64_t int64_t uint32_t const int64_t int32_t * error_code
const int8_t const int64_t const uint64_t const int32_t * max_matched
const int8_t const int64_t const uint64_t const int32_t const int64_t * init_agg_value
const int8_t const int64_t const uint64_t * frag_row_offsets
const int8_t const int64_t const uint64_t const int32_t const int64_t int64_t ** out
void multifrag_query_hoisted_literals ( const int8_t ***  col_buffers,
const uint64_t *  num_fragments,
const int8_t *  literals,
const int64_t *  num_rows,
const uint64_t *  frag_row_offsets,
const int32_t *  max_matched,
int32_t *  total_matched,
const int64_t *  init_agg_value,
int64_t **  out,
int32_t *  error_code,
const uint32_t *  num_tables_ptr,
const int64_t *  join_hash_tables 
)

Definition at line 1192 of file RuntimeFunctions.cpp.

1203  {
1204  for (uint32_t i = 0; i < *num_fragments; ++i) {
1205  query_stub_hoisted_literals(col_buffers ? col_buffers[i] : nullptr,
1206  literals,
1207  &num_rows[i * (*num_tables_ptr)],
1208  &frag_row_offsets[i * (*num_tables_ptr)],
1209  max_matched,
1211  out,
1212  i,
1214  total_matched,
1215  error_code);
1216  }
1217 }
const int8_t const int64_t const uint64_t const int32_t const int64_t int64_t uint32_t const int64_t * join_hash_tables
const int8_t const int64_t * num_rows
const int8_t const int64_t const uint64_t const int32_t const int64_t int64_t uint32_t const int64_t int32_t * error_code
const int8_t const int64_t const uint64_t const int32_t * max_matched
const int8_t const int64_t const uint64_t const int32_t const int64_t * init_agg_value
const int8_t * literals
const int8_t const int64_t const uint64_t * frag_row_offsets
const int8_t const int64_t const uint64_t const int32_t const int64_t int64_t ** out
ALWAYS_INLINE double percent_window_func ( const int64_t  output_buff,
const int64_t  pos 
)

Definition at line 1125 of file RuntimeFunctions.cpp.

1126  {
1127  return reinterpret_cast<const double*>(output_buff)[pos];
1128 }
ALWAYS_INLINE int32_t record_error_code ( const int32_t  err_code,
int32_t *  error_codes 
)

Definition at line 786 of file RuntimeFunctions.cpp.

References pos_start_impl().

787  {
788  // NB: never override persistent error codes (with code greater than zero).
789  // On GPU, a projection query with a limit can run out of slots without it
790  // being an actual error if the limit has been hit. If a persistent error
791  // (division by zero, for example) occurs before running out of slots, we
792  // have to avoid overriding it, because there's a risk that the query would
793  // go through if we override with a potentially benign out-of-slots code.
794  if (err_code && error_codes[pos_start_impl(nullptr)] <= 0) {
795  error_codes[pos_start_impl(nullptr)] = err_code;
796  }
797  return err_code;
798 }
__device__ int32_t pos_start_impl(const int32_t *row_index_resume)
Definition: cuda_mapd_rt.cu:11

+ Here is the call graph for this function:

ALWAYS_INLINE int64_t row_number_window_func ( const int64_t  output_buff,
const int64_t  pos 
)

Definition at line 1120 of file RuntimeFunctions.cpp.

1121  {
1122  return reinterpret_cast<const int64_t*>(output_buff)[pos];
1123 }
ALWAYS_INLINE int64_t scale_decimal_down_not_nullable ( const int64_t  operand,
const int64_t  scale,
const int64_t  null_val 
)

Definition at line 194 of file RuntimeFunctions.cpp.

196  {
197  int64_t tmp = scale >> 1;
198  tmp = operand >= 0 ? operand + tmp : operand - tmp;
199  return tmp / scale;
200 }
ALWAYS_INLINE int64_t scale_decimal_down_nullable ( const int64_t  operand,
const int64_t  scale,
const int64_t  null_val 
)

Definition at line 181 of file RuntimeFunctions.cpp.

183  {
184  // rounded scale down of a decimal
185  if (operand == null_val) {
186  return null_val;
187  }
188 
189  int64_t tmp = scale >> 1;
190  tmp = operand >= 0 ? operand + tmp : operand - tmp;
191  return tmp / scale;
192 }
ALWAYS_INLINE int64_t scale_decimal_up ( const int64_t  operand,
const uint64_t  scale,
const int64_t  operand_null_val,
const int64_t  result_null_val 
)

Definition at line 174 of file RuntimeFunctions.cpp.

177  {
178  return operand != operand_null_val ? operand * scale : result_null_val;
179 }
ALWAYS_INLINE void set_matching_group_value_perfect_hash_columnar ( int64_t *  groups_buffer,
const uint32_t  hashed_index,
const int64_t *  key,
const uint32_t  key_count,
const uint32_t  entry_count 
)

Definition at line 1035 of file RuntimeFunctions.cpp.

References EMPTY_KEY_64.

1040  {
1041  if (groups_buffer[hashed_index] == EMPTY_KEY_64) {
1042  for (uint32_t i = 0; i < key_count; i++) {
1043  groups_buffer[i * entry_count + hashed_index] = key[i];
1044  }
1045  }
1046 }
const int32_t groups_buffer_size return groups_buffer
#define EMPTY_KEY_64
ALWAYS_INLINE uint64_t string_pack ( const int8_t *  ptr,
const int32_t  len 
)

Definition at line 1089 of file RuntimeFunctions.cpp.

1089  {
1090  return (reinterpret_cast<const uint64_t>(ptr) & 0xffffffffffff) |
1091  (static_cast<const uint64_t>(len) << 48);
1092 }
GPU_RT_STUB void sync_warp ( )

Definition at line 758 of file RuntimeFunctions.cpp.

758 {}
GPU_RT_STUB void sync_warp_protected ( int64_t  thread_pos,
int64_t  row_count 
)

Definition at line 759 of file RuntimeFunctions.cpp.

759 {}
GPU_RT_STUB int8_t thread_warp_idx ( const int8_t  warp_sz)

Definition at line 780 of file RuntimeFunctions.cpp.

780  {
781  return 0;
782 }
int64_t const int32_t sz write_back_nop ( dest  ,
src  ,
sz   
)

Variable Documentation

const int64_t const uint32_t const uint32_t const uint32_t const bool const bool blocks_share_memory

Definition at line 869 of file RuntimeFunctions.cpp.

const int64_t const uint64_t const int32_t const int64_t int64_t uint32_t frag_idx
const int64_t const uint64_t * frag_row_offsets

Definition at line 1177 of file RuntimeFunctions.cpp.

const int64_t const uint64_t const int32_t const int64_t * init_agg_value

Definition at line 1177 of file RuntimeFunctions.cpp.

const int64_t * init_vals
const int64_t const uint64_t const int32_t const int64_t int64_t uint32_t const int64_t * join_hash_tables
const int64_t const uint32_t const uint32_t const uint32_t const bool keyless
const int8_t* literals
const int64_t const uint64_t const int32_t * max_matched
const int32_t groups_buffer_size return nullptr

Definition at line 824 of file RuntimeFunctions.cpp.