OmniSciDB  343343d194
anonymous_namespace{ResultSetReduction.cpp} Namespace Reference

Functions

bool use_multithreaded_reduction (const size_t entry_count)
 
size_t get_row_qw_count (const QueryMemoryDescriptor &query_mem_desc)
 
std::vector< int64_t > make_key (const int64_t *buff, const size_t entry_count, const size_t key_count)
 
void fill_slots (int64_t *dst_entry, const size_t dst_entry_count, const int64_t *src_buff, const size_t src_entry_idx, const size_t src_entry_count, const QueryMemoryDescriptor &query_mem_desc)
 
ALWAYS_INLINE void fill_empty_key_32 (int32_t *key_ptr_i32, const size_t key_count)
 
ALWAYS_INLINE void fill_empty_key_64 (int64_t *key_ptr_i64, const size_t key_count)
 
int64_t get_component (const int8_t *group_by_buffer, const size_t comp_sz, const size_t index=0)
 
void run_reduction_code (const ReductionCode &reduction_code, int8_t *this_buff, const int8_t *that_buff, const int32_t start_entry_index, const int32_t end_entry_index, const int32_t that_entry_count, const void *this_qmd, const void *that_qmd, const void *serialized_varlen_buffer)
 
ALWAYS_INLINE void check_watchdog (const size_t sample_seed)
 
GroupValueInfo get_matching_group_value_columnar_reduction (int64_t *groups_buffer, const uint32_t h, const int64_t *key, const uint32_t key_qw_count, const size_t entry_count)
 
GroupValueInfo get_group_value_columnar_reduction (int64_t *groups_buffer, const uint32_t groups_buffer_entry_count, const int64_t *key, const uint32_t key_qw_count)
 
template<typename T = int64_t>
GroupValueInfo get_matching_group_value_reduction (int64_t *groups_buffer, const uint32_t h, const T *key, const uint32_t key_count, const QueryMemoryDescriptor &query_mem_desc, const int64_t *that_buff_i64, const size_t that_entry_idx, const size_t that_entry_count, const uint32_t row_size_quad)
 
GroupValueInfo get_matching_group_value_reduction (int64_t *groups_buffer, const uint32_t h, const int64_t *key, const uint32_t key_count, const size_t key_width, const QueryMemoryDescriptor &query_mem_desc, const int64_t *that_buff_i64, const size_t that_entry_idx, const size_t that_entry_count, const uint32_t row_size_quad)
 

Function Documentation

ALWAYS_INLINE void anonymous_namespace{ResultSetReduction.cpp}::check_watchdog ( const size_t  sample_seed)

Definition at line 380 of file ResultSetReduction.cpp.

References dynamic_watchdog(), g_enable_dynamic_watchdog, and UNLIKELY.

Referenced by ResultSetStorage::reduceEntriesNoCollisionsColWise(), ResultSetStorage::reduceOneEntryBaseline(), and ResultSetStorage::reduceOneEntryNoCollisionsRowWise().

380  {
381  if (UNLIKELY(g_enable_dynamic_watchdog && (sample_seed & 0x3F) == 0 &&
382  dynamic_watchdog())) {
383  // TODO(alex): distinguish between the deadline and interrupt
384  throw std::runtime_error(
385  "Query execution has exceeded the time limit or was interrupted during result "
386  "set reduction");
387  }
388 }
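
The (sample_seed & 0x3F) mask means the deadline is only polled once every 64 entries, so the check stays cheap inside tight reduction loops. A minimal standalone sketch of the same sampling pattern; watchdog_enabled and poll_deadline() below are placeholders, not the real g_enable_dynamic_watchdog flag or dynamic_watchdog() device function:

#include <cstddef>
#include <stdexcept>

static bool watchdog_enabled = true;   // stand-in for g_enable_dynamic_watchdog

static bool poll_deadline() {          // stand-in for dynamic_watchdog()
  return false;                        // pretend the deadline has not expired
}

inline void check_watchdog_sketch(const size_t sample_seed) {
  // Only every 64th call actually polls the deadline.
  if (watchdog_enabled && (sample_seed & 0x3F) == 0 && poll_deadline()) {
    throw std::runtime_error("Query execution has exceeded the time limit");
  }
}

int main() {
  for (size_t entry_idx = 0; entry_idx < 1000; ++entry_idx) {
    check_watchdog_sketch(entry_idx);  // the entry index doubles as the sample seed
    // ... reduce entry entry_idx here ...
  }
  return 0;
}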

ALWAYS_INLINE void anonymous_namespace{ResultSetReduction.cpp}::fill_empty_key_32 ( int32_t *  key_ptr_i32,
const size_t  key_count 
)

Definition at line 91 of file ResultSetReduction.cpp.

References EMPTY_KEY_32.

Referenced by fill_empty_key(), and ResultSetStorage::initializeRowWise().

91  {
92  for (size_t i = 0; i < key_count; ++i) {
93  key_ptr_i32[i] = EMPTY_KEY_32;
94  }
95 }

ALWAYS_INLINE void anonymous_namespace{ResultSetReduction.cpp}::fill_empty_key_64 ( int64_t *  key_ptr_i64,
const size_t  key_count 
)

Definition at line 98 of file ResultSetReduction.cpp.

References EMPTY_KEY_64.

Referenced by fill_empty_key(), and ResultSetStorage::initializeRowWise().

98  {
99  for (size_t i = 0; i < key_count; ++i) {
100  key_ptr_i64[i] = EMPTY_KEY_64;
101  }
102 }
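
Both fill_empty_key_32 and fill_empty_key_64 simply stamp a sentinel over an entry's key columns so the slot reads as unused. A rough standalone illustration of the 64-bit case; the sentinel value and row layout below are assumptions made for the sketch, not taken from the project headers:

#include <cstddef>
#include <cstdint>
#include <limits>
#include <vector>

// Assumed stand-in for EMPTY_KEY_64: marks an unused hash-table slot.
constexpr int64_t kEmptyKey64 = std::numeric_limits<int64_t>::max();

int main() {
  const size_t entry_count = 4, key_count = 2, slot_count = 3;
  const size_t row_qw = key_count + slot_count;        // quadwords per row-wise entry
  std::vector<int64_t> buff(entry_count * row_qw, 0);
  for (size_t e = 0; e < entry_count; ++e) {
    int64_t* key_ptr_i64 = buff.data() + e * row_qw;
    // The same loop fill_empty_key_64() runs for one entry's key columns.
    for (size_t i = 0; i < key_count; ++i) {
      key_ptr_i64[i] = kEmptyKey64;
    }
  }
  return 0;
}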

void anonymous_namespace{ResultSetReduction.cpp}::fill_slots ( int64_t *  dst_entry,
const size_t  dst_entry_count,
const int64_t *  src_buff,
const size_t  src_entry_idx,
const size_t  src_entry_count,
const QueryMemoryDescriptor query_mem_desc 
)

Definition at line 67 of file ResultSetReduction.cpp.

References QueryMemoryDescriptor::didOutputColumnar(), get_row_qw_count(), get_slot_off_quad(), QueryMemoryDescriptor::getBufferColSlotCount(), QueryMemoryDescriptor::getGroupbyColCount(), and slot_offset_colwise().

Referenced by get_matching_group_value_reduction(), ResultSetStorage::moveOneEntryToBuffer(), and ResultSetStorage::reduceOneEntryBaseline().

72  {
73  const auto slot_count = query_mem_desc.getBufferColSlotCount();
74  const auto key_count = query_mem_desc.getGroupbyColCount();
75  if (query_mem_desc.didOutputColumnar()) {
76  for (size_t i = 0, dst_slot_off = 0; i < slot_count;
77  ++i, dst_slot_off += dst_entry_count) {
78  dst_entry[dst_slot_off] =
79  src_buff[slot_offset_colwise(src_entry_idx, i, key_count, src_entry_count)];
80  }
81  } else {
82  const auto row_ptr = src_buff + get_row_qw_count(query_mem_desc) * src_entry_idx;
83  const auto slot_off_quad = get_slot_off_quad(query_mem_desc);
84  for (size_t i = 0; i < slot_count; ++i) {
85  dst_entry[i] = row_ptr[slot_off_quad + i];
86  }
87  }
88 }
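
In columnar output every slot is its own column of entry_count values placed after the key columns, so copying one entry strides through the buffer; in row-wise output an entry's slots sit contiguously after its keys. A small sketch of the columnar offset arithmetic; the formula is assumed to mirror slot_offset_colwise():

#include <cstddef>
#include <iostream>

// Assumed layout: key_count key columns followed by one column per slot,
// each column entry_count values long.
size_t slot_offset_colwise_sketch(size_t entry_idx, size_t slot_idx,
                                  size_t key_count, size_t entry_count) {
  return (key_count + slot_idx) * entry_count + entry_idx;
}

int main() {
  const size_t entry_count = 8, key_count = 2;
  // Slot 1 of entry 5 lives in column (2 + 1) = 3 at row 5: 3 * 8 + 5 = 29.
  std::cout << slot_offset_colwise_sketch(5, 1, key_count, entry_count) << '\n';
  return 0;
}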

int64_t anonymous_namespace{ResultSetReduction.cpp}::get_component ( const int8_t *  group_by_buffer,
const size_t  comp_sz,
const size_t  index = 0 
)
inline

Definition at line 104 of file ResultSetReduction.cpp.

References CHECK.

Referenced by ResultSetStorage::reduceSingleRow().

106  {
107  int64_t ret = std::numeric_limits<int64_t>::min();
108  switch (comp_sz) {
109  case 1: {
110  ret = group_by_buffer[index];
111  break;
112  }
113  case 2: {
114  const int16_t* buffer_ptr = reinterpret_cast<const int16_t*>(group_by_buffer);
115  ret = buffer_ptr[index];
116  break;
117  }
118  case 4: {
119  const int32_t* buffer_ptr = reinterpret_cast<const int32_t*>(group_by_buffer);
120  ret = buffer_ptr[index];
121  break;
122  }
123  case 8: {
124  const int64_t* buffer_ptr = reinterpret_cast<const int64_t*>(group_by_buffer);
125  ret = buffer_ptr[index];
126  break;
127  }
128  default:
129  CHECK(false);
130  }
131  return ret;
132 }
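
In short, the function reads the index-th element of the buffer reinterpreted at the requested component width and sign-extends it to 64 bits. A quick standalone check of that behaviour, using memcpy instead of the cast to sidestep aliasing concerns in the sketch:

#include <cassert>
#include <cstddef>
#include <cstdint>
#include <cstring>

// Same idea as get_component(): read element index at a width of comp_sz bytes.
int64_t read_component(const int8_t* buf, size_t comp_sz, size_t index) {
  switch (comp_sz) {
    case 2: {
      int16_t v;
      std::memcpy(&v, buf + index * comp_sz, sizeof v);
      return v;
    }
    case 4: {
      int32_t v;
      std::memcpy(&v, buf + index * comp_sz, sizeof v);
      return v;
    }
    default:
      return buf[index];  // 1-byte case; 8-byte case omitted for brevity
  }
}

int main() {
  const int16_t vals[] = {-7, 42};
  const auto* bytes = reinterpret_cast<const int8_t*>(vals);
  assert(read_component(bytes, 2, 0) == -7);   // sign-extended
  assert(read_component(bytes, 2, 1) == 42);
  return 0;
}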

GroupValueInfo anonymous_namespace{ResultSetReduction.cpp}::get_group_value_columnar_reduction ( int64_t *  groups_buffer,
const uint32_t  groups_buffer_entry_count,
const int64_t *  key,
const uint32_t  key_qw_count 
)

Definition at line 716 of file ResultSetReduction.cpp.

References get_matching_group_value_columnar_reduction(), groups_buffer_entry_count, and key_hash().

Referenced by ResultSetStorage::reduceOneEntryBaseline().

720  {
721    uint32_t h = key_hash(key, key_qw_count, sizeof(int64_t)) % groups_buffer_entry_count;
722    auto matching_gvi = get_matching_group_value_columnar_reduction(
723        groups_buffer, h, key, key_qw_count, groups_buffer_entry_count);
724    if (matching_gvi.first) {
725      return matching_gvi;
726    }
727    uint32_t h_probe = (h + 1) % groups_buffer_entry_count;
728    while (h_probe != h) {
729      matching_gvi = get_matching_group_value_columnar_reduction(
730          groups_buffer, h_probe, key, key_qw_count, groups_buffer_entry_count);
731      if (matching_gvi.first) {
732        return matching_gvi;
733      }
734      h_probe = (h_probe + 1) % groups_buffer_entry_count;
735    }
736    return {nullptr, true};
737 }
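
This is standard open addressing: hash the key, try that bucket, then probe linearly with wrap-around until a matching or empty slot is claimed, giving up only when the whole table has been visited. A generic sketch of the probe-loop shape, independent of the columnar buffer layout used by the real code:

#include <cstddef>
#include <cstdint>
#include <functional>
#include <optional>
#include <vector>

// Toy open-addressing lookup with linear probing; the real reduction claims slots
// inside a columnar group-by buffer rather than a vector of optionals.
std::optional<size_t> find_or_claim(std::vector<std::optional<int64_t>>& table,
                                    int64_t key) {
  const size_t n = table.size();
  const size_t h = std::hash<int64_t>{}(key) % n;
  size_t probe = h;
  do {
    if (!table[probe] || *table[probe] == key) {
      table[probe] = key;          // claim the empty slot or hit the existing key
      return probe;
    }
    probe = (probe + 1) % n;       // wrap-around linear probe
  } while (probe != h);
  return std::nullopt;             // table full: mirrors the {nullptr, true} return
}

int main() {
  std::vector<std::optional<int64_t>> table(8);
  const auto first = find_or_claim(table, 42);
  const auto second = find_or_claim(table, 42);  // finds the slot claimed above
  return (first && second && *first == *second) ? 0 : 1;
}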

GroupValueInfo anonymous_namespace{ResultSetReduction.cpp}::get_matching_group_value_columnar_reduction ( int64_t *  groups_buffer,
const uint32_t  h,
const int64_t *  key,
const uint32_t  key_qw_count,
const size_t  entry_count 
)

Definition at line 690 of file ResultSetReduction.cpp.

References EMPTY_KEY_64, and key_qw_count.

Referenced by get_group_value_columnar_reduction().

694  {
695  auto off = h;
696  const auto old_key =
697  __sync_val_compare_and_swap(&groups_buffer[off], EMPTY_KEY_64, *key);
698  if (old_key == EMPTY_KEY_64) {
699  for (size_t i = 0; i < key_qw_count; ++i) {
700  groups_buffer[off] = key[i];
701  off += entry_count;
702  }
703  return {&groups_buffer[off], true};
704  }
705  off = h;
706  for (size_t i = 0; i < key_qw_count; ++i) {
707  if (groups_buffer[off] != key[i]) {
708  return {nullptr, true};
709  }
710  off += entry_count;
711  }
712  return {&groups_buffer[off], false};
713 }
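
Note the column-major key layout: component i of the key for entry h lives at offset h + i * entry_count, so writing or comparing a key strides by entry_count instead of touching contiguous words, and the CAS on the first component both detects and claims an empty slot in one step. A tiny illustration of that stride (the buffer here holds only the key columns):

#include <cstddef>
#include <cstdint>
#include <vector>

int main() {
  const size_t entry_count = 4, key_qw_count = 2;
  std::vector<int64_t> groups_buffer(entry_count * key_qw_count, 0);
  const size_t h = 1;                 // hash bucket chosen for the entry
  const int64_t key[] = {10, 20};
  for (size_t i = 0; i < key_qw_count; ++i) {
    // Component i of entry h: indices 1 and 5 in this example.
    groups_buffer[h + i * entry_count] = key[i];
  }
  return 0;
}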

template<typename T = int64_t>
GroupValueInfo anonymous_namespace{ResultSetReduction.cpp}::get_matching_group_value_reduction ( int64_t *  groups_buffer,
const uint32_t  h,
const T *  key,
const uint32_t  key_count,
const QueryMemoryDescriptor query_mem_desc,
const int64_t *  that_buff_i64,
const size_t  that_entry_idx,
const size_t  that_entry_count,
const uint32_t  row_size_quad 
)

Definition at line 746 of file ResultSetReduction.cpp.

References cas_cst, fill_slots(), get_slot_off_quad(), QueryMemoryDescriptor::getEntryCount(), load_cst, and store_cst.

Referenced by get_group_value_reduction(), and get_matching_group_value_reduction().

755  {
756  auto off = h * row_size_quad;
757  T empty_key = get_empty_key<T>();
758  T write_pending = get_empty_key<T>() - 1;
759  auto row_ptr = reinterpret_cast<T*>(groups_buffer + off);
760  const auto slot_off_quad = get_slot_off_quad(query_mem_desc);
761  const bool success = cas_cst(row_ptr, &empty_key, write_pending);
762  if (success) {
763  fill_slots(groups_buffer + off + slot_off_quad,
764  query_mem_desc.getEntryCount(),
765  that_buff_i64,
766  that_entry_idx,
767  that_entry_count,
768  query_mem_desc);
769  if (key_count > 1) {
770  memcpy(row_ptr + 1, key + 1, (key_count - 1) * sizeof(T));
771  }
772  store_cst(row_ptr, *key);
773  return {groups_buffer + off + slot_off_quad, true};
774  }
775  while (load_cst(row_ptr) == write_pending) {
776  // spin until the winning thread has finished writing the entire key and the init
777  // value
778  }
779  for (size_t i = 0; i < key_count; ++i) {
780  if (load_cst(row_ptr + i) != key[i]) {
781  return {nullptr, true};
782  }
783  }
784  return {groups_buffer + off + slot_off_quad, false};
785 }
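
The row-wise variant claims an entry in three steps: CAS the first key word from the empty sentinel to a temporary write-pending marker, fill the remaining key words plus the initial slot values, then publish by storing the real first key word; a losing thread that observes the marker spins until it disappears and only then compares keys. A condensed sketch of that publication protocol using std::atomic; the sentinel values are illustrative, not the real EMPTY_KEY constants:

#include <atomic>
#include <cstdint>
#include <limits>

constexpr int64_t kEmpty = std::numeric_limits<int64_t>::max();  // assumed sentinel
constexpr int64_t kWritePending = kEmpty - 1;

struct Entry {
  std::atomic<int64_t> first_key{kEmpty};
  int64_t second_key = 0;
  int64_t payload = 0;
};

// Returns true if this thread claimed the entry or the entry already holds the key.
bool claim_or_match(Entry& e, int64_t k0, int64_t k1) {
  int64_t expected = kEmpty;
  if (e.first_key.compare_exchange_strong(expected, kWritePending)) {
    e.second_key = k1;        // fill the rest of the key and the init value...
    e.payload = 0;
    e.first_key.store(k0);    // ...then publish the real first key word
    return true;
  }
  while (e.first_key.load() == kWritePending) {
    // spin until the winning thread finishes publishing
  }
  return e.first_key.load() == k0 && e.second_key == k1;
}

int main() {
  Entry e;
  const bool claimed = claim_or_match(e, 7, 8);
  const bool matched = claim_or_match(e, 7, 8);
  return (claimed && matched) ? 0 : 1;
}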

GroupValueInfo anonymous_namespace{ResultSetReduction.cpp}::get_matching_group_value_reduction ( int64_t *  groups_buffer,
const uint32_t  h,
const int64_t *  key,
const uint32_t  key_count,
const size_t  key_width,
const QueryMemoryDescriptor query_mem_desc,
const int64_t *  that_buff_i64,
const size_t  that_entry_idx,
const size_t  that_entry_count,
const uint32_t  row_size_quad 
)
inline

Definition at line 791 of file ResultSetReduction.cpp.

References CHECK, and get_matching_group_value_reduction().

801  {
802    switch (key_width) {
803      case 4:
804        return get_matching_group_value_reduction(groups_buffer,
805                                                  h,
806                                                  reinterpret_cast<const int32_t*>(key),
807                                                  key_count,
808                                                  query_mem_desc,
809                                                  that_buff_i64,
810                                                  that_entry_idx,
811                                                  that_entry_count,
812                                                  row_size_quad);
813      case 8:
814        return get_matching_group_value_reduction(groups_buffer,
815                                                  h,
816                                                  key,
817                                                  key_count,
818                                                  query_mem_desc,
819                                                  that_buff_i64,
820                                                  that_entry_idx,
821                                                  that_entry_count,
822                                                  row_size_quad);
823      default:
824        CHECK(false);
825        return {nullptr, true};
826    }
827 }

size_t anonymous_namespace{ResultSetReduction.cpp}::get_row_qw_count ( const QueryMemoryDescriptor query_mem_desc)

Definition at line 49 of file ResultSetReduction.cpp.

References CHECK_EQ, and get_row_bytes().

Referenced by fill_slots(), ResultSetStorage::moveEntriesToBuffer(), ResultSetStorage::reduceOneEntryBaseline(), and ResultSetStorage::reduceOneEntrySlotsBaseline().

49  {
50  const auto row_bytes = get_row_bytes(query_mem_desc);
51  CHECK_EQ(size_t(0), row_bytes % 8);
52  return row_bytes / 8;
53 }

std::vector<int64_t> anonymous_namespace{ResultSetReduction.cpp}::make_key ( const int64_t *  buff,
const size_t  entry_count,
const size_t  key_count 
)

Definition at line 55 of file ResultSetReduction.cpp.

Referenced by ResultSetStorage::moveOneEntryToBuffer(), and ResultSetStorage::reduceOneEntryBaseline().

57  {
58  std::vector<int64_t> key;
59  size_t off = 0;
60  for (size_t i = 0; i < key_count; ++i) {
61  key.push_back(buff[off]);
62  off += entry_count;
63  }
64  return key;
65 }


void anonymous_namespace{ResultSetReduction.cpp}::run_reduction_code ( const ReductionCode reduction_code,
int8_t *  this_buff,
const int8_t *  that_buff,
const int32_t  start_entry_index,
const int32_t  end_entry_index,
const int32_t  that_entry_count,
const void *  this_qmd,
const void *  that_qmd,
const void *  serialized_varlen_buffer 
)

Definition at line 134 of file ResultSetReduction.cpp.

References ReductionCode::func_ptr, ReductionInterpreter::EvalValue::int_val, ReductionCode::ir_reduce_loop, ReductionInterpreter::EvalValue::ptr, and ReductionInterpreter::run().

Referenced by ResultSetStorage::reduce().

142  {
143  int err = 0;
144  if (reduction_code.func_ptr) {
145  err = reduction_code.func_ptr(this_buff,
146  that_buff,
147  start_entry_index,
148  end_entry_index,
149  that_entry_count,
150  this_qmd,
151  that_qmd,
152  serialized_varlen_buffer);
153  } else {
154  auto ret = ReductionInterpreter::run(
155  reduction_code.ir_reduce_loop.get(),
156  {ReductionInterpreter::EvalValue{.ptr = this_buff},
157  ReductionInterpreter::EvalValue{.ptr = that_buff},
158  ReductionInterpreter::EvalValue{.int_val = start_entry_index},
159  ReductionInterpreter::EvalValue{.int_val = end_entry_index},
160  ReductionInterpreter::EvalValue{.int_val = that_entry_count},
161  ReductionInterpreter::EvalValue{.ptr = this_qmd},
162  ReductionInterpreter::EvalValue{.ptr = that_qmd},
163  ReductionInterpreter::EvalValue{.ptr = serialized_varlen_buffer}});
164  err = ret.int_val;
165  }
166  if (err) {
167  throw std::runtime_error(
168  "Query execution has exceeded the time limit or was interrupted during result "
169  "set reduction");
170  }
171 }
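
The function prefers the JIT-compiled entry point when one exists and otherwise interprets the reduction IR, treating any non-zero return as a timeout or interrupt. The same compiled-fast-path-with-interpreter-fallback shape, stripped of the real types:

#include <cstdint>
#include <functional>
#include <stdexcept>

using CompiledFn = int32_t (*)(int8_t* this_buff, const int8_t* that_buff);

struct ReductionCodeSketch {
  CompiledFn func_ptr = nullptr;                           // null: nothing was JIT-compiled
  std::function<int32_t(int8_t*, const int8_t*)> interp;   // interpreter fallback
};

void run_reduction_sketch(const ReductionCodeSketch& code,
                          int8_t* this_buff, const int8_t* that_buff) {
  const int32_t err = code.func_ptr ? code.func_ptr(this_buff, that_buff)
                                    : code.interp(this_buff, that_buff);
  if (err) {
    throw std::runtime_error("reduction timed out or was interrupted");
  }
}

int main() {
  ReductionCodeSketch code;
  code.interp = [](int8_t*, const int8_t*) { return 0; };  // pretend the interpreter succeeds
  int8_t a = 0, b = 0;
  run_reduction_sketch(code, &a, &b);
  return 0;
}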

bool anonymous_namespace{ResultSetReduction.cpp}::use_multithreaded_reduction ( const size_t  entry_count)

Definition at line 45 of file ResultSetReduction.cpp.

Referenced by ResultSetStorage::moveEntriesToBuffer(), and ResultSetStorage::reduce().

45  {
46  return entry_count > 100000;
47 }
