StorageIOFacility.h
/*
 * Copyright 2020 OmniSci, Inc.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
#pragma once

#include <future>

#include "Fragmenter/InsertOrderFragmenter.h"
#include "LockMgr/LockMgr.h"
#include "Shared/UpdelRoll.h"
#include "Shared/likely.h"
#include "Shared/thread_count.h"

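// StorageIOFacility produces the per-fragment callbacks that apply UPDATE and DELETE
// results to storage. yieldUpdateCallback() and yieldDeleteCallback() each return an
// UpdateCallback that consumes one fragment's projected result set and either rewrites
// the chunk in place (temporary tables) or hands offsets and values to the table's
// fragmenter.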
template <typename EXECUTOR_TRAITS, typename FRAGMENT_UPDATER = UpdateLogForFragment>
class StorageIOFacility {
 public:
  using ExecutorType = typename EXECUTOR_TRAITS::ExecutorType;
  using CatalogType = typename EXECUTOR_TRAITS::CatalogType;
  using FragmentUpdaterType = FRAGMENT_UPDATER;
  using UpdateCallback = typename FragmentUpdaterType::Callback;

  using TableDescriptorType = typename EXECUTOR_TRAITS::TableDescriptorType;
  using DeleteVictimOffsetList = std::vector<uint64_t>;
  using UpdateTargetOffsetList = std::vector<uint64_t>;
  using UpdateTargetTypeList = std::vector<TargetMetaInfo>;
  using UpdateTargetColumnNamesList = std::vector<std::string>;

  using FragmenterType = Fragmenter_Namespace::InsertOrderFragmenter;
  using TransactionLog = typename FragmenterType::ModifyTransactionTracker;
  using TransactionLogPtr = std::unique_ptr<TransactionLog>;
  using ColumnValidationFunction = std::function<bool(std::string const&)>;

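  // Shared base for the update/delete parameter bundles below. It owns the fragmenter
  // transaction tracker that accumulates per-fragment modifications until
  // finalizeTransaction() commits them.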
  class TransactionParameters {
   public:
    typename StorageIOFacility::TransactionLog& getTransactionTracker() {
      return transaction_tracker_;
    }
    void finalizeTransaction() { transaction_tracker_.commitUpdate(); }

   private:
    typename StorageIOFacility::TransactionLog transaction_tracker_;
  };

  class DeleteTransactionParameters : public TransactionParameters {
   public:
    DeleteTransactionParameters(const bool table_is_temporary)
        : table_is_temporary_(table_is_temporary) {}

    auto tableIsTemporary() const { return table_is_temporary_; }

   private:
    DeleteTransactionParameters(DeleteTransactionParameters const& other) = delete;
    DeleteTransactionParameters& operator=(DeleteTransactionParameters const& other) =
        delete;

    bool table_is_temporary_;
  };

  class UpdateTransactionParameters : public TransactionParameters {
   public:
    UpdateTransactionParameters(TableDescriptorType const* table_desc,
                                UpdateTargetColumnNamesList const& update_column_names,
                                UpdateTargetTypeList const& target_types,
                                bool varlen_update_required)
        : table_descriptor_(table_desc)
        , update_column_names_(update_column_names)
        , targets_meta_(target_types)
        , varlen_update_required_(varlen_update_required)
        , table_is_temporary_(table_is_temporary(table_desc)) {}

    auto getUpdateColumnCount() const { return update_column_names_.size(); }
    auto const* getTableDescriptor() const { return table_descriptor_; }
    auto const& getTargetsMetaInfo() const { return targets_meta_; }
    auto getTargetsMetaInfoSize() const { return targets_meta_.size(); }
    auto const& getUpdateColumnNames() const { return update_column_names_; }
    auto isVarlenUpdateRequired() const { return varlen_update_required_; }
    auto tableIsTemporary() const { return table_is_temporary_; }

   private:
    UpdateTransactionParameters(UpdateTransactionParameters const& other) = delete;
    UpdateTransactionParameters& operator=(UpdateTransactionParameters const& other) =
        delete;

    TableDescriptorType const* table_descriptor_;
    UpdateTargetColumnNamesList update_column_names_;
    UpdateTargetTypeList const& targets_meta_;
    bool varlen_update_required_ = false;
    bool table_is_temporary_;
  };

  StorageIOFacility(ExecutorType* executor, CatalogType const& catalog)
      : executor_(executor), catalog_(catalog) {}

  UpdateCallback yieldUpdateCallback(UpdateTransactionParameters& update_parameters);
  UpdateCallback yieldDeleteCallback(DeleteTransactionParameters& delete_parameters);

 private:
  int normalized_cpu_threads() const { return cpu_threads() / 2; }
  static std::unique_ptr<int8_t[]> getRsBufferNoPadding(const ResultSet* rs,
                                                        size_t col_idx,
                                                        const SQLTypeInfo& column_type,
                                                        size_t row_count);

  ExecutorType* executor_;
  CatalogType const& catalog_;
};

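// yieldUpdateCallback() hands back one of three per-fragment strategies: delegate the
// whole-row rewrite to the fragmenter when a variable-length column is updated, write
// the projected column straight into the chunk for temporary tables, or gather per-row
// offsets and new values in parallel and apply them via the fragmenter's updateColumn()
// in the general case.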
template <typename EXECUTOR_TRAITS, typename FRAGMENT_UPDATER>
typename StorageIOFacility<EXECUTOR_TRAITS, FRAGMENT_UPDATER>::UpdateCallback
StorageIOFacility<EXECUTOR_TRAITS, FRAGMENT_UPDATER>::yieldUpdateCallback(
    UpdateTransactionParameters& update_parameters) {
  using OffsetVector = std::vector<uint64_t>;
  using ScalarTargetValueVector = std::vector<ScalarTargetValue>;
  using RowProcessingFuturesVector = std::vector<std::future<uint64_t>>;

  if (update_parameters.isVarlenUpdateRequired()) {
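    // Variable-length updates rewrite whole rows: collect the descriptor and source
    // metadata for every updated column and let the fragmenter's updateColumns()
    // rebuild the fragment from the projected result set.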
    auto callback = [this,
                     &update_parameters](FragmentUpdaterType const& update_log) -> void {
      std::vector<const ColumnDescriptor*> columnDescriptors;
      std::vector<TargetMetaInfo> sourceMetaInfos;

      for (size_t idx = 0; idx < update_parameters.getUpdateColumnNames().size(); idx++) {
        auto& column_name = update_parameters.getUpdateColumnNames()[idx];
        auto target_column =
            catalog_.getMetadataForColumn(update_log.getPhysicalTableId(), column_name);
        columnDescriptors.push_back(target_column);
        sourceMetaInfos.push_back(update_parameters.getTargetsMetaInfo()[idx]);
      }

      auto td = catalog_.getMetadataForTable(update_log.getPhysicalTableId());
      auto* fragmenter = td->fragmenter.get();
      CHECK(fragmenter);

      fragmenter->updateColumns(
          &catalog_,
          td,
          update_log.getFragmentId(),
          sourceMetaInfos,
          columnDescriptors,
          update_log,
          update_parameters.getUpdateColumnCount(),  // last column of result set
          Data_Namespace::MemoryLevel::CPU_LEVEL,
          update_parameters.getTransactionTracker(),
          executor_);
    };
    return callback;
  } else if (update_parameters.tableIsTemporary()) {
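    // Temporary tables bypass the fragmenter's update path: fetch the target chunk,
    // strip the result set's slot padding, write the projected column back through the
    // chunk's encoder, refresh the fragment metadata, and drop any stale GPU copies.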
    auto callback = [this,
                     &update_parameters](FragmentUpdaterType const& update_log) -> void {
      auto rs = update_log.getResultSet();
      CHECK(rs->didOutputColumnar());
      CHECK(rs->isDirectColumnarConversionPossible());
      CHECK_EQ(update_parameters.getUpdateColumnCount(), size_t(1));
      CHECK_EQ(rs->colCount(), size_t(1));

      // Temporary table updates require the full projected column
      CHECK_EQ(rs->rowCount(), update_log.getRowCount());

      ChunkKey chunk_key_prefix{catalog_.getCurrentDB().dbId,
                                update_parameters.getTableDescriptor()->tableId};
      const auto table_lock =
          lockmgr::TableDataLockMgr::getWriteLockForTable(chunk_key_prefix);

      auto& fragment_info = update_log.getFragmentInfo();
      const auto td = catalog_.getMetadataForTable(update_log.getPhysicalTableId());
      CHECK(td);
      const auto cd = catalog_.getMetadataForColumn(
          td->tableId, update_parameters.getUpdateColumnNames().front());
      CHECK(cd);
      auto chunk_metadata =
          fragment_info.getChunkMetadataMapPhysical().find(cd->columnId);
      CHECK(chunk_metadata != fragment_info.getChunkMetadataMapPhysical().end());
      ChunkKey chunk_key{catalog_.getCurrentDB().dbId,
                         td->tableId,
                         cd->columnId,
                         fragment_info.fragmentId};
      auto chunk = Chunk_NS::Chunk::getChunk(cd,
                                             &catalog_.getDataMgr(),
                                             chunk_key,
                                             Data_Namespace::MemoryLevel::CPU_LEVEL,
                                             0,
                                             chunk_metadata->second->numBytes,
                                             chunk_metadata->second->numElements);
      CHECK(chunk);
      auto chunk_buffer = chunk->getBuffer();
      CHECK(chunk_buffer);

      auto encoder = chunk_buffer->getEncoder();
      CHECK(encoder);

      auto owned_buffer =
          StorageIOFacility<EXECUTOR_TRAITS, FRAGMENT_UPDATER>::getRsBufferNoPadding(
              rs.get(), 0, cd->columnType, rs->rowCount());
      auto buffer = reinterpret_cast<int8_t*>(owned_buffer.get());

      const auto new_chunk_metadata =
          encoder->appendData(buffer, rs->rowCount(), cd->columnType, false, 0);
      CHECK(new_chunk_metadata);

      auto fragmenter = td->fragmenter.get();
      CHECK(fragmenter);

      // The fragmenter copy of the fragment info differs from the copy used by the
      // query engine. Update metadata in the fragmenter directly.
      auto fragment = fragmenter->getFragmentInfo(fragment_info.fragmentId);
      // TODO: we may want to put this directly in the fragmenter so we are under the
      // fragmenter lock. But, concurrent queries on the same fragmenter should not be
      // allowed in this path.

      fragment->setChunkMetadata(cd->columnId, new_chunk_metadata);
      fragment->shadowChunkMetadataMap =
          fragment->getChunkMetadataMap();  // TODO(adb): needed?

      auto& data_mgr = catalog_.getDataMgr();
      if (data_mgr.gpusPresent()) {
        // flush any GPU copies of the updated chunk
        data_mgr.deleteChunksWithPrefix(chunk_key,
                                        Data_Namespace::MemoryLevel::GPU_LEVEL);
      }
    };
    return callback;
  } else {
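    // General case: for each updated column, worker threads scan the fragment's result
    // entries, record each row's fragment offset (the last column of the projection)
    // and its new scalar value, then pass both vectors to the fragmenter.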
    auto callback = [this,
                     &update_parameters](FragmentUpdaterType const& update_log) -> void {
      auto entries_per_column = update_log.getEntryCount();
      auto rows_per_column = update_log.getRowCount();
      if (rows_per_column == 0) {
        return;
      }

      OffsetVector column_offsets(rows_per_column);
      ScalarTargetValueVector scalar_target_values(rows_per_column);

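      // Partition the result entries into equal blocks, one per worker thread, with a
      // remainder block for the leftover entries. If there are fewer rows than threads,
      // fall back to a single thread over all entries.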
      auto complete_entry_block_size = entries_per_column / normalized_cpu_threads();
      auto partial_row_block_size = entries_per_column % normalized_cpu_threads();
      auto usable_threads = normalized_cpu_threads();
      if (UNLIKELY(rows_per_column < (unsigned)normalized_cpu_threads())) {
        complete_entry_block_size = entries_per_column;
        partial_row_block_size = 0;
        usable_threads = 1;
      }

      std::atomic<size_t> row_idx{0};

      auto process_rows =
          [&update_parameters, &column_offsets, &scalar_target_values, &row_idx](
              auto get_entry_at_func,
              uint64_t column_index,
              uint64_t entry_start,
              uint64_t entry_count) -> uint64_t {
        uint64_t entries_processed = 0;
        for (uint64_t entry_index = entry_start;
             entry_index < (entry_start + entry_count);
             entry_index++) {
          const auto& row = get_entry_at_func(entry_index);
          if (row.empty()) {
            continue;
          }

          entries_processed++;
          size_t row_index = row_idx.fetch_add(1);

          CHECK(row.size() == update_parameters.getUpdateColumnCount() + 1);

          auto terminal_column_iter = std::prev(row.end());
          const auto frag_offset_scalar_tv =
              boost::get<ScalarTargetValue>(&*terminal_column_iter);
          CHECK(frag_offset_scalar_tv);

          column_offsets[row_index] =
              static_cast<uint64_t>(*(boost::get<int64_t>(frag_offset_scalar_tv)));
          scalar_target_values[row_index] =
              boost::get<ScalarTargetValue>(row[column_index]);
        }
        return entries_processed;
      };

      auto get_row_index =
          [complete_entry_block_size](uint64_t thread_index) -> uint64_t {
        return (thread_index * complete_entry_block_size);
      };

      // Iterate over each column
      for (decltype(update_parameters.getUpdateColumnCount()) column_index = 0;
           column_index < update_parameters.getUpdateColumnCount();
           column_index++) {
        row_idx = 0;
        RowProcessingFuturesVector entry_processing_futures;
        entry_processing_futures.reserve(usable_threads);

        auto get_entry_at_func = [&update_log, &column_index](const size_t entry_index) {
          if (UNLIKELY(update_log.getColumnType(column_index).is_string())) {
            return update_log.getTranslatedEntryAt(entry_index);
          } else {
            return update_log.getEntryAt(entry_index);
          }
        };

        for (unsigned i = 0; i < static_cast<unsigned>(usable_threads); i++) {
          entry_processing_futures.emplace_back(
              std::async(std::launch::async,
                         std::forward<decltype(process_rows)>(process_rows),
                         get_entry_at_func,
                         column_index,
                         get_row_index(i),
                         complete_entry_block_size));
        }
        if (partial_row_block_size) {
          entry_processing_futures.emplace_back(
              std::async(std::launch::async,
                         std::forward<decltype(process_rows)>(process_rows),
                         get_entry_at_func,
                         column_index,
                         get_row_index(usable_threads),
                         partial_row_block_size));
        }

        uint64_t entries_processed(0);
        for (auto& t : entry_processing_futures) {
          t.wait();
          entries_processed += t.get();
        }

        CHECK(row_idx == rows_per_column);

        const auto table_id = update_log.getPhysicalTableId();
        auto const* table_descriptor =
            catalog_.getMetadataForTable(update_log.getPhysicalTableId());
        CHECK(table_descriptor);
        const auto fragmenter = table_descriptor->fragmenter;
        CHECK(fragmenter);
        auto const* target_column = catalog_.getMetadataForColumn(
            table_id, update_parameters.getUpdateColumnNames()[column_index]);

        fragmenter->updateColumn(&catalog_,
                                 table_descriptor,
                                 target_column,
                                 update_log.getFragmentId(),
                                 column_offsets,
                                 scalar_target_values,
                                 update_log.getColumnType(column_index),
                                 Data_Namespace::MemoryLevel::CPU_LEVEL,
                                 update_parameters.getTransactionTracker());
      }
    };
    return callback;
  }
}

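// yieldDeleteCallback() mirrors the update path: for temporary tables the hidden
// boolean delete column's chunk is rewritten directly from the projected result, while
// for persistent tables victim row offsets are collected in parallel and the delete
// column is set to true through the fragmenter.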
template <typename EXECUTOR_TRAITS, typename FRAGMENT_UPDATER>
typename StorageIOFacility<EXECUTOR_TRAITS, FRAGMENT_UPDATER>::UpdateCallback
StorageIOFacility<EXECUTOR_TRAITS, FRAGMENT_UPDATER>::yieldDeleteCallback(
    DeleteTransactionParameters& delete_parameters) {
  using RowProcessingFuturesVector = std::vector<std::future<uint64_t>>;

  if (delete_parameters.tableIsTemporary()) {
    auto callback = [this](FragmentUpdaterType const& update_log) -> void {
      auto rs = update_log.getResultSet();
      CHECK(rs->didOutputColumnar());
      CHECK(rs->isDirectColumnarConversionPossible());
      CHECK_EQ(rs->colCount(), size_t(1));

      // Temporary table updates require the full projected column
      CHECK_EQ(rs->rowCount(), update_log.getRowCount());

      ChunkKey chunk_key_prefix{catalog_.getCurrentDB().dbId,
                                update_log.getPhysicalTableId()};
      const auto table_lock =
          lockmgr::TableDataLockMgr::getWriteLockForTable(chunk_key_prefix);

      auto& fragment_info = update_log.getFragmentInfo();
      const auto td = catalog_.getMetadataForTable(update_log.getPhysicalTableId());
      CHECK(td);
      const auto cd = catalog_.getDeletedColumn(td);
      CHECK(cd);
      CHECK(cd->columnType.get_type() == kBOOLEAN);
      auto chunk_metadata =
          fragment_info.getChunkMetadataMapPhysical().find(cd->columnId);
      CHECK(chunk_metadata != fragment_info.getChunkMetadataMapPhysical().end());
      ChunkKey chunk_key{catalog_.getCurrentDB().dbId,
                         td->tableId,
                         cd->columnId,
                         fragment_info.fragmentId};
      auto chunk = Chunk_NS::Chunk::getChunk(cd,
                                             &catalog_.getDataMgr(),
                                             chunk_key,
                                             Data_Namespace::MemoryLevel::CPU_LEVEL,
                                             0,
                                             chunk_metadata->second->numBytes,
                                             chunk_metadata->second->numElements);
      CHECK(chunk);
      auto chunk_buffer = chunk->getBuffer();
      CHECK(chunk_buffer);

      auto encoder = chunk_buffer->getEncoder();
      CHECK(encoder);

      auto owned_buffer =
          StorageIOFacility<EXECUTOR_TRAITS, FRAGMENT_UPDATER>::getRsBufferNoPadding(
              rs.get(), 0, cd->columnType, rs->rowCount());
      auto buffer = reinterpret_cast<int8_t*>(owned_buffer.get());

      const auto new_chunk_metadata =
          encoder->appendData(buffer, rs->rowCount(), cd->columnType, false, 0);

      auto fragmenter = td->fragmenter.get();
      CHECK(fragmenter);

      // The fragmenter copy of the fragment info differs from the copy used by the
      // query engine. Update metadata in the fragmenter directly.
      auto fragment = fragmenter->getFragmentInfo(fragment_info.fragmentId);
      // TODO: we may want to put this directly in the fragmenter so we are under the
      // fragmenter lock. But, concurrent queries on the same fragmenter should not be
      // allowed in this path.

      fragment->setChunkMetadata(cd->columnId, new_chunk_metadata);
      fragment->shadowChunkMetadataMap =
          fragment->getChunkMetadataMap();  // TODO(adb): needed?

      auto& data_mgr = catalog_.getDataMgr();
      if (data_mgr.gpusPresent()) {
        // flush any GPU copies of the updated chunk
        data_mgr.deleteChunksWithPrefix(chunk_key,
                                        Data_Namespace::MemoryLevel::GPU_LEVEL);
      }
    };
    return callback;
  } else {
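    // General case: deletes never remove rows physically. Worker threads collect the
    // victim rows' fragment offsets from the projection, then the hidden delete column
    // is set to true at those offsets via the fragmenter.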
    auto callback = [this,
                     &delete_parameters](FragmentUpdaterType const& update_log) -> void {
      auto entries_per_column = update_log.getEntryCount();
      auto rows_per_column = update_log.getRowCount();
      if (rows_per_column == 0) {
        return;
      }
      DeleteVictimOffsetList victim_offsets(rows_per_column);

      auto complete_row_block_size = entries_per_column / normalized_cpu_threads();
      auto partial_row_block_size = entries_per_column % normalized_cpu_threads();
      auto usable_threads = normalized_cpu_threads();

      if (UNLIKELY(rows_per_column < (unsigned)normalized_cpu_threads())) {
        complete_row_block_size = rows_per_column;
        partial_row_block_size = 0;
        usable_threads = 1;
      }

      std::atomic<size_t> row_idx{0};

      auto process_rows = [&update_log, &victim_offsets, &row_idx](
                              uint64_t entry_start, uint64_t entry_count) -> uint64_t {
        uint64_t entries_processed = 0;

        for (uint64_t entry_index = entry_start;
             entry_index < (entry_start + entry_count);
             entry_index++) {
          auto const row(update_log.getEntryAt(entry_index));

          if (row.empty()) {
            continue;
          }

          entries_processed++;
          size_t row_index = row_idx.fetch_add(1);

          auto terminal_column_iter = std::prev(row.end());
          const auto scalar_tv = boost::get<ScalarTargetValue>(&*terminal_column_iter);
          CHECK(scalar_tv);

          uint64_t fragment_offset =
              static_cast<uint64_t>(*(boost::get<int64_t>(scalar_tv)));
          victim_offsets[row_index] = fragment_offset;
        }
        return entries_processed;
      };

      auto get_row_index = [complete_row_block_size](uint64_t thread_index) -> uint64_t {
        return thread_index * complete_row_block_size;
      };

      RowProcessingFuturesVector row_processing_futures;
      row_processing_futures.reserve(usable_threads);

      for (unsigned i = 0; i < (unsigned)usable_threads; i++) {
        row_processing_futures.emplace_back(
            std::async(std::launch::async,
                       std::forward<decltype(process_rows)>(process_rows),
                       get_row_index(i),
                       complete_row_block_size));
      }
      if (partial_row_block_size) {
        row_processing_futures.emplace_back(
            std::async(std::launch::async,
                       std::forward<decltype(process_rows)>(process_rows),
                       get_row_index(usable_threads),
                       partial_row_block_size));
      }

      uint64_t rows_processed(0);
      for (auto& t : row_processing_futures) {
        t.wait();
        rows_processed += t.get();
      }

      auto const* table_descriptor =
          catalog_.getMetadataForTable(update_log.getPhysicalTableId());
      CHECK(!table_is_temporary(table_descriptor));
      auto* fragmenter = table_descriptor->fragmenter.get();
      CHECK(fragmenter);

      auto const* deleted_column_desc = catalog_.getDeletedColumn(table_descriptor);
      CHECK(deleted_column_desc);
      fragmenter->updateColumn(&catalog_,
                               table_descriptor,
                               deleted_column_desc,
                               update_log.getFragmentId(),
                               victim_offsets,
                               ScalarTargetValue(int64_t(1L)),
                               update_log.getColumnType(0),
                               Data_Namespace::MemoryLevel::CPU_LEVEL,
                               delete_parameters.getTransactionTracker());
    };
    return callback;
  }
}

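// Copies a single column out of a columnar ResultSet and strips per-slot padding so the
// values sit contiguously at their logical size. FLOAT values occupy 8-byte slots in
// the result set and are narrowed with an explicit cast; other types just keep the
// first type_size bytes of each padded slot.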
template <typename EXECUTOR_TRAITS, typename FRAGMENT_UPDATER>
std::unique_ptr<int8_t[]>
StorageIOFacility<EXECUTOR_TRAITS, FRAGMENT_UPDATER>::getRsBufferNoPadding(
    const ResultSet* rs,
    size_t col_idx,
    const SQLTypeInfo& column_type,
    size_t row_count) {
  const auto padded_size = rs->getPaddedSlotWidthBytes(col_idx);
  const auto type_size = column_type.is_dict_encoded_string()
                             ? column_type.get_size()
                             : column_type.get_logical_size();

  auto rs_buffer_size = padded_size * row_count;
  auto rs_buffer = std::make_unique<int8_t[]>(rs_buffer_size);
  rs->copyColumnIntoBuffer(col_idx, rs_buffer.get(), rs_buffer_size);

  if (type_size < padded_size) {
    // Remove the padding in place in the same buffer; this is safe because
    // type_size < padded_size. For some types, like kFLOAT, a simple memcpy is not
    // enough since the value itself has to be narrowed.
    auto src_ptr = rs_buffer.get();
    auto dst_ptr = rs_buffer.get();
    if (column_type.is_fp()) {
      CHECK(column_type.get_type() == kFLOAT);
      CHECK(padded_size == sizeof(double));
      for (size_t i = 0; i < row_count; i++) {
        const auto old_val = *reinterpret_cast<double*>(may_alias_ptr(src_ptr));
        auto new_val = static_cast<float>(old_val);
        std::memcpy(dst_ptr, &new_val, type_size);
        dst_ptr += type_size;
        src_ptr += padded_size;
      }
    } else {
      // otherwise just take the first type_size bytes from the padded value
      for (size_t i = 0; i < row_count; i++) {
        std::memcpy(dst_ptr, src_ptr, type_size);
        dst_ptr += type_size;
        src_ptr += padded_size;
      }
    }
  }
  return rs_buffer;
}