OmniSciDB  94e8789169
StorageIOFacility.h
/*
 * Copyright 2020 OmniSci, Inc.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#pragma once

#include <future>

#include "Fragmenter/InsertOrderFragmenter.h"
#include "LockMgr/LockMgr.h"
#include "QueryEngine/Execute.h"
#include "Shared/UpdelRoll.h"
#include "Shared/likely.h"
#include "Shared/thread_count.h"

class StorageIOFacility {
 public:
  using UpdateCallback = UpdateLogForFragment::Callback;

  using TableDescriptorType = TableDescriptor;
  using DeleteVictimOffsetList = std::vector<uint64_t>;
  using UpdateTargetOffsetList = std::vector<uint64_t>;
  using UpdateTargetTypeList = std::vector<TargetMetaInfo>;
  using UpdateTargetColumnNamesList = std::vector<std::string>;

  using TransactionLog =
      Fragmenter_Namespace::InsertOrderFragmenter::ModifyTransactionTracker;
  using TransactionLogPtr = std::unique_ptr<TransactionLog>;
  using ColumnValidationFunction = std::function<bool(std::string const&)>;

  class TransactionParameters {
   public:
    TransactionParameters(const bool table_is_temporary)
        : table_is_temporary_(table_is_temporary) {}

    virtual ~TransactionParameters() = default;

    StorageIOFacility::TransactionLog& getTransactionTracker() {
      return transaction_tracker_;
    }
    void finalizeTransaction() { transaction_tracker_.commitUpdate(); }

    auto tableIsTemporary() const { return table_is_temporary_; }

   protected:
    StorageIOFacility::TransactionLog transaction_tracker_;

   private:
    bool table_is_temporary_;
  };
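
  // The tracker returned by getTransactionTracker() is threaded through the
  // fragmenter's updateColumn()/updateColumns() calls below; chunk and metadata
  // edits accumulate against it and become durable only once finalizeTransaction()
  // calls commitUpdate(), following the fragmenter's UpdelRoll-style deferred
  // commit.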

  class DeleteTransactionParameters : public TransactionParameters {
   public:
    DeleteTransactionParameters(const bool table_is_temporary)
        : TransactionParameters(table_is_temporary) {}

   private:
    DeleteTransactionParameters(DeleteTransactionParameters const& other) = delete;
    DeleteTransactionParameters& operator=(DeleteTransactionParameters const& other) =
        delete;
  };

  class UpdateTransactionParameters : public TransactionParameters {
   public:
    UpdateTransactionParameters(TableDescriptorType const* table_desc,
                                UpdateTargetColumnNamesList const& update_column_names,
                                UpdateTargetTypeList const& target_types,
                                bool varlen_update_required)
        : TransactionParameters(table_is_temporary(table_desc))
        , table_descriptor_(table_desc)
        , update_column_names_(update_column_names)
        , targets_meta_(target_types)
        , varlen_update_required_(varlen_update_required) {}

    auto getUpdateColumnCount() const { return update_column_names_.size(); }
    auto const* getTableDescriptor() const { return table_descriptor_; }
    auto const& getTargetsMetaInfo() const { return targets_meta_; }
    auto getTargetsMetaInfoSize() const { return targets_meta_.size(); }
    auto const& getUpdateColumnNames() const { return update_column_names_; }
    auto isVarlenUpdateRequired() const { return varlen_update_required_; }

   private:
    UpdateTransactionParameters(UpdateTransactionParameters const& other) = delete;
    UpdateTransactionParameters& operator=(UpdateTransactionParameters const& other) =
        delete;

    TableDescriptorType const* table_descriptor_;
    UpdateTargetColumnNamesList update_column_names_;
    UpdateTargetTypeList const& targets_meta_;
    bool varlen_update_required_ = false;
  };
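
  // Illustrative sketch (not part of this header) of how a caller, e.g. the
  // relational algebra executor, might drive an UPDATE through this facility;
  // the literal values below are assumptions for the example:
  //
  //   StorageIOFacility io(executor, catalog);
  //   StorageIOFacility::UpdateTransactionParameters params(
  //       table_descriptor,
  //       {"target_column"},            // names of the columns being updated
  //       {TargetMetaInfo{...}},        // types of the projected update values
  //       /*varlen_update_required=*/false);
  //   auto callback = io.yieldUpdateCallback(params);
  //   // ... run the UPDATE's projection, invoking `callback` per fragment ...
  //   params.finalizeTransaction();     // publish the buffered edits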

  StorageIOFacility(Executor* executor, Catalog_Namespace::Catalog const& catalog)
      : executor_(executor), catalog_(catalog) {}

  StorageIOFacility::UpdateCallback yieldUpdateCallback(
      UpdateTransactionParameters& update_parameters) {
    using OffsetVector = std::vector<uint64_t>;
    using ScalarTargetValueVector = std::vector<ScalarTargetValue>;
    using RowProcessingFuturesVector = std::vector<std::future<uint64_t>>;

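    // Three strategies follow: (1) variable-length updates rewrite the affected
    // columns wholesale through the fragmenter's updateColumns(), since varlen
    // values cannot be patched in place; (2) temporary-table updates append the
    // fully projected column to the chunk buffer; (3) otherwise, offsets and new
    // values are gathered in parallel and applied via the fragmenter's
    // updateColumn().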
    if (update_parameters.isVarlenUpdateRequired()) {
      auto callback =
          [this, &update_parameters](UpdateLogForFragment const& update_log) -> void {
        std::vector<const ColumnDescriptor*> columnDescriptors;
        std::vector<TargetMetaInfo> sourceMetaInfos;

        for (size_t idx = 0; idx < update_parameters.getUpdateColumnNames().size();
             idx++) {
          auto& column_name = update_parameters.getUpdateColumnNames()[idx];
          auto target_column =
              catalog_.getMetadataForColumn(update_log.getPhysicalTableId(), column_name);
          columnDescriptors.push_back(target_column);
          sourceMetaInfos.push_back(update_parameters.getTargetsMetaInfo()[idx]);
        }

        auto td = catalog_.getMetadataForTable(update_log.getPhysicalTableId());
        auto* fragmenter = td->fragmenter.get();
        CHECK(fragmenter);

        fragmenter->updateColumns(
            &catalog_,
            td,
            update_log.getFragmentId(),
            sourceMetaInfos,
            columnDescriptors,
            update_log,
            update_parameters.getUpdateColumnCount(),  // last column of result set
            Data_Namespace::MemoryLevel::CPU_LEVEL,
            update_parameters.getTransactionTracker(),
            executor_);
      };
      return callback;
    } else if (update_parameters.tableIsTemporary()) {
      auto callback =
          [this, &update_parameters](UpdateLogForFragment const& update_log) -> void {
        auto rs = update_log.getResultSet();
        CHECK(rs->didOutputColumnar());
        CHECK(rs->isDirectColumnarConversionPossible());
        CHECK_EQ(update_parameters.getUpdateColumnCount(), size_t(1));
        CHECK_EQ(rs->colCount(), size_t(1));

        // Temporary table updates require the full projected column
        CHECK_EQ(rs->rowCount(), update_log.getRowCount());

        ChunkKey chunk_key_prefix{catalog_.getCurrentDB().dbId,
                                  update_parameters.getTableDescriptor()->tableId};
        const auto table_lock =
            lockmgr::TableDataLockMgr::getWriteLockForTable(chunk_key_prefix);

        auto& fragment_info = update_log.getFragmentInfo();
        const auto td = catalog_.getMetadataForTable(update_log.getPhysicalTableId());
        CHECK(td);
        const auto cd = catalog_.getMetadataForColumn(
            td->tableId, update_parameters.getUpdateColumnNames().front());
        CHECK(cd);
        auto chunk_metadata =
            fragment_info.getChunkMetadataMapPhysical().find(cd->columnId);
        CHECK(chunk_metadata != fragment_info.getChunkMetadataMapPhysical().end());
        ChunkKey chunk_key{catalog_.getCurrentDB().dbId,
                           td->tableId,
                           cd->columnId,
                           fragment_info.fragmentId};
        auto chunk = Chunk_NS::Chunk::getChunk(cd,
                                               &catalog_.getDataMgr(),
                                               chunk_key,
                                               Data_Namespace::MemoryLevel::CPU_LEVEL,
                                               0,
                                               chunk_metadata->second->numBytes,
                                               chunk_metadata->second->numElements);
        CHECK(chunk);
        auto chunk_buffer = chunk->getBuffer();
        CHECK(chunk_buffer);

        auto encoder = chunk_buffer->getEncoder();
        CHECK(encoder);

        auto owned_buffer = StorageIOFacility::getRsBufferNoPadding(
            rs.get(), 0, cd->columnType, rs->rowCount());
        auto buffer = reinterpret_cast<int8_t*>(owned_buffer.get());

        const auto new_chunk_metadata =
            encoder->appendData(buffer, rs->rowCount(), cd->columnType, false, 0);
        CHECK(new_chunk_metadata);

        auto fragmenter = td->fragmenter.get();
        CHECK(fragmenter);

        // The fragmenter's copy of the fragment info differs from the copy used by
        // the query engine, so update the metadata in the fragmenter directly.
        auto fragment = fragmenter->getFragmentInfo(fragment_info.fragmentId);
        // TODO: consider moving this into the fragmenter so it runs under the
        // fragmenter lock; concurrent queries on the same fragmenter should not be
        // allowed in this path anyway.
        fragment->setChunkMetadata(cd->columnId, new_chunk_metadata);
        fragment->shadowChunkMetadataMap =
            fragment->getChunkMetadataMap();  // TODO(adb): needed?

        auto& data_mgr = catalog_.getDataMgr();
        if (data_mgr.gpusPresent()) {
          // flush any GPU copies of the updated chunk
          data_mgr.deleteChunksWithPrefix(chunk_key,
                                          Data_Namespace::MemoryLevel::GPU_LEVEL);
        }
      };
      return callback;
    } else {
      auto callback =
          [this, &update_parameters](UpdateLogForFragment const& update_log) -> void {
        auto entries_per_column = update_log.getEntryCount();
        auto rows_per_column = update_log.getRowCount();
        if (rows_per_column == 0) {
          return;
        }

        OffsetVector column_offsets(rows_per_column);
        ScalarTargetValueVector scalar_target_values(rows_per_column);

        auto complete_entry_block_size = entries_per_column / normalized_cpu_threads();
        auto partial_row_block_size = entries_per_column % normalized_cpu_threads();
        auto usable_threads = normalized_cpu_threads();
        if (UNLIKELY(rows_per_column < (unsigned)normalized_cpu_threads())) {
          complete_entry_block_size = entries_per_column;
          partial_row_block_size = 0;
          usable_threads = 1;
        }
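
        // Block-sizing example: with entries_per_column = 10 and
        // normalized_cpu_threads() = 4, four tasks each scan a complete block of
        // 2 entries and a fifth task picks up the remaining 10 % 4 = 2 entries.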

        std::atomic<size_t> row_idx{0};

        auto process_rows =
            [&update_parameters, &column_offsets, &scalar_target_values, &row_idx](
                auto get_entry_at_func,
                uint64_t column_index,
                uint64_t entry_start,
                uint64_t entry_count) -> uint64_t {
          uint64_t entries_processed = 0;
          for (uint64_t entry_index = entry_start;
               entry_index < (entry_start + entry_count);
               entry_index++) {
            const auto& row = get_entry_at_func(entry_index);
            if (row.empty()) {
              continue;
            }

            entries_processed++;
            size_t row_index = row_idx.fetch_add(1);

            CHECK(row.size() == update_parameters.getUpdateColumnCount() + 1);

            auto terminal_column_iter = std::prev(row.end());
            const auto frag_offset_scalar_tv =
                boost::get<ScalarTargetValue>(&*terminal_column_iter);
            CHECK(frag_offset_scalar_tv);

            column_offsets[row_index] =
                static_cast<uint64_t>(*(boost::get<int64_t>(frag_offset_scalar_tv)));
            scalar_target_values[row_index] =
                boost::get<ScalarTargetValue>(row[column_index]);
          }
          return entries_processed;
        };
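
        // row_idx.fetch_add(1) assigns each non-empty entry a unique, densely
        // packed slot, so an offset and its new value always land at the same
        // index even though threads interleave. The cross-thread ordering is
        // arbitrary, which is harmless: updateColumn() consumes the two vectors
        // positionally as (offset, value) pairs.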

        auto get_row_index =
            [complete_entry_block_size](uint64_t thread_index) -> uint64_t {
          return (thread_index * complete_entry_block_size);
        };

        // Iterate over each column
        for (decltype(update_parameters.getUpdateColumnCount()) column_index = 0;
             column_index < update_parameters.getUpdateColumnCount();
             column_index++) {
          row_idx = 0;
          RowProcessingFuturesVector entry_processing_futures;
          entry_processing_futures.reserve(usable_threads);

          auto get_entry_at_func = [&update_log,
                                    &column_index](const size_t entry_index) {
            if (UNLIKELY(update_log.getColumnType(column_index).is_string())) {
              return update_log.getTranslatedEntryAt(entry_index);
            } else {
              return update_log.getEntryAt(entry_index);
            }
          };
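
          // String targets are read via getTranslatedEntryAt() so dictionary-
          // encoded values are translated before being handed to updateColumn();
          // every other type uses the raw entry accessor.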

          for (unsigned i = 0; i < static_cast<unsigned>(usable_threads); i++) {
            entry_processing_futures.emplace_back(
                std::async(std::launch::async,
                           std::forward<decltype(process_rows)>(process_rows),
                           get_entry_at_func,
                           column_index,
                           get_row_index(i),
                           complete_entry_block_size));
          }
          if (partial_row_block_size) {
            entry_processing_futures.emplace_back(
                std::async(std::launch::async,
                           std::forward<decltype(process_rows)>(process_rows),
                           get_entry_at_func,
                           column_index,
                           get_row_index(usable_threads),
                           partial_row_block_size));
          }

          uint64_t entries_processed(0);
          for (auto& t : entry_processing_futures) {
            t.wait();
            entries_processed += t.get();
          }

          CHECK(row_idx == rows_per_column);

          const auto table_id = update_log.getPhysicalTableId();
          auto const* table_descriptor =
              catalog_.getMetadataForTable(update_log.getPhysicalTableId());
          CHECK(table_descriptor);
          const auto fragmenter = table_descriptor->fragmenter;
          CHECK(fragmenter);
          auto const* target_column = catalog_.getMetadataForColumn(
              table_id, update_parameters.getUpdateColumnNames()[column_index]);

          fragmenter->updateColumn(&catalog_,
                                   table_descriptor,
                                   target_column,
                                   update_log.getFragmentId(),
                                   column_offsets,
                                   scalar_target_values,
                                   update_log.getColumnType(column_index),
                                   Data_Namespace::MemoryLevel::CPU_LEVEL,
                                   update_parameters.getTransactionTracker());
        }
      };
      return callback;
    }
  }
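
  // A DELETE is expressed as an update of the table's hidden boolean "deleted"
  // column ($deleted): each victim row's flag is set to true, either by appending
  // the fully projected column (temporary tables) or in place via updateColumn().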

  StorageIOFacility::UpdateCallback yieldDeleteCallback(
      DeleteTransactionParameters& delete_parameters) {
    using RowProcessingFuturesVector = std::vector<std::future<uint64_t>>;

    if (delete_parameters.tableIsTemporary()) {
      auto callback = [this](UpdateLogForFragment const& update_log) -> void {
        auto rs = update_log.getResultSet();
        CHECK(rs->didOutputColumnar());
        CHECK(rs->isDirectColumnarConversionPossible());
        CHECK_EQ(rs->colCount(), size_t(1));

        // Temporary table deletes require the full projected column
        CHECK_EQ(rs->rowCount(), update_log.getRowCount());

        ChunkKey chunk_key_prefix{catalog_.getCurrentDB().dbId,
                                  update_log.getPhysicalTableId()};
        const auto table_lock =
            lockmgr::TableDataLockMgr::getWriteLockForTable(chunk_key_prefix);

        auto& fragment_info = update_log.getFragmentInfo();
        const auto td = catalog_.getMetadataForTable(update_log.getPhysicalTableId());
        CHECK(td);
        const auto cd = catalog_.getDeletedColumn(td);
        CHECK(cd);
        CHECK(cd->columnType.get_type() == kBOOLEAN);
        auto chunk_metadata =
            fragment_info.getChunkMetadataMapPhysical().find(cd->columnId);
        CHECK(chunk_metadata != fragment_info.getChunkMetadataMapPhysical().end());
        ChunkKey chunk_key{catalog_.getCurrentDB().dbId,
                           td->tableId,
                           cd->columnId,
                           fragment_info.fragmentId};
        auto chunk = Chunk_NS::Chunk::getChunk(cd,
                                               &catalog_.getDataMgr(),
                                               chunk_key,
                                               Data_Namespace::MemoryLevel::CPU_LEVEL,
                                               0,
                                               chunk_metadata->second->numBytes,
                                               chunk_metadata->second->numElements);
        CHECK(chunk);
        auto chunk_buffer = chunk->getBuffer();
        CHECK(chunk_buffer);

        auto encoder = chunk_buffer->getEncoder();
        CHECK(encoder);

        auto owned_buffer = StorageIOFacility::getRsBufferNoPadding(
            rs.get(), 0, cd->columnType, rs->rowCount());
        auto buffer = reinterpret_cast<int8_t*>(owned_buffer.get());

        const auto new_chunk_metadata =
            encoder->appendData(buffer, rs->rowCount(), cd->columnType, false, 0);

        auto fragmenter = td->fragmenter.get();
        CHECK(fragmenter);

        // The fragmenter's copy of the fragment info differs from the copy used by
        // the query engine, so update the metadata in the fragmenter directly.
        auto fragment = fragmenter->getFragmentInfo(fragment_info.fragmentId);
        // TODO: consider moving this into the fragmenter so it runs under the
        // fragmenter lock; concurrent queries on the same fragmenter should not be
        // allowed in this path anyway.
        fragment->setChunkMetadata(cd->columnId, new_chunk_metadata);
        fragment->shadowChunkMetadataMap =
            fragment->getChunkMetadataMap();  // TODO(adb): needed?

        auto& data_mgr = catalog_.getDataMgr();
        if (data_mgr.gpusPresent()) {
          // flush any GPU copies of the updated chunk
          data_mgr.deleteChunksWithPrefix(chunk_key,
                                          Data_Namespace::MemoryLevel::GPU_LEVEL);
        }
      };
      return callback;
    } else {
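      // In-place delete: gather each victim row's fragment offset in parallel,
      // then flip the deleted-column flag at those offsets with a single
      // updateColumn() call for the fragment.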
      auto callback =
          [this, &delete_parameters](UpdateLogForFragment const& update_log) -> void {
        auto entries_per_column = update_log.getEntryCount();
        auto rows_per_column = update_log.getRowCount();
        if (rows_per_column == 0) {
          return;
        }
        DeleteVictimOffsetList victim_offsets(rows_per_column);

        auto complete_row_block_size = entries_per_column / normalized_cpu_threads();
        auto partial_row_block_size = entries_per_column % normalized_cpu_threads();
        auto usable_threads = normalized_cpu_threads();

        if (UNLIKELY(rows_per_column < (unsigned)normalized_cpu_threads())) {
          complete_row_block_size = rows_per_column;
          partial_row_block_size = 0;
          usable_threads = 1;
        }

        std::atomic<size_t> row_idx{0};

        auto process_rows = [&update_log, &victim_offsets, &row_idx](
                                uint64_t entry_start, uint64_t entry_count) -> uint64_t {
          uint64_t entries_processed = 0;

          for (uint64_t entry_index = entry_start;
               entry_index < (entry_start + entry_count);
               entry_index++) {
            auto const row(update_log.getEntryAt(entry_index));

            if (row.empty()) {
              continue;
            }

            entries_processed++;
            size_t row_index = row_idx.fetch_add(1);

            auto terminal_column_iter = std::prev(row.end());
            const auto scalar_tv = boost::get<ScalarTargetValue>(&*terminal_column_iter);
            CHECK(scalar_tv);

            uint64_t fragment_offset =
                static_cast<uint64_t>(*(boost::get<int64_t>(scalar_tv)));
            victim_offsets[row_index] = fragment_offset;
          }
          return entries_processed;
        };

        auto get_row_index =
            [complete_row_block_size](uint64_t thread_index) -> uint64_t {
          return thread_index * complete_row_block_size;
        };

        RowProcessingFuturesVector row_processing_futures;
        row_processing_futures.reserve(usable_threads);

        for (unsigned i = 0; i < (unsigned)usable_threads; i++) {
          row_processing_futures.emplace_back(
              std::async(std::launch::async,
                         std::forward<decltype(process_rows)>(process_rows),
                         get_row_index(i),
                         complete_row_block_size));
        }
        if (partial_row_block_size) {
          row_processing_futures.emplace_back(
              std::async(std::launch::async,
                         std::forward<decltype(process_rows)>(process_rows),
                         get_row_index(usable_threads),
                         partial_row_block_size));
        }

        uint64_t rows_processed(0);
        for (auto& t : row_processing_futures) {
          t.wait();
          rows_processed += t.get();
        }

        auto const* table_descriptor =
            catalog_.getMetadataForTable(update_log.getPhysicalTableId());
        CHECK(!table_is_temporary(table_descriptor));
        auto* fragmenter = table_descriptor->fragmenter.get();
        CHECK(fragmenter);

        auto const* deleted_column_desc = catalog_.getDeletedColumn(table_descriptor);
        CHECK(deleted_column_desc);
        fragmenter->updateColumn(&catalog_,
                                 table_descriptor,
                                 deleted_column_desc,
                                 update_log.getFragmentId(),
                                 victim_offsets,
                                 ScalarTargetValue(int64_t(1L)),
                                 update_log.getColumnType(0),
                                 Data_Namespace::MemoryLevel::CPU_LEVEL,
                                 delete_parameters.getTransactionTracker());
      };
      return callback;
    }
  }

 private:
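  // Using half the hardware threads for row processing is a heuristic that
  // leaves headroom for concurrent query work rather than a hard limit; see
  // cpu_threads() in Shared/thread_count.h.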
  int normalized_cpu_threads() const { return cpu_threads() / 2; }

  static std::unique_ptr<int8_t[]> getRsBufferNoPadding(const ResultSet* rs,
                                                        size_t col_idx,
                                                        const SQLTypeInfo& column_type,
                                                        size_t row_count) {
    const auto padded_size = rs->getPaddedSlotWidthBytes(col_idx);
    const auto type_size = column_type.is_dict_encoded_string()
                               ? column_type.get_size()
                               : column_type.get_logical_size();

    auto rs_buffer_size = padded_size * row_count;
    auto rs_buffer = std::make_unique<int8_t[]>(rs_buffer_size);
    rs->copyColumnIntoBuffer(col_idx, rs_buffer.get(), rs_buffer_size);

    if (type_size < padded_size) {
      // Remove the padding in place, in the same buffer: this is safe because
      // type_size < padded_size, so the write cursor never overtakes the read
      // cursor. For some types, such as kFLOAT, a plain memcpy is not enough.
      auto src_ptr = rs_buffer.get();
      auto dst_ptr = rs_buffer.get();
      if (column_type.is_fp()) {
        CHECK(column_type.get_type() == kFLOAT);
        CHECK(padded_size == sizeof(double));
        // Floats are projected into 8-byte slots as doubles; narrow each value
        // back to a 4-byte float rather than truncating its bytes.
        for (size_t i = 0; i < row_count; i++) {
          const auto old_val = *reinterpret_cast<double*>(may_alias_ptr(src_ptr));
          auto new_val = static_cast<float>(old_val);
          std::memcpy(dst_ptr, &new_val, type_size);
          dst_ptr += type_size;
          src_ptr += padded_size;
        }
      } else {
        // Otherwise just take the first type_size bytes of each padded value.
        for (size_t i = 0; i < row_count; i++) {
          std::memcpy(dst_ptr, src_ptr, type_size);
          dst_ptr += type_size;
          src_ptr += padded_size;
        }
      }
    }
    return rs_buffer;
  }
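
  // Worked example of the in-place compaction above: a FLOAT column with
  // row_count = 3 arrives as three 8-byte double slots |d0|d1|d2| (24 bytes);
  // after the loop the same buffer begins with three 4-byte floats |f0|f1|f2|
  // (12 bytes), and the encoder reads only those leading 12 bytes.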

  Executor* executor_;
  Catalog_Namespace::Catalog const& catalog_;
};