OmniSciDB  085a039ca4
 All Classes Namespaces Files Functions Variables Typedefs Enumerations Enumerator Friends Macros Groups Pages
InsertDataLoader.cpp
Go to the documentation of this file.
1 /*
2  * Copyright 2019, OmniSci, Inc.
3  *
4  * Licensed under the Apache License, Version 2.0 (the "License");
5  * you may not use this file except in compliance with the License.
6  * You may obtain a copy of the License at
7  *
8  * http://www.apache.org/licenses/LICENSE-2.0
9  *
10  * Unless required by applicable law or agreed to in writing, software
11  * distributed under the License is distributed on an "AS IS" BASIS,
12  * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13  * See the License for the specific language governing permissions and
14  * limitations under the License.
15  */
16 
17 #include <algorithm>
18 #include <numeric>
19 #include <vector>
20 
21 #include "../Shared/shard_key.h"
22 #include "Geospatial/Types.h"
23 #include "InsertDataLoader.h"
25 
26 namespace Fragmenter_Namespace {
27 
29  std::vector<std::vector<uint8_t>> rawData;
30  std::vector<std::vector<std::string>> stringData;
31  std::vector<std::vector<ArrayDatum>> arrayData;
32 };
33 
// Distribute `row_count` rows across the physical shard tables of a sharded
// table (`shard_count` shards spread over `leaf_count` leaves).
//
// Returns one vector per physical shard table; entry i holds the indices
// (into `src`) of all rows whose shard key routes to shard table i.
//
// When `duplicated_key_value` is true, every row carries the same shard-key
// value (a column-default insert), so the destination shard is computed once
// from src[0] and every row index is routed to that single shard.
template <typename SRC>
std::vector<std::vector<size_t>> compute_row_indices_of_shards(
    size_t shard_count,
    size_t leaf_count,
    size_t row_count,
    SRC* src,
    bool duplicated_key_value) {
  const size_t num_physical_tables = shard_count * leaf_count;
  std::vector<std::vector<size_t>> result(num_physical_tables);
  // Unsigned key data can use a plain remainder; signed keys go through
  // SHARD_FOR_KEY, which accounts for negative values.
  auto shard_of = [num_physical_tables](SRC key) {
    return (std::is_unsigned<SRC>::value) ? key % num_physical_tables
                                          : SHARD_FOR_KEY(key, num_physical_tables);
  };
  if (duplicated_key_value) {
    auto& target = result[shard_of(src[0])];
    target.reserve(row_count);
    for (size_t row = 0; row != row_count; ++row) {
      target.push_back(row);
    }
  } else {
    for (size_t row = 0; row != row_count; ++row) {
      result[shard_of(src[row])].push_back(row);
    }
  }
  return result;
}
64 
// Returns the index of the first occurrence of `val` in `vec`.
// It is a logic error (CHECK failure) for `val` to be absent; every call
// site looks up a column id that is known to exist in the vector.
//
// Takes the vector and value by const reference: the function never
// mutates its arguments, and this avoids copying `val` (backward
// compatible for all existing callers, which pass lvalues).
template <typename T>
size_t indexOf(const std::vector<T>& vec, const T& val) {
  const auto it = std::find(vec.begin(), vec.end(), val);
  CHECK(it != vec.end());
  return static_cast<size_t>(std::distance(vec.begin(), it));
}
71 
73  return (cd->columnType.is_geometry()) ||
74  (cd->columnType.is_string() &&
76 }
77 
79  return cd->columnType.is_array();
80 }
81 
83  const ColumnDescriptor* cd,
84  const bool get_logical_size = true) {
85  switch (cd->columnType.get_type()) {
86  case kPOINT:
87  case kLINESTRING:
88  case kPOLYGON:
89  case kMULTIPOLYGON:
90  case kARRAY:
91  throw std::runtime_error("geo and array columns have variable length elements");
92  case kBOOLEAN:
93  case kTINYINT:
94  case kSMALLINT:
95  case kINT:
96  case kBIGINT:
97  case kNUMERIC:
98  case kDECIMAL:
99  case kFLOAT:
100  case kDOUBLE:
101  case kTIMESTAMP:
102  case kTIME:
103  case kINTERVAL_DAY_TIME:
105  case kDATE:
106  return get_logical_size ? cd->columnType.get_logical_size()
107  : cd->columnType.get_size();
108  case kTEXT:
109  case kVARCHAR:
110  case kCHAR:
112  throw std::runtime_error(
113  "non encoded string columns have variable length elements");
114  }
115  return cd->columnType.get_size();
116  default:
117  throw std::runtime_error("not supported column type: " + cd->columnName + " (" +
118  cd->columnType.get_type_name() + ")");
119  }
120 }
121 
122 std::vector<std::vector<size_t>> compute_row_indices_of_shards(
124  size_t leaf_count,
125  const InsertChunks& insert_chunks) {
126  const auto* td = cat.getMetadataForTable(insert_chunks.table_id);
127  const auto* shard_cd = cat.getShardColumnMetadataForTable(td);
128  auto find_it = insert_chunks.chunks.find(shard_cd->columnId);
129  CHECK(find_it != insert_chunks.chunks.end());
130  Chunk_NS::Chunk& shard_chunk = *find_it->second;
131  auto row_count = shard_chunk.getBuffer()->getEncoder()->getNumElems();
132  auto shard_count = td->nShards;
133 
134  CHECK(!isStringVectorData(shard_cd));
135  CHECK(!isDatumVectorData(shard_cd));
136 
137  auto memory_ptr = shard_chunk.getBuffer()->getMemoryPtr();
138  CHECK(memory_ptr);
139  switch (size_of_raw_column(cat, shard_cd, false)) {
140  case 1:
141  return compute_row_indices_of_shards(shard_count,
142  leaf_count,
143  row_count,
144  reinterpret_cast<uint8_t*>(memory_ptr),
145  false);
146  case 2:
147  return compute_row_indices_of_shards(shard_count,
148  leaf_count,
149  row_count,
150  reinterpret_cast<uint16_t*>(memory_ptr),
151  false);
152  case 4:
153  return compute_row_indices_of_shards(shard_count,
154  leaf_count,
155  row_count,
156  reinterpret_cast<uint32_t*>(memory_ptr),
157  false);
158  case 8:
159  return compute_row_indices_of_shards(shard_count,
160  leaf_count,
161  row_count,
162  reinterpret_cast<uint64_t*>(memory_ptr),
163  false);
164  default:
165  UNREACHABLE() << "unexpected data element size of column";
166  }
167  return {};
168 }
169 
170 std::vector<std::vector<size_t>> computeRowIndicesOfShards(
172  size_t leafCount,
173  InsertData& insert_data) {
174  const auto* td = cat.getMetadataForTable(insert_data.tableId);
175  const auto* shard_cd = cat.getShardColumnMetadataForTable(td);
176  auto shardDataBlockIndex = indexOf(insert_data.columnIds, shard_cd->columnId);
177  DataBlockPtr& shardDataBlock = insert_data.data[shardDataBlockIndex];
178  auto rowCount = insert_data.numRows;
179  auto shardCount = td->nShards;
180 
181  CHECK(!isStringVectorData(shard_cd));
182  CHECK(!isDatumVectorData(shard_cd));
183 
184  CHECK(insert_data.is_default.size() == insert_data.columnIds.size());
185  bool is_default = insert_data.is_default[shardDataBlockIndex];
186  switch (size_of_raw_column(cat, shard_cd)) {
187  case 1:
189  shardCount,
190  leafCount,
191  rowCount,
192  reinterpret_cast<uint8_t*>(shardDataBlock.numbersPtr),
193  is_default);
194  case 2:
196  shardCount,
197  leafCount,
198  rowCount,
199  reinterpret_cast<uint16_t*>(shardDataBlock.numbersPtr),
200  is_default);
201  case 4:
203  shardCount,
204  leafCount,
205  rowCount,
206  reinterpret_cast<uint32_t*>(shardDataBlock.numbersPtr),
207  is_default);
208  case 8:
210  shardCount,
211  leafCount,
212  rowCount,
213  reinterpret_cast<uint64_t*>(shardDataBlock.numbersPtr),
214  is_default);
215  }
216  throw std::runtime_error("Unexpected data block element size");
217 }
218 
// Gathers the elements of `src` selected by `rowIndices` into the dense
// output array: dst[i] = src[rowIndices[i]].
// `dst` must provide room for rowIndices.size() elements.
template <typename T>
void copyColumnDataOfShard(const std::vector<size_t>& rowIndices, T* src, T* dst) {
  std::transform(rowIndices.begin(),
                 rowIndices.end(),
                 dst,
                 [src](const size_t source_row) { return src[source_row]; });
}
226 
228  int columnId;
231 };
232 
234  ShardDataOwner& dataOwner,
235  const std::vector<size_t>& rowIndices,
236  const ColumnDescriptor* pCol,
237  size_t columnIndex,
238  DataBlockPtr dataBlock,
239  bool is_default) {
240  DataBlockPtr ret;
241  std::vector<size_t> single_row_idx({0ul});
242  const std::vector<size_t>& rows = is_default ? single_row_idx : rowIndices;
243  if (isStringVectorData(pCol)) {
244  auto& data = dataOwner.stringData[columnIndex];
245  data.resize(rows.size());
246  copyColumnDataOfShard(rows, &(*(dataBlock.stringsPtr))[0], &data[0]);
247  ret.stringsPtr = &data;
248 
249  } else if (isDatumVectorData(pCol)) {
250  auto& data = dataOwner.arrayData[columnIndex];
251  data.resize(rows.size());
252  copyColumnDataOfShard(rows, &(*(dataBlock.arraysPtr))[0], &data[0]);
253  ret.arraysPtr = &data;
254 
255  } else {
256  auto rawArrayElementSize = size_of_raw_column(cat, pCol);
257  auto& data = dataOwner.rawData[columnIndex];
258  data.resize(rows.size() * rawArrayElementSize);
259 
260  switch (rawArrayElementSize) {
261  case 1: {
263  reinterpret_cast<uint8_t*>(dataBlock.numbersPtr),
264  reinterpret_cast<uint8_t*>(&data[0]));
265  break;
266  }
267  case 2: {
269  reinterpret_cast<uint16_t*>(dataBlock.numbersPtr),
270  reinterpret_cast<uint16_t*>(&data[0]));
271  break;
272  }
273  case 4: {
275  reinterpret_cast<uint32_t*>(dataBlock.numbersPtr),
276  reinterpret_cast<uint32_t*>(&data[0]));
277  break;
278  }
279  case 8: {
281  reinterpret_cast<uint64_t*>(dataBlock.numbersPtr),
282  reinterpret_cast<uint64_t*>(&data[0]));
283  break;
284  }
285  default:
286  throw std::runtime_error("Unexpected data block element size");
287  }
288 
289  ret.numbersPtr = reinterpret_cast<int8_t*>(&data[0]);
290  }
291 
292  return {pCol->columnId, ret, is_default};
293 }
294 
295 std::pair<std::list<std::unique_ptr<foreign_storage::ForeignStorageBuffer>>, InsertChunks>
297  const InsertChunks& insert_chunks,
298  int shardTableIndex,
299  const std::vector<size_t>& rowIndices) {
300  const auto* table = cat.getMetadataForTable(insert_chunks.table_id);
301  const auto* physical_table = cat.getPhysicalTablesDescriptors(table)[shardTableIndex];
302 
303  InsertChunks insert_chunks_for_shard{
304  physical_table->tableId, insert_chunks.db_id, {}, {}};
305 
306  std::list<std::unique_ptr<foreign_storage::ForeignStorageBuffer>> buffers;
307 
308  for (const auto& [column_id, chunk] : insert_chunks.chunks) {
309  auto column = chunk->getColumnDesc();
310  insert_chunks_for_shard.chunks[column_id] = std::make_shared<Chunk_NS::Chunk>(column);
311  auto& chunk_for_shard = *insert_chunks_for_shard.chunks[column_id];
312  chunk_for_shard.setBuffer(
313  buffers.emplace_back(std::make_unique<foreign_storage::ForeignStorageBuffer>())
314  .get());
315  if (column->columnType.is_varlen_indeed()) { // requires an index buffer
316  chunk_for_shard.setIndexBuffer(
317  buffers.emplace_back(std::make_unique<foreign_storage::ForeignStorageBuffer>())
318  .get());
319  }
320  chunk_for_shard.initEncoder();
321  chunk_for_shard.appendEncodedDataAtIndices(*chunk, rowIndices);
322  CHECK_EQ(chunk_for_shard.getBuffer()->getEncoder()->getNumElems(), rowIndices.size());
323  }
324 
325  // mark which row indices are valid for import
326  auto row_count = rowIndices.size();
327  insert_chunks_for_shard.valid_row_indices.reserve(row_count);
328  for (size_t irow = 0; irow < row_count; ++irow) {
329  auto row_index = rowIndices[irow];
330  if (std::binary_search(insert_chunks.valid_row_indices.begin(),
331  insert_chunks.valid_row_indices.end(),
332  row_index)) {
333  insert_chunks_for_shard.valid_row_indices.emplace_back(irow);
334  }
335  }
336 
337  return {std::move(buffers), insert_chunks_for_shard};
338 }
339 
341  ShardDataOwner& dataOwner,
342  InsertData& insert_data,
343  int shardTableIndex,
344  const std::vector<size_t>& rowIndices) {
345  const auto* td = cat.getMetadataForTable(insert_data.tableId);
346  const auto* ptd = cat.getPhysicalTablesDescriptors(td)[shardTableIndex];
347 
348  InsertData shardData;
349  shardData.databaseId = insert_data.databaseId;
350  shardData.tableId = ptd->tableId;
351  shardData.numRows = rowIndices.size();
352 
353  std::vector<const ColumnDescriptor*> pCols;
354  std::vector<int> lCols;
355 
356  {
357  auto logicalColumns = cat.getAllColumnMetadataForTable(td->tableId, true, true, true);
358  for (const auto& cd : logicalColumns) {
359  lCols.push_back(cd->columnId);
360  }
361 
362  auto physicalColumns =
363  cat.getAllColumnMetadataForTable(ptd->tableId, true, true, true);
364  for (const auto& cd : physicalColumns) {
365  pCols.push_back(cd);
366  }
367  }
368 
369  for (size_t col = 0; col < insert_data.columnIds.size(); col++) {
370  dataOwner.arrayData.emplace_back();
371  dataOwner.rawData.emplace_back();
372  dataOwner.stringData.emplace_back();
373  }
374 
375  auto copycat = [&cat, &dataOwner, &rowIndices, &lCols, &pCols, &insert_data](int col) {
376  const auto lColId = insert_data.columnIds[col];
377  const auto pCol = pCols[indexOf(lCols, lColId)];
378  return copyColumnDataOfShard(cat,
379  dataOwner,
380  rowIndices,
381  pCol,
382  col,
383  insert_data.data[col],
384  insert_data.is_default[col]);
385  };
386 
387  std::vector<std::future<BlockWithColumnId>> worker_threads;
388  for (size_t col = 0; col < insert_data.columnIds.size(); col++) {
389  worker_threads.push_back(std::async(std::launch::async, copycat, col));
390  }
391 
392  for (auto& child : worker_threads) {
393  child.wait();
394  }
395 
396  for (auto& child : worker_threads) {
397  auto shardColumnData = child.get();
398  shardData.columnIds.push_back(shardColumnData.columnId);
399  shardData.data.push_back(shardColumnData.block);
400  shardData.is_default.push_back(shardColumnData.is_default);
401  }
402 
403  return shardData;
404 }
405 
407  std::unique_lock current_leaf_index_lock(current_leaf_index_mutex_);
408  size_t starting_leaf_index = current_leaf_index_;
412  }
413  return starting_leaf_index;
414 }
415 
417  const InsertChunks& insert_chunks) {
418  const auto& cat = session_info.getCatalog();
419  const auto* td = cat.getMetadataForTable(insert_chunks.table_id);
420 
421  CHECK(td);
422  if (td->nShards == 0) {
423  connector_.insertChunksToLeaf(session_info, moveToNextLeaf(), insert_chunks);
424  } else {
425  // we have a sharded target table, start spreading to physical tables
426  auto row_indices_of_shards =
428 
429  auto insert_shard_data =
430  [this, &session_info, &insert_chunks, &cat, &td, &row_indices_of_shards](
431  size_t shardId) {
432  const auto shard_tables = cat.getPhysicalTablesDescriptors(td);
433  auto stard_table_idx = shardId % td->nShards;
434  auto shard_leaf_idx = shardId / td->nShards;
435 
436  const auto& row_indices_of_shard = row_indices_of_shards[shardId];
437 
438  auto [buffers, shard_insert_chunks] = copy_data_of_shard(
439  cat, insert_chunks, stard_table_idx, row_indices_of_shard);
441  session_info, shard_leaf_idx, shard_insert_chunks);
442  };
443 
444  std::vector<std::future<void>> worker_threads;
445  for (size_t shard_id = 0; shard_id < row_indices_of_shards.size(); shard_id++) {
446  if (row_indices_of_shards[shard_id].size() > 0) {
447  worker_threads.push_back(
448  std::async(std::launch::async, insert_shard_data, shard_id));
449  }
450  }
451  for (auto& child : worker_threads) {
452  child.wait();
453  }
454  for (auto& child : worker_threads) {
455  child.get();
456  }
457  }
458 }
459 
461  InsertData& insert_data) {
462  const auto& cat = session_info.getCatalog();
463  const auto* td = cat.getMetadataForTable(insert_data.tableId);
464 
465  CHECK(td);
466  if (td->nShards == 0) {
467  connector_.insertDataToLeaf(session_info, moveToNextLeaf(), insert_data);
468  } else {
469  // we have a sharded target table, start spreading to physical tables
470  auto rowIndicesOfShards =
472 
473  auto insertShardData =
474  [this, &session_info, &insert_data, &cat, &td, &rowIndicesOfShards](
475  size_t shardId) {
476  const auto shard_tables = cat.getPhysicalTablesDescriptors(td);
477  auto stardTableIdx = shardId % td->nShards;
478  auto shardLeafIdx = shardId / td->nShards;
479 
480  const auto& rowIndicesOfShard = rowIndicesOfShards[shardId];
481  ShardDataOwner shardDataOwner;
482 
483  InsertData shardData = copyDataOfShard(
484  cat, shardDataOwner, insert_data, stardTableIdx, rowIndicesOfShard);
485  CHECK(shardData.numRows > 0);
486  connector_.insertDataToLeaf(session_info, shardLeafIdx, shardData);
487  };
488 
489  std::vector<std::future<void>> worker_threads;
490  for (size_t shardId = 0; shardId < rowIndicesOfShards.size(); shardId++) {
491  if (rowIndicesOfShards[shardId].size() > 0) {
492  worker_threads.push_back(
493  std::async(std::launch::async, insertShardData, shardId));
494  }
495  }
496  for (auto& child : worker_threads) {
497  child.wait();
498  }
499  for (auto& child : worker_threads) {
500  child.get();
501  }
502  }
503 }
504 
506  const Catalog_Namespace::SessionInfo& session,
507  const size_t leaf_idx,
508  const Fragmenter_Namespace::InsertChunks& insert_chunks) {
509  CHECK(leaf_idx == 0);
510  auto& catalog = session.getCatalog();
511  auto created_td = catalog.getMetadataForTable(insert_chunks.table_id);
512  created_td->fragmenter->insertChunksNoCheckpoint(insert_chunks);
513 }
514 
516  const size_t leaf_idx,
517  InsertData& insert_data) {
518  CHECK(leaf_idx == 0);
519  auto& catalog = session.getCatalog();
520  auto created_td = catalog.getMetadataForTable(insert_data.tableId);
521  created_td->fragmenter->insertDataNoCheckpoint(insert_data);
522 }
523 
525  int table_id) {
526  auto& catalog = session.getCatalog();
527  catalog.checkpointWithAutoRollback(table_id);
528 }
529 
531  int table_id) {
532  auto& catalog = session.getCatalog();
533  auto db_id = catalog.getDatabaseId();
534  auto table_epochs = catalog.getTableEpochs(db_id, table_id);
535  catalog.setTableEpochs(db_id, table_epochs);
536 }
537 
538 } // namespace Fragmenter_Namespace
#define CHECK_EQ(x, y)
Definition: Logger.h:231
HOST DEVICE int get_size() const
Definition: sqltypes.h:339
std::string cat(Ts &&...args)
void insertChunks(const Catalog_Namespace::SessionInfo &session_info, const InsertChunks &insert_chunks)
class for a per-database catalog. also includes metadata for the current database and the current use...
Definition: Catalog.h:114
Definition: sqltypes.h:49
std::vector< std::string > * stringsPtr
Definition: sqltypes.h:227
std::vector< ArrayDatum > * arraysPtr
Definition: sqltypes.h:228
std::pair< std::list< std::unique_ptr< foreign_storage::ForeignStorageBuffer > >, InsertChunks > copy_data_of_shard(const Catalog_Namespace::Catalog &cat, const InsertChunks &insert_chunks, int shardTableIndex, const std::vector< size_t > &rowIndices)
std::vector< std::vector< size_t > > computeRowIndicesOfShards(const Catalog_Namespace::Catalog &cat, size_t leafCount, InsertData &insert_data)
#define UNREACHABLE()
Definition: Logger.h:267
std::vector< bool > is_default
Definition: Fragmenter.h:75
HOST DEVICE SQLTypes get_type() const
Definition: sqltypes.h:329
virtual void insertDataToLeaf(const Catalog_Namespace::SessionInfo &parent_session_info, const size_t leaf_idx, Fragmenter_Namespace::InsertData &insert_data)=0
const ColumnDescriptor * getShardColumnMetadataForTable(const TableDescriptor *td) const
Definition: Catalog.cpp:4247
bool isStringVectorData(const ColumnDescriptor *cd)
void insertData(const Catalog_Namespace::SessionInfo &session_info, InsertData &insert_data)
int tableId
identifies the database into which the data is being inserted
Definition: Fragmenter.h:70
size_t numRows
a vector of column ids for the row(s) being inserted
Definition: Fragmenter.h:72
void checkpoint(const Catalog_Namespace::SessionInfo &parent_session_info, int tableId) override
size_t size_of_raw_column(const Catalog_Namespace::Catalog &cat, const ColumnDescriptor *cd, const bool get_logical_size=true)
future< Result > async(Fn &&fn, Args &&...args)
int get_logical_size() const
Definition: sqltypes.h:349
void insertDataToLeaf(const Catalog_Namespace::SessionInfo &parent_session_info, const size_t leaf_idx, Fragmenter_Namespace::InsertData &insert_data) override
size_t getNumElems() const
Definition: Encoder.h:284
std::vector< std::vector< uint8_t > > rawData
int getDatabaseId() const
Definition: Catalog.h:284
specifies the content in-memory of a row in the column metadata table
std::vector< std::vector< size_t > > compute_row_indices_of_shards(size_t shard_count, size_t leaf_count, size_t row_count, SRC *src, bool duplicated_key_value)
void checkpointWithAutoRollback(const int logical_table_id) const
Definition: Catalog.cpp:4426
std::vector< const TableDescriptor * > getPhysicalTablesDescriptors(const TableDescriptor *logical_table_desc, bool populate_fragmenter=true) const
Definition: Catalog.cpp:4265
std::shared_ptr< Fragmenter_Namespace::AbstractFragmenter > fragmenter
size_t indexOf(std::vector< T > &vec, T val)
Definition: sqltypes.h:52
Definition: sqltypes.h:53
HOST DEVICE EncodingType get_compression() const
Definition: sqltypes.h:337
std::vector< DataBlockPtr > data
the number of rows being inserted
Definition: Fragmenter.h:73
AbstractBuffer * getBuffer() const
Definition: Chunk.h:146
Catalog & getCatalog() const
Definition: SessionInfo.h:65
std::map< int, std::shared_ptr< Chunk_NS::Chunk > > chunks
Definition: Fragmenter.h:52
void insertChunksToLeaf(const Catalog_Namespace::SessionInfo &parent_session_info, const size_t leaf_idx, const Fragmenter_Namespace::InsertChunks &insert_chunks) override
std::list< const ColumnDescriptor * > getAllColumnMetadataForTable(const int tableId, const bool fetchSystemColumns, const bool fetchVirtualColumns, const bool fetchPhysicalColumns) const
Returns a list of pointers to constant ColumnDescriptor structs for all the columns from a particular...
Definition: Catalog.cpp:1941
void copyColumnDataOfShard(const std::vector< size_t > &rowIndices, T *src, T *dst)
std::vector< std::vector< ArrayDatum > > arrayData
std::string get_type_name() const
Definition: sqltypes.h:443
Definition: sqltypes.h:41
void rollback(const Catalog_Namespace::SessionInfo &parent_session_info, int tableId) override
std::vector< size_t > valid_row_indices
Definition: Fragmenter.h:53
bool isDatumVectorData(const ColumnDescriptor *cd)
std::vector< std::vector< std::string > > stringData
#define CHECK(condition)
Definition: Logger.h:223
bool is_geometry() const
Definition: sqltypes.h:522
The data to be inserted using the fragment manager.
Definition: Fragmenter.h:68
Definition: sqltypes.h:45
SQLTypeInfo columnType
InsertData copyDataOfShard(const Catalog_Namespace::Catalog &cat, ShardDataOwner &dataOwner, InsertData &insert_data, int shardTableIndex, const std::vector< size_t > &rowIndices)
const TableDescriptor * getMetadataForTable(const std::string &tableName, const bool populateFragmenter=true) const
Returns a pointer to a const TableDescriptor struct matching the provided tableName.
bool is_string() const
Definition: sqltypes.h:510
int8_t * numbersPtr
Definition: sqltypes.h:226
std::vector< int > columnIds
identifies the table into which the data is being inserted
Definition: Fragmenter.h:71
virtual void insertChunksToLeaf(const Catalog_Namespace::SessionInfo &parent_session_info, const size_t leaf_idx, const Fragmenter_Namespace::InsertChunks &insert_chunks)=0
std::string columnName
#define SHARD_FOR_KEY(key, num_shards)
Definition: shard_key.h:20
bool is_array() const
Definition: sqltypes.h:518