TableArchiver.cpp
/*
 * Copyright 2020 OmniSci, Inc.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#include "TableArchiver/TableArchiver.h"

#include <algorithm>
#include <boost/filesystem.hpp>
#include <boost/process.hpp>
#include <boost/range/combine.hpp>
#include <cerrno>
#include <cstdio>
#include <cstring>
#include <exception>
#include <list>
#include <memory>
#include <regex>
#include <set>
#include <sstream>
#include <system_error>

#include "Catalog/Catalog.h"
#include "LockMgr/LockMgr.h"
#include "Parser/ParseDDL.h"
#include "Shared/File.h"
#include "Shared/Logger.h"
#include "Shared/StringTransform.h"
#include "Shared/ThreadController.h"
#include "Shared/measure.h"
#include "Shared/thread_count.h"

extern bool g_cluster;
extern bool g_test_rollback_dump_restore;

constexpr static char const* table_schema_filename = "_table.sql";
constexpr static char const* table_oldinfo_filename = "_table.oldinfo";
constexpr static char const* table_epoch_filename = "_table.epoch";

namespace std {

template <typename T, typename U>
struct tuple_size<boost::tuples::cons<T, U>>
    : boost::tuples::length<boost::tuples::cons<T, U>> {};
template <size_t I, typename T, typename U>
struct tuple_element<I, boost::tuples::cons<T, U>>
    : boost::tuples::element<I, boost::tuples::cons<T, U>> {};

}  // namespace std
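
// These std::tuple_size / std::tuple_element specializations appear to exist so that the
// boost::tuples produced by boost::combine() can be unpacked with structured bindings,
// as in the column-compatibility loop in restoreTable() below:
//
//   for (const auto& [src_cd, dst_cd] : boost::combine(src_columns, dst_columns)) { ... }
//
// Structured bindings rely on the tuple protocol (tuple_size/tuple_element), which
// boost::tuples::cons does not provide on its own.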

namespace {

inline auto simple_file_closer = [](FILE* f) { std::fclose(f); };

inline std::string abs_path(const File_Namespace::GlobalFileMgr* global_file_mgr) {
  return boost::filesystem::canonical(global_file_mgr->getBasePath()).string();
}

inline std::string run(const std::string& cmd, const std::string& chdir = "") {
  VLOG(3) << "running cmd: " << cmd;
  int rcode;
  std::error_code ec;
  std::string output, errors;
  const auto time_ms = measure<>::execution([&]() {
    using namespace boost::process;
    ipstream stdout, stderr;
    if (!chdir.empty()) {
      rcode = system(cmd, std_out > stdout, std_err > stderr, ec, start_dir = chdir);
    } else {
      rcode = system(cmd, std_out > stdout, std_err > stderr, ec);
    }
    std::ostringstream ss_output, ss_errors;
    stdout >> ss_output.rdbuf();
    stderr >> ss_errors.rdbuf();
    output = ss_output.str();
    errors = ss_errors.str();
  });
  if (rcode || ec) {
    LOG(ERROR) << "failed cmd: " << cmd;
    LOG(ERROR) << "exit code: " << rcode;
    LOG(ERROR) << "error code: " << ec.value() << " - " << ec.message();
    LOG(ERROR) << "stdout: " << output;
    LOG(ERROR) << "stderr: " << errors;
#if defined(__APPLE__)
    // On macOS, the bsdtar options "--use-compress-program" and "--fast-read" together
    // run into a pipe write error after tar extracts the first occurrence of a file and
    // closes the read end while the decompression program is still writing to the pipe.
    // bsdtar does not handle this situation as gracefully as GNU tar does.
    if (1 == rcode && cmd.find("--fast-read") != std::string::npos &&
        (errors.find("cannot write decoded block") != std::string::npos ||
         errors.find("Broken pipe") != std::string::npos)) {
      // Ignore this error, or lose the speed advantage of "--fast-read" on macOS.
      LOG(ERROR) << "tar error ignored on osx for --fast-read";
    } else
#endif
    // Circumvent the tar warning about reading a file that is "changed as we read it".
    // This warning results from reading a table file under concurrent inserts.
    if (1 == rcode && errors.find("changed as we read") != std::string::npos) {
      LOG(ERROR) << "tar error ignored under concurrent inserts";
    } else {
      throw std::runtime_error("Failed to run command: " + cmd +
                               "\nexit code: " + std::to_string(rcode) + "\nerrors:\n" +
                               (rcode ? errors : ec.message()));
    }
  } else {
    VLOG(3) << "finished cmd: " << cmd;
    VLOG(3) << "time: " << time_ms << " ms";
    VLOG(3) << "stdout: " << output;
  }
  return output;
}
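
// Illustrative usage of run(), mirroring how it is called in the rest of this file (the
// paths here are hypothetical): commands are plain shell strings, optionally executed
// from a given working directory, and any unexpected nonzero exit code or error_code is
// surfaced as a std::runtime_error.
//
//   run("mkdir -p /var/lib/omnisci/data/_data");         // create a scratch dir
//   run("tar -cvf /backups/t.gz table_1_2", data_path);  // archive, chdir'ed to data_path
//
// Only the tar diagnostics special-cased above (the "--fast-read" pipe error on macOS
// and "changed as we read" under concurrent inserts) are tolerated.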

inline std::string simple_file_cat(const std::string& archive_path,
                                   const std::string& file_name,
                                   const std::string& compression) {
  filename_security_check(archive_path);
#if defined(__APPLE__)
  constexpr static auto opt_occurrence = "--fast-read";
#else
  constexpr static auto opt_occurrence = "--occurrence=1";
#endif
  boost::filesystem::path temp_dir =
      boost::filesystem::temp_directory_path() / boost::filesystem::unique_path();
  boost::filesystem::create_directories(temp_dir);
  run("tar " + compression + " -xvf " + get_quoted_string(archive_path) + " " +
          opt_occurrence + " " + file_name,
      temp_dir.string());
  const auto output = run("cat " + (temp_dir / file_name).string());
  boost::filesystem::remove_all(temp_dir);
  return output;
}
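
// simple_file_cat() pulls a single small control file out of the archive without
// unpacking the table data; for example (hypothetical archive path and compression flag,
// following the call sites below):
//
//   const auto epoch_str =
//       simple_file_cat("/backups/t.gz", table_epoch_filename, "--gzip");
//
// The "--occurrence=1" / "--fast-read" option makes tar stop after the first match,
// which keeps this cheap even for large archives.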

inline std::string get_table_schema(const std::string& archive_path,
                                    const std::string& table,
                                    const std::string& compression) {
  const auto schema_str =
      simple_file_cat(archive_path, table_schema_filename, compression);
  std::regex regex("@T");
  return std::regex_replace(schema_str, regex, table);
}
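
// The schema stored in the archive (_table.sql) carries the placeholder "@T" where the
// table name would normally appear, so the same dump can be restored under a different
// name; get_table_schema() substitutes the real name. Illustratively (the exact text is
// whatever Catalog::dumpSchema() emitted at dump time):
//
//   "CREATE TABLE @T (i INTEGER, s TEXT ENCODING DICT(32));"
//     => "CREATE TABLE my_table (i INTEGER, s TEXT ENCODING DICT(32));"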

// Adjust the column ids in the chunk keys of a table's data files under temp_data_dir,
// including the files of all shards of the table. This can be slow for big files but
// should scale better than refragmentizing. Table altering should be rare for OLAP.
void adjust_altered_table_files(const std::string& temp_data_dir,
                                const std::unordered_map<int, int>& column_ids_map) {
  boost::filesystem::path base_path(temp_data_dir);
  boost::filesystem::recursive_directory_iterator end_it;
  ThreadController_NS::SimpleThreadController<> thread_controller(cpu_threads());
  for (boost::filesystem::recursive_directory_iterator fit(base_path); fit != end_it;
       ++fit) {
    if (boost::filesystem::is_regular_file(fit->status())) {
      const std::string file_path = fit->path().string();
      const std::string file_name = fit->path().filename().string();
      std::vector<std::string> tokens;
      boost::split(tokens, file_name, boost::is_any_of("."));
      // ref. FileMgr::init for a hint of the data file name layout
      if (tokens.size() > 2 && MAPD_FILE_EXT == "." + tokens[2]) {
        thread_controller.startThread([file_name, file_path, tokens, &column_ids_map] {
          const auto page_size = boost::lexical_cast<int64_t>(tokens[1]);
          const auto file_size = boost::filesystem::file_size(file_path);
          std::unique_ptr<FILE, decltype(simple_file_closer)> fp(
              std::fopen(file_path.c_str(), "r+"), simple_file_closer);
          if (!fp) {
            throw std::runtime_error("Failed to open " + file_path +
                                     " for update: " + std::strerror(errno));
          }
          // ref. FileInfo::openExistingFile for a hint of the chunk header layout
          for (size_t page = 0; page < file_size / page_size; ++page) {
            int ints[8];
            if (0 != std::fseek(fp.get(), page * page_size, SEEK_SET)) {
              throw std::runtime_error("Failed to seek to page# " + std::to_string(page) +
                                       " in " + file_path +
                                       " for read: " + std::strerror(errno));
            }
            if (1 != fread(ints, sizeof ints, 1, fp.get())) {
              throw std::runtime_error("Failed to read " + file_path + ": " +
                                       std::strerror(errno));
            }
            if (ints[0] > 0) {  // header size
              auto cit = column_ids_map.find(ints[3]);
              CHECK(cit != column_ids_map.end());
              if (ints[3] != cit->second) {
                ints[3] = cit->second;
                if (0 != std::fseek(fp.get(), page * page_size, SEEK_SET)) {
                  throw std::runtime_error("Failed to seek to page# " +
                                           std::to_string(page) + " in " + file_path +
                                           " for write: " + std::strerror(errno));
                }
                if (1 != fwrite(ints, sizeof ints, 1, fp.get())) {
                  throw std::runtime_error("Failed to write " + file_path + ": " +
                                           std::strerror(errno));
                }
              }
            }
          }
        });
        thread_controller.checkThreadsStatus();
      }
    }
  }
  thread_controller.finish();
}
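
// A sketch of what the loop above touches, inferred from this code only (see
// FileInfo::openExistingFile for the authoritative chunk header layout): the first
// 8 ints of every page are read; ints[0] is the header size (pages whose header size is
// not positive are skipped), and ints[3] holds the column id portion of the chunk key,
// which is rewritten in place through column_ids_map when the dumped table had been
// ALTERed and its column ids no longer match the destination table's.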

void rename_table_directories(const File_Namespace::GlobalFileMgr* global_file_mgr,
                              const std::string& temp_data_dir,
                              const std::vector<std::string>& target_paths,
                              const std::string& name_prefix) {
  boost::filesystem::path base_path(temp_data_dir);
  boost::filesystem::directory_iterator end_it;
  int target_path_index = 0;
  for (boost::filesystem::directory_iterator fit(base_path); fit != end_it; ++fit) {
    if (!boost::filesystem::is_regular_file(fit->status())) {
      const std::string file_path = fit->path().string();
      const std::string file_name = fit->path().filename().string();
      if (boost::istarts_with(file_name, name_prefix)) {
        const std::string target_path =
            abs_path(global_file_mgr) + "/" + target_paths[target_path_index++];
        if (std::rename(file_path.c_str(), target_path.c_str())) {
          throw std::runtime_error("Failed to rename file " + file_path + " to " +
                                   target_path + ": " + std::strerror(errno));
        }
      }
    }
  }
}
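
// rename_table_directories() is used after extraction to move the untarred data
// directories (whose names still start with the source table's prefix; the call site
// below passes "table_") onto the destination table's directory paths returned by
// Catalog::getTableDataDirectories(), consuming target_paths in directory-iteration
// order.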

}  // namespace

void TableArchiver::dumpTable(const TableDescriptor* td,
                              const std::string& archive_path,
                              const std::string& compression) {
  filename_security_check(archive_path);
  if (g_cluster) {
    throw std::runtime_error("DUMP/RESTORE is not supported yet on distributed setup.");
  }
  if (boost::filesystem::exists(archive_path)) {
    throw std::runtime_error("Archive " + archive_path + " already exists.");
  }
  if (td->isView || td->persistenceLevel != Data_Namespace::MemoryLevel::DISK_LEVEL) {
    throw std::runtime_error("Dumping view or temporary table is not supported.");
  }
  // collect the paths of the files to be archived
  const auto global_file_mgr = cat_->getDataMgr().getGlobalFileMgr();
  std::vector<std::string> file_paths;
  auto file_writer = [&file_paths, global_file_mgr](const std::string& file_name,
                                                    const std::string& file_type,
                                                    const std::string& file_data) {
    const auto file_path = abs_path(global_file_mgr) + "/" + file_name;
    std::unique_ptr<FILE, decltype(simple_file_closer)> fp(
        std::fopen(file_path.c_str(), "w"), simple_file_closer);
    if (!fp) {
      throw std::runtime_error("Failed to create " + file_type + " file '" + file_path +
                               "': " + std::strerror(errno));
    }
    if (std::fwrite(file_data.data(), 1, file_data.size(), fp.get()) < file_data.size()) {
      throw std::runtime_error("Failed to write " + file_type + " file '" + file_path +
                               "': " + std::strerror(errno));
    }
    file_paths.push_back(file_name);
  };
  // Prevent modification of the table schema during a dump operation, while allowing
  // concurrent inserts.
  auto table_read_lock =
      lockmgr::TableSchemaLockMgr::getReadLockForTable(*cat_, td->tableName);
  const auto table_name = td->tableName;
  {
    // - gen schema file
    const auto schema_str = cat_->dumpSchema(td);
    file_writer(table_schema_filename, "table schema", schema_str);
    // - gen column-old-info file
    const auto cds = cat_->getAllColumnMetadataForTable(td->tableId, true, true, true);
    std::vector<std::string> column_oldinfo;
    std::transform(cds.begin(),
                   cds.end(),
                   std::back_inserter(column_oldinfo),
                   [&](const auto cd) -> std::string {
                     return cd->columnName + ":" + std::to_string(cd->columnId) + ":" +
                            cat_->getColumnDictDirectory(cd);
                   });
    const auto column_oldinfo_str = boost::algorithm::join(column_oldinfo, " ");
    file_writer(table_oldinfo_filename, "table old info", column_oldinfo_str);
    // - gen table epoch
    const auto epoch = cat_->getTableEpoch(cat_->getCurrentDB().dbId, td->tableId);
    file_writer(table_epoch_filename, "table epoch", std::to_string(epoch));
    // - collect table data file paths ...
    const auto data_file_dirs = cat_->getTableDataDirectories(td);
    file_paths.insert(file_paths.end(), data_file_dirs.begin(), data_file_dirs.end());
    // - collect table dict file paths ...
    const auto dict_file_dirs = cat_->getTableDictDirectories(td);
    file_paths.insert(file_paths.end(), dict_file_dirs.begin(), dict_file_dirs.end());
    // tar takes time. Release the cat lock to yield the cat to concurrent CREATE statements.
  }
  // run tar to archive the files ... this may take a while !!
  run("tar " + compression + " -cvf " + get_quoted_string(archive_path) + " " +
          boost::algorithm::join(file_paths, " "),
      abs_path(global_file_mgr));
}
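
// For reference, the archive written above contains, in a single tar file:
//
//   _table.sql      the CREATE TABLE statement with "@T" in place of the table name
//   _table.oldinfo  space-separated "columnName:columnId:dictDirectory" entries for
//                   ALL columns (system/virtual/physical geo columns included)
//   _table.epoch    the table epoch at the time of the dump
//   plus the table data directories and dictionary directories collected from the
//   catalog (their on-disk names are whatever getTableDataDirectories() /
//   getTableDictDirectories() report; no particular naming is assumed here).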

// Restore data and dict files of a table from a tgz archive.
void TableArchiver::restoreTable(const Catalog_Namespace::SessionInfo& session,
                                 const TableDescriptor* td,
                                 const std::string& archive_path,
                                 const std::string& compression) {
  filename_security_check(archive_path);
  if (g_cluster) {
    throw std::runtime_error("DUMP/RESTORE is not supported yet on distributed setup.");
  }
  if (!boost::filesystem::exists(archive_path)) {
    throw std::runtime_error("Archive " + archive_path + " does not exist.");
  }
  if (td->isView || td->persistenceLevel != Data_Namespace::MemoryLevel::DISK_LEVEL) {
    throw std::runtime_error("Restoring view or temporary table is not supported.");
  }
  // Obtain a table schema read lock to prevent modification of the schema during
  // restoration.
  const auto table_read_lock =
      lockmgr::TableSchemaLockMgr::getReadLockForTable(*cat_, td->tableName);
  // prevent concurrent inserts into the table during restoration
  const auto insert_data_lock =
      lockmgr::InsertDataLockMgr::getWriteLockForTable(*cat_, td->tableName);

  // Untarring takes time. Do not grab the cat lock, to yield to concurrent CREATE stmts.
  const auto global_file_mgr = cat_->getDataMgr().getGlobalFileMgr();
  // dirs where src files are untarred and dst files are backed up
  constexpr static const auto temp_data_basename = "_data";
  constexpr static const auto temp_back_basename = "_back";
  const auto temp_data_dir = abs_path(global_file_mgr) + "/" + temp_data_basename;
  const auto temp_back_dir = abs_path(global_file_mgr) + "/" + temp_back_basename;
  // clean up the tmp dirs and files in any case
  auto tmp_files_cleaner = [&](void*) {
    run("rm -rf " + temp_data_dir + " " + temp_back_dir);
    run("rm -f " + abs_path(global_file_mgr) + "/" + table_schema_filename);
    run("rm -f " + abs_path(global_file_mgr) + "/" + table_oldinfo_filename);
    run("rm -f " + abs_path(global_file_mgr) + "/" + table_epoch_filename);
  };
  std::unique_ptr<decltype(tmp_files_cleaner), decltype(tmp_files_cleaner)> tfc(
      &tmp_files_cleaner, tmp_files_cleaner);
  // extract & parse schema
  const auto schema_str = get_table_schema(archive_path, td->tableName, compression);
  const auto create_table_stmt =
      Parser::parseDDL<Parser::CreateTableStmt>("table schema", schema_str);
  // verify compatibility between the source and destination schemas
  TableDescriptor src_td;
  std::list<ColumnDescriptor> src_columns;
  std::vector<Parser::SharedDictionaryDef> shared_dict_defs;
  create_table_stmt->executeDryRun(session, src_td, src_columns, shared_dict_defs);
  // - sanity check table-level compatibility
  if (src_td.hasDeletedCol != td->hasDeletedCol) {
    // TODO: allow the case in which the src data enables vacuum while
    // the dst doesn't, by simply discarding the src $deleted column data.
    throw std::runtime_error("Incompatible table VACUUM option");
  }
  if (src_td.nShards != td->nShards) {
    // TODO: allow different shard numbers if they have a "GCD",
    // by splitting/merging the src data files before dropping them into dst.
    throw std::runtime_error("Unmatched number of table shards");
  }
  // - sanity check column-level compatibility (based on column names)
  const auto dst_columns =
      cat_->getAllColumnMetadataForTable(td->tableId, false, false, false);
  if (dst_columns.size() != src_columns.size()) {
    throw std::runtime_error("Unmatched number of table columns");
  }
  for (const auto& [src_cd, dst_cd] : boost::combine(src_columns, dst_columns)) {
    if (src_cd.columnType.get_type_name() != dst_cd->columnType.get_type_name() ||
        src_cd.columnType.get_compression_name() !=
            dst_cd->columnType.get_compression_name()) {
      throw std::runtime_error("Incompatible types on column " + src_cd.columnName);
    }
  }
  // extract the src table column ids (ALL columns, incl. system/virtual/physical geo cols)
  const auto all_src_oldinfo_str =
      simple_file_cat(archive_path, table_oldinfo_filename, compression);
  std::vector<std::string> src_oldinfo_strs;
  boost::algorithm::split(src_oldinfo_strs,
                          all_src_oldinfo_str,
                          boost::is_any_of(" "),
                          boost::token_compress_on);
  auto all_dst_columns =
      cat_->getAllColumnMetadataForTable(td->tableId, true, true, true);
  if (src_oldinfo_strs.size() != all_dst_columns.size()) {
    throw std::runtime_error("Source table has an unmatched number of columns: " +
                             std::to_string(src_oldinfo_strs.size()) + " vs " +
                             std::to_string(all_dst_columns.size()));
  }
  // Build a map of src column ids to dst column ids, in case the src table had been
  // ALTERed before the dump and the chunk keys of the src table need to be adjusted
  // accordingly. Note: this map is used only when migrating a table, not when restoring
  // one; when restoring a table, the two tables must have the same column ids.
  //
  // Also build a map of src dict paths to dst dict paths for relocating the src dicts.
  std::unordered_map<int, int> column_ids_map;
  std::unordered_map<std::string, std::string> dict_paths_map;
  // sort the inputs of the transform in lexical order of column names for correct mappings
  std::list<std::vector<std::string>> src_oldinfo_tokens;
  std::transform(
      src_oldinfo_strs.begin(),
      src_oldinfo_strs.end(),
      std::back_inserter(src_oldinfo_tokens),
      [](const auto& src_oldinfo_str) -> auto {
        std::vector<std::string> tokens;
        boost::algorithm::split(
            tokens, src_oldinfo_str, boost::is_any_of(":"), boost::token_compress_on);
        return tokens;
      });
  src_oldinfo_tokens.sort(
      [](const auto& lhs, const auto& rhs) { return lhs[0].compare(rhs[0]) < 0; });
  all_dst_columns.sort(
      [](auto a, auto b) { return a->columnName.compare(b->columnName) < 0; });
  // transform the inputs into the maps
  std::transform(src_oldinfo_tokens.begin(),
                 src_oldinfo_tokens.end(),
                 all_dst_columns.begin(),
                 std::inserter(column_ids_map, column_ids_map.end()),
                 [&](const auto& tokens, const auto& cd) -> std::pair<int, int> {
                   VLOG(3) << boost::algorithm::join(tokens, ":") << " ==> "
                           << cd->columnName << ":" << cd->columnId;
                   dict_paths_map[tokens[2]] = cat_->getColumnDictDirectory(cd);
                   return {boost::lexical_cast<int>(tokens[1]), cd->columnId};
                 });
  bool was_table_altered = false;
  std::for_each(column_ids_map.begin(), column_ids_map.end(), [&](auto& it) {
    was_table_altered = was_table_altered || it.first != it.second;
  });
  VLOG(3) << "was_table_altered = " << was_table_altered;
  // Extract all data files to a temp dir and swap it with the dst table dir only after
  // everything is set; otherwise the table could be corrupted if anything goes wrong in
  // the middle.
  run("rm -rf " + temp_data_dir);
  run("mkdir -p " + temp_data_dir);
  run("tar " + compression + " -xvf " + get_quoted_string(archive_path), temp_data_dir);
  // if the table was ever altered after it was created, update the column ids in the
  // chunk headers.
  if (was_table_altered) {
    const auto time_ms = measure<>::execution(
        [&]() { adjust_altered_table_files(temp_data_dir, column_ids_map); });
    VLOG(3) << "adjust_altered_table_files: " << time_ms << " ms";
  }
  // finally, swap the table data/dict dirs!
  const auto data_file_dirs = cat_->getTableDataDirectories(td);
  const auto dict_file_dirs = cat_->getTableDictDirectories(td);
  // move the current target dirs, if they exist, to the backup dir
  std::vector<std::string> both_file_dirs;
  std::merge(data_file_dirs.begin(),
             data_file_dirs.end(),
             dict_file_dirs.begin(),
             dict_file_dirs.end(),
             std::back_inserter(both_file_dirs));
  bool backup_completed = false;
  try {
    run("rm -rf " + temp_back_dir);
    run("mkdir -p " + temp_back_dir);
    for (const auto& dir : both_file_dirs) {
      const auto dir_full_path = abs_path(global_file_mgr) + "/" + dir;
      if (boost::filesystem::is_directory(dir_full_path)) {
        run("mv " + dir_full_path + " " + temp_back_dir);
      }
    }
    backup_completed = true;
    // move the src data dirs into place as the dst data dirs
    rename_table_directories(
        cat_->getDataMgr().getGlobalFileMgr(), temp_data_dir, data_file_dirs, "table_");
    // move the src dict dirs into place as the dst dict dirs
    for (const auto& dit : dict_paths_map) {
      if (!dit.first.empty() && !dit.second.empty()) {
        const auto src_dict_path = temp_data_dir + "/" + dit.first;
        const auto dst_dict_path = abs_path(global_file_mgr) + "/" + dit.second;
        run("mv " + src_dict_path + " " + dst_dict_path);
      }
    }
    // throw if the sanity test forces a rollback
    if (g_test_rollback_dump_restore) {
      throw std::runtime_error("lol!");
    }
  } catch (...) {
    // Once the backup is completed, whatever is left in abs_path(global_file_mgr) are
    // the "src" dirs that are to be rolled back and discarded.
    if (backup_completed) {
      run("rm -rf " + boost::algorithm::join(both_file_dirs, " "),
          abs_path(global_file_mgr));
    }
    // complete the rollback by recovering the original "dst" table dirs from the backup dir
    boost::filesystem::path base_path(temp_back_dir);
    boost::filesystem::directory_iterator end_it;
    for (boost::filesystem::directory_iterator fit(base_path); fit != end_it; ++fit) {
      run("mv " + fit->path().string() + " .", abs_path(global_file_mgr));
    }
    throw;
  }
  // set the epoch for reloading the table from the restored/migrated files
  const auto epoch = simple_file_cat(archive_path, table_epoch_filename, compression);
  cat_->setTableEpoch(
      cat_->getCurrentDB().dbId, td->tableId, boost::lexical_cast<int>(epoch));
}
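
// Summary of the swap protocol above: the archive is fully extracted into the "_data"
// scratch dir first, the live table/dict directories are then moved aside into "_back",
// and only after that are the extracted directories and dictionaries moved into place.
// On any failure the partially-moved "src" directories are discarded and the originals
// are moved back from "_back", so the table is never left half restored. The final
// setTableEpoch() call records the dumped epoch so the table is reloaded from the
// restored/migrated files.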

// Migrate a table, which does not exist in the current db, from a tarball into the db.
// This actually creates the table and then restores its data/dict files from the tarball.
void TableArchiver::restoreTable(const Catalog_Namespace::SessionInfo& session,
                                 const std::string& table_name,
                                 const std::string& archive_path,
                                 const std::string& compression) {
  // replace the table name and drop foreign dict references
  const auto schema_str = get_table_schema(archive_path, table_name, compression);
  Parser::parseDDL<Parser::CreateTableStmt>("table schema", schema_str)->execute(session);
  try {
    restoreTable(
        session, cat_->getMetadataForTable(table_name), archive_path, compression);
  } catch (...) {
    Parser::parseDDL<Parser::DropTableStmt>("statement",
                                            "DROP TABLE IF EXISTS " + table_name + ";")
        ->execute(session);
    throw;
  }
}
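
// Typical migration flow (a sketch; the DDL that routes here is defined in the parser,
// not in this file): restoring into a table name that does not yet exist resolves to
// this overload, which creates the table from the archived schema (with "@T" replaced by
// table_name), delegates to restoreTable(session, td, ...) above, and drops the newly
// created table again if that restore fails.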