OmniSciDB  c0231cc57d
TableArchiver.cpp
1 /*
2  * Copyright 2022 HEAVY.AI, Inc.
3  *
4  * Licensed under the Apache License, Version 2.0 (the "License");
5  * you may not use this file except in compliance with the License.
6  * You may obtain a copy of the License at
7  *
8  * http://www.apache.org/licenses/LICENSE-2.0
9  *
10  * Unless required by applicable law or agreed to in writing, software
11  * distributed under the License is distributed on an "AS IS" BASIS,
12  * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13  * See the License for the specific language governing permissions and
14  * limitations under the License.
15  */
16 
17 #include "TableArchiver/TableArchiver.h"
18 
19 #include <algorithm>
20 #include <boost/filesystem.hpp>
21 #include <boost/process.hpp>
22 #include <boost/range/combine.hpp>
23 #include <boost/version.hpp>
24 #include <cerrno>
25 #include <cstdio>
26 #include <cstring>
27 #include <exception>
28 #include <list>
29 #include <memory>
30 #include <regex>
31 #include <set>
32 #include <sstream>
33 #include <system_error>
34 
38 #include "LockMgr/LockMgr.h"
39 #include "Logger/Logger.h"
40 #include "Parser/ParserNode.h"
41 #include "Shared/File.h"
42 #include "Shared/StringTransform.h"
44 #include "Shared/measure.h"
45 #include "Shared/thread_count.h"
46 
47 extern bool g_cluster;
48 extern bool g_test_rollback_dump_restore;
49 
50 constexpr static char const* table_schema_filename = "_table.sql";
51 constexpr static char const* table_oldinfo_filename = "_table.oldinfo";
52 constexpr static char const* table_epoch_filename = "_table.epoch";
53 
54 #if BOOST_VERSION < 107300
55 namespace std {
56 
57 template <typename T, typename U>
58 struct tuple_size<boost::tuples::cons<T, U>>
59  : boost::tuples::length<boost::tuples::cons<T, U>> {};
60 template <size_t I, typename T, typename U>
61 struct tuple_element<I, boost::tuples::cons<T, U>>
62  : boost::tuples::element<I, boost::tuples::cons<T, U>> {};
63 
64 } // namespace std
65 #endif
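// These std::tuple_size / std::tuple_element specializations adapt boost::tuples
// (as produced by boost::combine below) to the structured-bindings protocol, so that
// `for (const auto& [src_cd, dst_cd] : boost::combine(src_columns, dst_columns))`
// compiles; Boost 1.73+ appears to provide equivalent adaptors, hence the version guard.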
66 
67 namespace {
68 
69 inline auto simple_file_closer = [](FILE* f) { std::fclose(f); };
70 
71 inline std::string abs_path(const File_Namespace::GlobalFileMgr* global_file_mgr) {
72  return boost::filesystem::canonical(global_file_mgr->getBasePath()).string();
73 }
74 
75 inline std::string run(const std::string& cmd, const std::string& chdir = "") {
76  VLOG(3) << "running cmd: " << cmd;
77  int rcode;
78  std::error_code ec;
79  std::string output, errors;
80  const auto time_ms = measure<>::execution([&]() {
81  using namespace boost::process;
82  ipstream stdout, stderr;
83  if (!chdir.empty()) {
84  rcode = system(cmd, std_out > stdout, std_err > stderr, ec, start_dir = chdir);
85  } else {
86  rcode = system(cmd, std_out > stdout, std_err > stderr, ec);
87  }
88  std::ostringstream ss_output, ss_errors;
89  stdout >> ss_output.rdbuf();
90  stderr >> ss_errors.rdbuf();
91  output = ss_output.str();
92  errors = ss_errors.str();
93  });
94  if (rcode || ec) {
95  LOG(ERROR) << "failed cmd: " << cmd;
96  LOG(ERROR) << "exit code: " << rcode;
97  LOG(ERROR) << "error code: " << ec.value() << " - " << ec.message();
98  LOG(ERROR) << "stdout: " << output;
99  LOG(ERROR) << "stderr: " << errors;
100 #if defined(__APPLE__)
101  // osx bsdtar options "--use-compress-program" and "--fast-read" together
102  // run into a pipe write error after tar extracts the first occurrence of a
103  // file and closes the read end while the decompression program still writes
104  // to the pipe. bsdtar doesn't handle this situation as gracefully as gnu tar does.
105  if (1 == rcode && cmd.find("--fast-read") != std::string::npos &&
106  (errors.find("cannot write decoded block") != std::string::npos ||
107  errors.find("Broken pipe") != std::string::npos)) {
108  // ignore this error, or lose speed advantage of "--fast-read" on osx.
109  LOG(ERROR) << "tar error ignored on osx for --fast-read";
110  } else
111 #endif
112  // circumvent tar warning on reading file that is "changed as we read it".
113  // this warning results from reading a table file under concurrent inserts
114  if (1 == rcode && errors.find("changed as we read") != std::string::npos) {
115  LOG(ERROR) << "tar error ignored under concurrent inserts";
116  } else {
117  int error_code;
118  std::string error_message;
119  if (ec) {
120  error_code = ec.value();
121  error_message = ec.message();
122  } else {
123  error_code = rcode;
124  // Show a more concise message for permission errors instead of the default
125  // verbose message. Error logs will still contain all details.
126  if (to_lower(errors).find("permission denied") != std::string::npos) {
127  error_message = "Insufficient file read/write permission.";
128  } else {
129  error_message = errors;
130  }
131  }
132  throw std::runtime_error(
133  "An error occurred while executing an internal command. Error code: " +
134  std::to_string(error_code) + ", message: " + error_message);
135  }
136  } else {
137  VLOG(3) << "finished cmd: " << cmd;
138  VLOG(3) << "time: " << time_ms << " ms";
139  VLOG(3) << "stdout: " << output;
140  }
141  return output;
142 }
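// Example (for illustration): run("tar -tzf a.tgz", "/tmp") would execute the command
// with /tmp as its working directory, capture stdout/stderr, log the elapsed time under
// VLOG(3), and return the captured stdout; a non-zero exit code or error_code throws
// std::runtime_error unless it matches one of the tolerated tar warnings above.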
143 
144 inline std::string simple_file_cat(const std::string& archive_path,
145  const std::string& file_name,
146  const std::string& compression) {
149 #if defined(__APPLE__)
150  constexpr static auto opt_occurrence = "--fast-read";
151 #else
152  constexpr static auto opt_occurrence = "--occurrence=1";
153 #endif
154  boost::filesystem::path temp_dir =
155  boost::filesystem::temp_directory_path() / boost::filesystem::unique_path();
156  boost::filesystem::create_directories(temp_dir);
157  run("tar " + compression + " -xvf " + get_quoted_string(archive_path) + " " +
158  opt_occurrence + " " + file_name,
159  temp_dir.string());
160  const auto output = run("cat " + (temp_dir / file_name).string());
161  boost::filesystem::remove_all(temp_dir);
162  return output;
163 }
164 
165 inline std::string get_table_schema(const std::string& archive_path,
166  const std::string& table,
167  const std::string& compression) {
168  const auto schema_str =
169  simple_file_cat(archive_path, table_schema_filename, compression);
170  std::regex regex("@T");
171  return std::regex_replace(schema_str, regex, table);
172 }
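// Note: the schema text stored in _table.sql is expected to contain "@T" as a
// placeholder for the table name (written by Catalog::dumpSchema), so a dumped
// "CREATE TABLE @T (...)" statement is rewritten here with the destination table name.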
173 
174 // If a table was altered, there may be a mapping from old column ids to new ones;
175 // these values need to be replaced in the page headers.
176 void rewrite_column_ids_in_page_headers(
177     const boost::filesystem::path& path,
178  const std::unordered_map<int, int>& column_ids_map,
179  const int32_t table_epoch) {
180  const std::string file_path = path.string();
181  const std::string file_name = path.filename().string();
182  std::vector<std::string> tokens;
183  boost::split(tokens, file_name, boost::is_any_of("."));
184 
185  // ref. FileMgr::init for hint of data file name layout
186  if (tokens.size() <= 2 || !(DATA_FILE_EXT == "." + tokens[2] || tokens[2] == "mapd")) {
187  // We are only interested in files in the form <id>.<page_size>.<DATA_FILE_EXT>
188  return;
189  }
190 
191  const auto page_size = boost::lexical_cast<int64_t>(tokens[1]);
192  const auto file_size = boost::filesystem::file_size(file_path);
193  std::unique_ptr<FILE, decltype(simple_file_closer)> fp(
194  std::fopen(file_path.c_str(), "r+"), simple_file_closer);
195  if (!fp) {
196  throw std::runtime_error("Failed to open " + file_path +
197  " for update: " + std::strerror(errno));
198  }
199  // TODO(Misiu): Rather than reference an external layout we should de-duplicate this
200  // page-reading code in a single location. This will also reduce the need for comments
201  // below.
202  // ref. FileInfo::openExistingFile for hint of chunk header layout
203  for (size_t page = 0; page < file_size / page_size; ++page) {
204  int32_t header_info[8];
205  if (0 != std::fseek(fp.get(), page * page_size, SEEK_SET)) {
206  throw std::runtime_error("Failed to seek to page# " + std::to_string(page) +
207  " in " + file_path + " for read: " + std::strerror(errno));
208  }
209  if (1 != fread(header_info, sizeof header_info, 1, fp.get())) {
210  throw std::runtime_error("Failed to read " + file_path + ": " +
211  std::strerror(errno));
212  }
213  if (const auto header_size = header_info[0]; header_size > 0) {
214  // header_info[1] is the page's db_id; but can also be used as an "is deleted"
215  // indicator if negative.
216  auto& contingent = header_info[1];
217  // header_info[2] is the page's table_id; but can also be used to store the page's
218  // epoch since the FileMgr stores table_id information separately.
219  auto& epoch = header_info[2];
220  auto& col_id = header_info[3];
221  if (File_Namespace::is_page_deleted_with_checkpoint(
222  table_epoch, epoch, contingent)) {
223  continue;
224  }
225  auto column_map_it = column_ids_map.find(col_id);
226  CHECK(column_map_it != column_ids_map.end()) << "could not find " << col_id;
227  // If a header contains a column id that is remapped to a new location,
228  // then write that change to the file.
229  if (const auto dest_col_id = column_map_it->second; col_id != dest_col_id) {
230  col_id = dest_col_id;
231  if (0 != std::fseek(fp.get(), page * page_size, SEEK_SET)) {
232  throw std::runtime_error("Failed to seek to page# " + std::to_string(page) +
233  " in " + file_path + " for write: " + std::strerror(errno));
234  }
235  if (1 != fwrite(header_info, sizeof header_info, 1, fp.get())) {
236  throw std::runtime_error("Failed to write " + file_path + ": " +
237  std::strerror(errno));
238  }
239  }
240  }
241  }
242 }
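// Summary of the rewrite above: each used page begins with an 8-int header whose
// fourth slot holds the chunk key's column id; pages flagged as deleted for the
// current epoch are skipped, and for the rest only the column id slot is changed
// before the header block is written back in place, leaving page payloads untouched.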
243 
244 // Adjust column ids in chunk keys in a table's data files under a temp_data_dir,
245 // including files of all shards of the table. Can be slow for big files but should
246 // scale better than refragmentizing. Table altering should be rare for OLAP.
247 void adjust_altered_table_files(const int32_t table_epoch,
248  const std::string& temp_data_dir,
249  const std::unordered_map<int, int>& column_ids_map) {
250  boost::filesystem::path base_path(temp_data_dir);
251  boost::filesystem::recursive_directory_iterator end_it;
252  ThreadController_NS::SimpleThreadController<> thread_controller(cpu_threads());
253  for (boost::filesystem::recursive_directory_iterator fit(base_path); fit != end_it;
254  ++fit) {
255  if (!boost::filesystem::is_symlink(fit->path()) &&
256  boost::filesystem::is_regular_file(fit->status())) {
257  thread_controller.startThread(
258  rewrite_column_ids_in_page_headers, fit->path(), column_ids_map, table_epoch);
259  thread_controller.checkThreadsStatus();
260  }
261  }
262  thread_controller.finish();
263 }
264 
265 void delete_old_symlinks(const std::string& table_data_dir) {
266  std::vector<boost::filesystem::path> symlinks;
267  for (boost::filesystem::directory_iterator it(table_data_dir), end_it; it != end_it;
268  it++) {
269  if (boost::filesystem::is_symlink(it->path())) {
270  symlinks.emplace_back(it->path());
271  }
272  }
273  for (const auto& symlink : symlinks) {
274  boost::filesystem::remove_all(symlink);
275  }
276 }
277 
278 void add_data_file_symlinks(const std::string& table_data_dir) {
279  std::map<boost::filesystem::path, boost::filesystem::path> old_to_new_paths;
280  for (boost::filesystem::directory_iterator it(table_data_dir), end_it; it != end_it;
281  it++) {
282  const auto path = boost::filesystem::canonical(it->path());
283  if (path.extension().string() == DATA_FILE_EXT) {
284  auto old_path = path;
285  old_path.replace_extension(File_Namespace::kLegacyDataFileExtension);
286  // Add a symlink to data file, if one does not exist.
287  if (!boost::filesystem::exists(old_path)) {
288  old_to_new_paths[old_path] = path;
289  }
290  }
291  }
292  for (const auto& [old_path, new_path] : old_to_new_paths) {
293  boost::filesystem::create_symlink(new_path.filename(), old_path);
294  }
295 }
296 
297 void rename_table_directories(const File_Namespace::GlobalFileMgr* global_file_mgr,
298  const std::string& temp_data_dir,
299  const std::vector<std::string>& target_paths,
300  const std::string& name_prefix) {
301  boost::filesystem::path base_path(temp_data_dir);
302  boost::filesystem::directory_iterator end_it;
303  int target_path_index = 0;
304  for (boost::filesystem::directory_iterator fit(base_path); fit != end_it; ++fit) {
305  if (!boost::filesystem::is_regular_file(fit->status())) {
306  const std::string file_path = fit->path().string();
307  const std::string file_name = fit->path().filename().string();
308  if (boost::istarts_with(file_name, name_prefix)) {
309  const std::string target_path =
310  abs_path(global_file_mgr) + "/" + target_paths[target_path_index++];
311  if (std::rename(file_path.c_str(), target_path.c_str())) {
312  throw std::runtime_error("Failed to rename file " + file_path + " to " +
313  target_path + ": " + std::strerror(errno));
314  }
315  // Delete any old/invalid symlinks contained in table dump.
316  delete_old_symlinks(target_path);
318  // For post-rebrand table dumps, symlinks need to be added here, since file mgr
319  // migration would already have been executed for the dumped table.
320  add_data_file_symlinks(target_path);
321  }
322  }
323  }
324 }
325 
326 } // namespace
327 
328 void TableArchiver::dumpTable(const TableDescriptor* td,
329  const std::string& archive_path,
330  const std::string& compression) {
331  if (td->is_system_table) {
332  throw std::runtime_error("Dumping a system table is not supported.");
333  }
334  ddl_utils::validate_allowed_file_path(archive_path,
335  ddl_utils::DataTransferType::EXPORT);
336  if (g_cluster) {
337  throw std::runtime_error("DUMP/RESTORE is not supported yet on distributed setup.");
338  }
339  if (boost::filesystem::exists(archive_path)) {
340  throw std::runtime_error("Archive " + archive_path + " already exists.");
341  }
342  if (td->isView || td->persistenceLevel != Data_Namespace::MemoryLevel::DISK_LEVEL) {
343  throw std::runtime_error("Dumping view or temporary table is not supported.");
344  }
345  // collect paths of files to archive
346  const auto global_file_mgr = cat_->getDataMgr().getGlobalFileMgr();
347  std::vector<std::string> file_paths;
348  auto file_writer = [&file_paths, global_file_mgr](const std::string& file_name,
349  const std::string& file_type,
350  const std::string& file_data) {
351  const auto file_path = abs_path(global_file_mgr) + "/" + file_name;
352  std::unique_ptr<FILE, decltype(simple_file_closer)> fp(
353  std::fopen(file_path.c_str(), "w"), simple_file_closer);
354  if (!fp) {
355  throw std::runtime_error("Failed to create " + file_type + " file '" + file_path +
356  "': " + std::strerror(errno));
357  }
358  if (std::fwrite(file_data.data(), 1, file_data.size(), fp.get()) < file_data.size()) {
359  throw std::runtime_error("Failed to write " + file_type + " file '" + file_path +
360  "': " + std::strerror(errno));
361  }
362  file_paths.push_back(file_name);
363  };
364 
365  const auto table_name = td->tableName;
366  {
367  // - gen schema file
368  const auto schema_str = cat_->dumpSchema(td);
369  file_writer(table_schema_filename, "table schema", schema_str);
370  // - gen column-old-info file
371  const auto cds = cat_->getAllColumnMetadataForTable(td->tableId, true, true, true);
372  std::vector<std::string> column_oldinfo;
373  std::transform(cds.begin(),
374  cds.end(),
375  std::back_inserter(column_oldinfo),
376  [&](const auto cd) -> std::string {
377  return cd->columnName + ":" + std::to_string(cd->columnId) + ":" +
378  cat_->getColumnDictDirectory(cd);
379  });
380  const auto column_oldinfo_str = boost::algorithm::join(column_oldinfo, " ");
381  file_writer(table_oldinfo_filename, "table old info", column_oldinfo_str);
382  // - gen table epoch
383  const auto epoch = cat_->getTableEpoch(cat_->getCurrentDB().dbId, td->tableId);
384  file_writer(table_epoch_filename, "table epoch", std::to_string(epoch));
385  // - collect table data file paths ...
386  const auto data_file_dirs = cat_->getTableDataDirectories(td);
387  file_paths.insert(file_paths.end(), data_file_dirs.begin(), data_file_dirs.end());
388  // - collect table dict file paths ...
389  const auto dict_file_dirs = cat_->getTableDictDirectories(td);
390  file_paths.insert(file_paths.end(), dict_file_dirs.begin(), dict_file_dirs.end());
391  // tar takes time. release cat lock to yield the cat to concurrent CREATE statements.
392  }
393  // run tar to archive the files ... this may take a while !!
394  run("tar " + compression + " -cvf " + get_quoted_string(archive_path) + " " +
395  boost::algorithm::join(file_paths, " "),
396  abs_path(global_file_mgr));
397 }
398 
399 // Restore data and dict files of a table from a tgz archive.
400 void TableArchiver::restoreTable(const Catalog_Namespace::SessionInfo& session,
401  const TableDescriptor* td,
402  const std::string& archive_path,
403  const std::string& compression) {
404  ddl_utils::validate_allowed_file_path(archive_path,
405  ddl_utils::DataTransferType::IMPORT);
406  if (g_cluster) {
407  throw std::runtime_error("DUMP/RESTORE is not supported yet on distributed setup.");
408  }
409  if (!boost::filesystem::exists(archive_path)) {
410  throw std::runtime_error("Archive " + archive_path + " does not exist.");
411  }
412  if (td->isView || td->persistenceLevel != Data_Namespace::MemoryLevel::DISK_LEVEL) {
413  throw std::runtime_error("Restoring view or temporary table is not supported.");
414  }
415  // Obtain table schema read lock to prevent modification of the schema during
416  // restoration
417  const auto table_read_lock =
418  lockmgr::TableSchemaLockMgr::getReadLockForTable(*cat_, td->tableName);
419  // prevent concurrent inserts into table during restoration
420  const auto insert_data_lock =
421  lockmgr::InsertDataLockMgr::getWriteLockForTable(*cat_, td->tableName);
422 
423  // untar takes time. no grab of cat lock to yield to concurrent CREATE stmts.
424  const auto global_file_mgr = cat_->getDataMgr().getGlobalFileMgr();
425  // dirs where src files are untarred and dst files are backed up
426  constexpr static const auto temp_data_basename = "_data";
427  constexpr static const auto temp_back_basename = "_back";
428  const auto temp_data_dir = abs_path(global_file_mgr) + "/" + temp_data_basename;
429  const auto temp_back_dir = abs_path(global_file_mgr) + "/" + temp_back_basename;
430  // clean up tmp dirs and files in any case
431  auto tmp_files_cleaner = [&](void*) {
432  run("rm -rf " + temp_data_dir + " " + temp_back_dir);
433  run("rm -f " + abs_path(global_file_mgr) + "/" + table_schema_filename);
434  run("rm -f " + abs_path(global_file_mgr) + "/" + table_oldinfo_filename);
435  run("rm -f " + abs_path(global_file_mgr) + "/" + table_epoch_filename);
436  };
437  std::unique_ptr<decltype(tmp_files_cleaner), decltype(tmp_files_cleaner)> tfc(
438  &tmp_files_cleaner, tmp_files_cleaner);
439 
440  // extract & parse schema
441  const auto schema_str = get_table_schema(archive_path, td->tableName, compression);
442  std::unique_ptr<Parser::Stmt> stmt = Parser::create_stmt_for_query(schema_str, session);
443  const auto create_table_stmt = dynamic_cast<Parser::CreateTableStmt*>(stmt.get());
444  CHECK(create_table_stmt);
445 
446  // verify compatibility between source and destination schemas
447  TableDescriptor src_td;
448  std::list<ColumnDescriptor> src_columns;
449  std::vector<Parser::SharedDictionaryDef> shared_dict_defs;
450  create_table_stmt->executeDryRun(session, src_td, src_columns, shared_dict_defs);
451  // - sanity check table-level compatibility
452  if (src_td.hasDeletedCol != td->hasDeletedCol) {
453  // TODO: allow the case in which src data enables vacuum while
454  // dst doesn't, by simply discarding src $deleted column data.
455  throw std::runtime_error("Incompatible table VACUUM option");
456  }
457  if (src_td.nShards != td->nShards) {
458  // TODO: allow different shard numbers if they have a "GCD",
459  // by splitting/merging src data files before drop into dst.
460  throw std::runtime_error("Unmatched number of table shards");
461  }
462  // - sanity check column-level compatibility (based on column names)
463  const auto dst_columns =
464  cat_->getAllColumnMetadataForTable(td->tableId, false, false, false);
465  if (dst_columns.size() != src_columns.size()) {
466  throw std::runtime_error("Unmatched number of table columns");
467  }
468  for (const auto& [src_cd, dst_cd] : boost::combine(src_columns, dst_columns)) {
469  if (src_cd.columnType.get_type_name() != dst_cd->columnType.get_type_name() ||
470  src_cd.columnType.get_compression_name() !=
471  dst_cd->columnType.get_compression_name()) {
472  throw std::runtime_error("Incompatible types on column " + src_cd.columnName);
473  }
474  }
475  // extract src table column ids (ALL columns incl. system/virtual/phy geo cols)
476  const auto all_src_oldinfo_str =
477  simple_file_cat(archive_path, table_oldinfo_filename, compression);
478  std::vector<std::string> src_oldinfo_strs;
479  boost::algorithm::split(src_oldinfo_strs,
480  all_src_oldinfo_str,
481  boost::is_any_of(" "),
482  boost::token_compress_on);
483  auto all_dst_columns =
484  cat_->getAllColumnMetadataForTable(td->tableId, true, true, true);
485  if (src_oldinfo_strs.size() != all_dst_columns.size()) {
486  throw std::runtime_error("Source table has an unmatched number of columns: " +
487  std::to_string(src_oldinfo_strs.size()) + " vs " +
488  std::to_string(all_dst_columns.size()));
489  }
490  // build a map of src column ids and dst column ids, just in case src table has been
491 // ALTERed before and chunk keys of src table need to be adjusted accordingly.
492  // note: this map is used only for the case of migrating a table and not for restoring
493  // a table. When restoring a table, the two tables must have the same column ids.
494  //
495  // also build a map of src dict paths and dst dict paths for relocating src dicts
496  std::unordered_map<int, int> column_ids_map;
497  std::unordered_map<std::string, std::string> dict_paths_map;
498  // sort inputs of transform in lexical order of column names for correct mappings
499  std::list<std::vector<std::string>> src_oldinfo_tokens;
500  std::transform(
501  src_oldinfo_strs.begin(),
502  src_oldinfo_strs.end(),
503  std::back_inserter(src_oldinfo_tokens),
504  [](const auto& src_oldinfo_str) -> auto {
505  std::vector<std::string> tokens;
506  boost::algorithm::split(
507  tokens, src_oldinfo_str, boost::is_any_of(":"), boost::token_compress_on);
508  return tokens;
509  });
510  src_oldinfo_tokens.sort(
511  [](const auto& lhs, const auto& rhs) { return lhs[0].compare(rhs[0]) < 0; });
512  all_dst_columns.sort(
513  [](auto a, auto b) { return a->columnName.compare(b->columnName) < 0; });
514  // transform inputs into the maps
515  std::transform(src_oldinfo_tokens.begin(),
516  src_oldinfo_tokens.end(),
517  all_dst_columns.begin(),
518  std::inserter(column_ids_map, column_ids_map.end()),
519  [&](const auto& tokens, const auto& cd) -> std::pair<int, int> {
520  VLOG(3) << boost::algorithm::join(tokens, ":") << " ==> "
521  << cd->columnName << ":" << cd->columnId;
522  dict_paths_map[tokens[2]] = cat_->getColumnDictDirectory(cd);
523  return {boost::lexical_cast<int>(tokens[1]), cd->columnId};
524  });
525  bool was_table_altered = false;
526  std::for_each(column_ids_map.begin(), column_ids_map.end(), [&](auto& it) {
527  was_table_altered = was_table_altered || it.first != it.second;
528  });
529  VLOG(3) << "was_table_altered = " << was_table_altered;
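// Example (hypothetical): if the dumped table had a column dropped and re-added, its
// surviving columns might carry ids {1, 2, 4} while the freshly created destination
// table gets {1, 2, 3}; column_ids_map would then be {1->1, 2->2, 4->3},
// was_table_altered becomes true, and the page headers are adjusted below.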
530  // extract all data files to a temp dir. we swap with the dst table dir only after
531  // everything is set, otherwise the table could be corrupted if anything fails midway.
532  run("rm -rf " + temp_data_dir);
533  run("mkdir -p " + temp_data_dir);
534  run("tar " + compression + " -xvf " + get_quoted_string(archive_path), temp_data_dir);
535 
536  // if table was ever altered after it was created, update column ids in chunk headers.
537  if (was_table_altered) {
538  const auto epoch = boost::lexical_cast<int32_t>(
539  simple_file_cat(archive_path, table_epoch_filename, compression));
540  const auto time_ms = measure<>::execution(
541  [&]() { adjust_altered_table_files(epoch, temp_data_dir, column_ids_map); });
542  VLOG(3) << "adjust_altered_table_files: " << time_ms << " ms";
543  }
544  // finally, swap table data/dict dirs!
545  const auto data_file_dirs = cat_->getTableDataDirectories(td);
546  const auto dict_file_dirs = cat_->getTableDictDirectories(td);
547  // move current target dirs, if they exist, to the backup dir
548  std::vector<std::string> both_file_dirs;
549  std::merge(data_file_dirs.begin(),
550  data_file_dirs.end(),
551  dict_file_dirs.begin(),
552  dict_file_dirs.end(),
553  std::back_inserter(both_file_dirs));
554  bool backup_completed = false;
555  try {
556  run("rm -rf " + temp_back_dir);
557  run("mkdir -p " + temp_back_dir);
558  for (const auto& dir : both_file_dirs) {
559  const auto dir_full_path = abs_path(global_file_mgr) + "/" + dir;
560  if (boost::filesystem::is_directory(dir_full_path)) {
561  run("mv " + dir_full_path + " " + temp_back_dir);
562  }
563  }
564  backup_completed = true;
565  // Move table directories from temp dir to main data directory.
566  rename_table_directories(global_file_mgr, temp_data_dir, data_file_dirs, "table_");
567  // Move dictionaries from temp dir to main dir.
568  for (const auto& dit : dict_paths_map) {
569  if (!dit.first.empty() && !dit.second.empty()) {
570  const auto src_dict_path = temp_data_dir + "/" + dit.first;
571  const auto dst_dict_path = abs_path(global_file_mgr) + "/" + dit.second;
572  run("mv " + src_dict_path + " " + dst_dict_path);
573  }
574  }
575  // throw if sanity test forces a rollback
576  if (g_test_rollback_dump_restore) {
577  throw std::runtime_error("lol!");
578  }
579  } catch (...) {
580  // once the backup is completed, whatever remains under abs_path(global_file_mgr)
581  // are the "src" dirs, which are to be rolled back and discarded
582  if (backup_completed) {
583  run("rm -rf " + boost::algorithm::join(both_file_dirs, " "),
584  abs_path(global_file_mgr));
585  }
586  // complete rollback by recovering original "dst" table dirs from backup dir
587  boost::filesystem::path base_path(temp_back_dir);
588  boost::filesystem::directory_iterator end_it;
589  for (boost::filesystem::directory_iterator fit(base_path); fit != end_it; ++fit) {
590  run("mv " + fit->path().string() + " .", abs_path(global_file_mgr));
591  }
592  throw;
593  }
594  // set for reloading table from the restored/migrated files
595  const auto epoch = simple_file_cat(archive_path, table_epoch_filename, compression);
596  cat_->setTableEpoch(
597  cat_->getCurrentDB().dbId, td->tableId, boost::lexical_cast<int>(epoch));
598 }
599 
600 // Migrate a table, which doesn't exist in the current db, from a tar ball to the db.
601 // This actually creates the table and restores data/dict files from the tar ball.
602 void TableArchiver::restoreTable(const Catalog_Namespace::SessionInfo& session,
603  const std::string& table_name,
604  const std::string& archive_path,
605  const std::string& compression) {
606  // replace table name and drop foreign dict references
607  const auto schema_str = get_table_schema(archive_path, table_name, compression);
608  std::unique_ptr<Parser::Stmt> stmt = Parser::create_stmt_for_query(schema_str, session);
609  const auto create_table_stmt = dynamic_cast<Parser::CreateTableStmt*>(stmt.get());
610  CHECK(create_table_stmt);
611  create_table_stmt->execute(session, false /*read-only*/);
612 
613  try {
614  restoreTable(
615  session, cat_->getMetadataForTable(table_name), archive_path, compression);
616  } catch (...) {
617  const auto schema_str = "DROP TABLE IF EXISTS " + table_name + ";";
618  std::unique_ptr<Parser::Stmt> stmt =
619  Parser::create_stmt_for_query(schema_str, session);
620  const auto drop_table_stmt = dynamic_cast<Parser::DropTableStmt*>(stmt.get());
621  CHECK(drop_table_stmt);
622  drop_table_stmt->execute(session, false /*read-only*/);
623 
624  throw;
625  }
626 }
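// Usage sketch (hypothetical caller; the real entry points are the DUMP TABLE /
// RESTORE TABLE DDL paths, and the constructor call shown here is an assumption).
// The compression argument is passed straight through to tar, e.g. "--gzip" or "".
//
//   TableArchiver archiver(&catalog);                    // assumed ctor taking Catalog*
//   archiver.dumpTable(td, "/tmp/t.dump.gz", "--gzip");  // schema + epoch + data + dicts
//   archiver.restoreTable(session, "t_copy", "/tmp/t.dump.gz", "--gzip");  // create + restore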