MetaConnect.java
/*
 * Copyright 2022 HEAVY.AI, Inc.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

package com.mapd.metadata;

import com.google.common.collect.ImmutableList;
import com.google.gson.Gson;
import com.google.gson.JsonArray;
import com.google.gson.JsonElement;
import com.google.gson.JsonObject;
import com.mapd.calcite.parser.HeavyDBParser;
import com.mapd.calcite.parser.HeavyDBTable;
import com.mapd.calcite.parser.HeavyDBUser;
import com.mapd.calcite.parser.HeavyDBView;
import com.mapd.common.SockTransportProperties;

import org.apache.calcite.schema.Table;
import org.apache.thrift.TException;
import org.apache.thrift.protocol.TBinaryProtocol;
import org.apache.thrift.protocol.TProtocol;
import org.apache.thrift.transport.TServerSocket;
import org.apache.thrift.transport.TSocket;
import org.apache.thrift.transport.TTransport;
import org.apache.thrift.transport.TTransportException;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;

import java.io.File;
import java.io.FileInputStream;
import java.io.IOException;
import java.sql.Connection;
import java.sql.DriverManager;
import java.sql.ResultSet;
import java.sql.SQLException;
import java.sql.Statement;
import java.util.ArrayList;
import java.util.HashSet;
import java.util.List;
import java.util.Map;
import java.util.Map.Entry;
import java.util.Set;
import java.util.concurrent.ConcurrentHashMap;

import ai.heavy.thrift.server.Heavy;
import ai.heavy.thrift.server.TColumnType;
import ai.heavy.thrift.server.TDBException;
import ai.heavy.thrift.server.TDBInfo;
import ai.heavy.thrift.server.TDatumType;
import ai.heavy.thrift.server.TEncodingType;
import ai.heavy.thrift.server.TTableDetails;
import ai.heavy.thrift.server.TTypeInfo;

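/**
 * MetaConnect resolves database and table metadata for the Calcite layer,
 * either by reading the SQLite catalog files directly (dbPort == -1) or by
 * asking a local server over Thrift. Results are cached in the static
 * DATABASE_TO_TABLES and DB_TABLE_DETAILS maps shared by all instances.
 *
 * A minimal usage sketch (the port, data directory, and database name are
 * hypothetical; user, parser, and skT must be supplied by the caller):
 *
 * <pre>{@code
 * MetaConnect mc =
 *         new MetaConnect(6274, "/var/lib/heavyai", user, parser, skT, "heavyai");
 * Set<String> tables = mc.getTables();
 * Table t = mc.getTable("my_table");
 * }</pre>
 */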
public class MetaConnect {
  final static Logger HEAVYDBLOGGER = LoggerFactory.getLogger(MetaConnect.class);
  private final String dataDir;
  private final String default_db;
  private final HeavyDBUser currentUser;
  private final int dbPort;
  private Connection catConn;
  private final HeavyDBParser parser;

  private static final int KBOOLEAN = 1;
  private static final int KCHAR = 2;
  private static final int KVARCHAR = 3;
  private static final int KNUMERIC = 4;
  private static final int KDECIMAL = 5;
  private static final int KINT = 6;
  private static final int KSMALLINT = 7;
  private static final int KFLOAT = 8;
  private static final int KDOUBLE = 9;
  private static final int KTIME = 10;
  private static final int KTIMESTAMP = 11;
  private static final int KBIGINT = 12;
  private static final int KTEXT = 13;
  private static final int KDATE = 14;
  private static final int KARRAY = 15;
  private static final int KINTERVAL_DAY_TIME = 16;
  private static final int KINTERVAL_YEAR_MONTH = 17;
  private static final int KPOINT = 18;
  private static final int KLINESTRING = 19;
  private static final int KPOLYGON = 20;
  private static final int KMULTIPOLYGON = 21;
  private static final int KTINYINT = 22;
  private static final int KMULTILINESTRING = 30;
  private static final int KMULTIPOINT = 31;

  private static final String CATALOG_DIR_NAME = "catalogs";
  private static volatile Map<String, Set<String>> DATABASE_TO_TABLES =
          new ConcurrentHashMap<>();
  private static volatile Map<List<String>, Table> DB_TABLE_DETAILS =
          new ConcurrentHashMap<>();
  private final SockTransportProperties sock_transport_properties;

  public MetaConnect(int dbPort,
          String dataDir,
          HeavyDBUser currentHeavyDBUser,
          HeavyDBParser parser,
          SockTransportProperties skT,
          String db) {
    this.dataDir = dataDir;
    if (db != null) {
      this.default_db = db;
    } else {
      if (currentHeavyDBUser != null) {
        this.default_db = currentHeavyDBUser.getDB();
      } else {
        this.default_db = null;
      }
    }
    this.currentUser = currentHeavyDBUser;
    this.dbPort = dbPort;
    this.parser = parser;
    this.sock_transport_properties = skT;

    // Check to see if we have a populated DATABASE_TO_TABLES structure;
    // the first time in we need to make sure this gets populated.
    // It is OK to use a MetaConnect without a user,
    // but it should not attempt to populate the DB.
    if (currentUser != null && DATABASE_TO_TABLES.size() == 0) {
      // get all databases
      populateDatabases();
    }
  }

  public MetaConnect(int dbPort,
          String dataDir,
          HeavyDBUser currentHeavyDBUser,
          HeavyDBParser parser,
          SockTransportProperties skT) {
    this(dbPort, dataDir, currentHeavyDBUser, parser, skT, null);
  }

  public List<String> getDatabases() {
    List<String> dbList = new ArrayList<String>(DATABASE_TO_TABLES.size());
    for (String db : DATABASE_TO_TABLES.keySet()) {
      dbList.add(db);
    }
    return dbList;
  }

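  /**
   * Opens a JDBC connection to one of the SQLite catalog files under
   * dataDir/catalogs. For example (hypothetical data directory), the catalog
   * "heavyai" would be opened via the URL
   * "jdbc:sqlite:/var/lib/heavyai/catalogs/heavyai".
   */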
  private void connectToCatalog(String catalog) {
    try {
      Class.forName("org.sqlite.JDBC");
    } catch (ClassNotFoundException ex) {
      String err = "Could not find class for metadata connection; DB: '" + catalog
              + "' data dir '" + dataDir + "', error was " + ex.getMessage();
      HEAVYDBLOGGER.error(err);
      throw new RuntimeException(err);
    }
    String connectURL = "jdbc:sqlite:" + dataDir + "/" + CATALOG_DIR_NAME + "/" + catalog;
    try {
      catConn = DriverManager.getConnection(connectURL);
    } catch (SQLException ex) {
      String err = "Could not establish a connection for metadata; DB: '" + catalog
              + "' data dir '" + dataDir + "', error was " + ex.getMessage();
      HEAVYDBLOGGER.error(err);
      throw new RuntimeException(err);
    }
    HEAVYDBLOGGER.debug("Opened database successfully");
  }

  private void disconnectFromCatalog() {
    try {
      catConn.close();
    } catch (SQLException ex) {
      String err = "Could not disconnect from metadata; data dir '" + dataDir
              + "', error was " + ex.getMessage();
      HEAVYDBLOGGER.error(err);
      throw new RuntimeException(err);
    }
  }

  private void connectToDBCatalog() {
    connectToCatalog(default_db);
  }

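  /**
   * Returns the Calcite Table object for the given name, serving it from the
   * static DB_TABLE_DETAILS cache when possible. Views are wrapped as
   * HeavyDBView (their defining SQL is fetched and re-parsed); plain tables
   * become HeavyDBTable.
   */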
  public Table getTable(String tableName) {
    List<String> dbTable =
            ImmutableList.of(default_db.toUpperCase(), tableName.toUpperCase());
    Table cTable = DB_TABLE_DETAILS.get(dbTable);
    if (cTable != null) {
      HEAVYDBLOGGER.debug("Metaconnect DB " + default_db + " get table " + tableName
              + " details " + cTable);
      return cTable;
    }

    TTableDetails td = get_table_details(tableName);

    if (td.getView_sql() == null || td.getView_sql().isEmpty()) {
      HEAVYDBLOGGER.debug("Processing a table");
      Table rTable = new HeavyDBTable(td);
      DB_TABLE_DETAILS.putIfAbsent(dbTable, rTable);
      HEAVYDBLOGGER.debug("Metaconnect DB " + default_db + " get table " + tableName
              + " details " + rTable + " Not in buffer");
      return rTable;
    } else {
      HEAVYDBLOGGER.debug("Processing a view");
      Table rTable = new HeavyDBView(getViewSql(tableName), td, parser);
      DB_TABLE_DETAILS.putIfAbsent(dbTable, rTable);
      HEAVYDBLOGGER.debug("Metaconnect DB " + default_db + " get view " + tableName
              + " details " + rTable + " Not in buffer");
      return rTable;
    }
  }

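  /**
   * Returns the set of table names for the current database. Serves the
   * cached set when present; otherwise loads it from the SQLite catalog
   * (dbPort == -1) or from a local server via Thrift and caches the result
   * in DATABASE_TO_TABLES.
   */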
  public Set<String> getTables() {
    Set<String> mSet = DATABASE_TO_TABLES.get(default_db.toUpperCase());
    if (mSet != null && mSet.size() > 0) {
      HEAVYDBLOGGER.debug("Metaconnect DB getTables " + default_db + " tables " + mSet);
      return mSet;
    }

    if (dbPort == -1) {
      // use sql
      connectToDBCatalog();
      Set<String> ts = getTables_SQL();
      disconnectFromCatalog();
      DATABASE_TO_TABLES.put(default_db.toUpperCase(), ts);
      HEAVYDBLOGGER.debug(
              "Metaconnect DB getTables " + default_db + " tables " + ts + " from catDB");
      return ts;
    }
    // use thrift direct to local server
    try {
      TProtocol protocol = null;
      TTransport transport =
              sock_transport_properties.openClientTransport("localhost", dbPort);
      if (!transport.isOpen()) transport.open();
      protocol = new TBinaryProtocol(transport);

      Heavy.Client client = new Heavy.Client(protocol);
      List<String> tablesList =
              client.get_tables_for_database(currentUser.getSession(), default_db);
      Set<String> ts = new HashSet<String>(tablesList.size());
      for (String tableName : tablesList) {
        ts.add(tableName);
      }

      transport.close();
      DATABASE_TO_TABLES.put(default_db.toUpperCase(), ts);
      HEAVYDBLOGGER.debug("Metaconnect DB getTables " + default_db + " tables " + ts
              + " from server");
      return ts;

    } catch (TTransportException ex) {
      HEAVYDBLOGGER.error("TTransportException on port [" + dbPort + "]");
      HEAVYDBLOGGER.error(ex.toString());
      throw new RuntimeException(ex.toString());
    } catch (TDBException ex) {
      HEAVYDBLOGGER.error(ex.getError_msg());
      throw new RuntimeException(ex.getError_msg());
    } catch (TException ex) {
      HEAVYDBLOGGER.error(ex.toString());
      throw new RuntimeException(ex.toString());
    }
  }

  private Set<String> getTables_SQL() {
    Set<String> tableSet = new HashSet<String>();
    Statement stmt = null;
    ResultSet rs = null;
    try {
      stmt = catConn.createStatement();

      // get the tables
      rs = stmt.executeQuery("SELECT name FROM mapd_tables ");
      while (rs.next()) {
        tableSet.add(rs.getString("name"));
        HEAVYDBLOGGER.debug("Object name = " + rs.getString("name"));
      }
      rs.close();
      stmt.close();

    } catch (Exception e) {
      String err = "error trying to get all the tables, error was " + e.getMessage();
      HEAVYDBLOGGER.error(err);
      throw new RuntimeException(err);
    }

    try {
      // open temp table json file
      final String filePath =
              dataDir + "/" + CATALOG_DIR_NAME + "/" + default_db + "_temp_tables.json";
      HEAVYDBLOGGER.debug("Opening temp table file at " + filePath);
      String tempTablesJsonStr;
      try {
        File tempTablesFile = new File(filePath);
        FileInputStream tempTablesStream = new FileInputStream(tempTablesFile);
        byte[] data = new byte[(int) tempTablesFile.length()];
        tempTablesStream.read(data);
        tempTablesStream.close();

        tempTablesJsonStr = new String(data, "UTF-8");
      } catch (java.io.FileNotFoundException e) {
        return tableSet;
      }

      Gson gson = new Gson();
      JsonObject fileParentObject = gson.fromJson(tempTablesJsonStr, JsonObject.class);
      for (Entry<String, JsonElement> member : fileParentObject.entrySet()) {
        String tableName = member.getKey();
        tableSet.add(tableName);
        HEAVYDBLOGGER.debug("Temp table object name = " + tableName);
      }

    } catch (Exception e) {
      String err = "error trying to load temporary tables from json file, error was "
              + e.getMessage();
      HEAVYDBLOGGER.error(err);
      throw new RuntimeException(err);
    }

    return tableSet;
  }

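  /**
   * Fetches table details either directly from the SQLite catalog
   * (dbPort == -1) or from a local server via Thrift. In the SQL path,
   * tables missing from the catalog are looked up in the per-database
   * _temp_tables.json file and flagged is_temporary.
   */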
  public TTableDetails get_table_details(String tableName) {
    if (dbPort == -1) {
      // use sql
      connectToDBCatalog();
      TTableDetails td = get_table_detail_SQL(tableName);
      disconnectFromCatalog();
      return td;
    }
    try {
      // use thrift direct to local server
      TProtocol protocol = null;

      TTransport transport =
              sock_transport_properties.openClientTransport("localhost", dbPort);
      if (!transport.isOpen()) transport.open();
      protocol = new TBinaryProtocol(transport);

      Heavy.Client client = new Heavy.Client(protocol);
      TTableDetails td = client.get_internal_table_details_for_database(
              currentUser.getSession(), tableName, default_db);
      transport.close();

      return td;
    } catch (TTransportException ex) {
      HEAVYDBLOGGER.error(ex.toString());
      throw new RuntimeException(ex.toString());
    } catch (TDBException ex) {
      HEAVYDBLOGGER.error(ex.getError_msg());
      throw new RuntimeException(ex.getError_msg());
    } catch (TException ex) {
      HEAVYDBLOGGER.error(ex.toString());
      throw new RuntimeException(ex.toString());
    }
  }

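  /**
   * Number of hidden physical columns that accompany a geo column in the
   * catalog. For example, a POLYGON column is stored with four extra physical
   * columns (coords, ring_sizes, bounds, render_group), so
   * get_physical_cols(KPOLYGON) returns 4 and the column readers below skip
   * that many rows before emitting the next logical column.
   */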
  public static final int get_physical_cols(int type) {
    switch (type) {
      case KPOINT:
        return 1; // coords
      case KMULTIPOINT:
      case KLINESTRING:
        return 2; // coords, bounds
      case KMULTILINESTRING:
        return 3; // coords, linestring_sizes, bounds
      case KPOLYGON:
        return 4; // coords, ring_sizes, bounds, render_group
      case KMULTIPOLYGON:
        return 5; // coords, ring_sizes, poly_rings, bounds, render_group
      default:
        break;
    }
    return 0;
  }

  public static final boolean is_geometry(int type) {
    return type == KPOINT || type == KLINESTRING || type == KMULTILINESTRING
            || type == KPOLYGON || type == KMULTIPOLYGON || type == KMULTIPOINT;
  }

  private TTableDetails get_table_detail_SQL(String tableName) {
    TTableDetails td = new TTableDetails();
    td.getRow_descIterator();
    int id = getTableId(tableName);
    if (id == -1) {
      try {
        // need to mark it as a temporary table
        TTableDetails tempTableTd = get_table_detail_JSON(tableName);
        tempTableTd.is_temporary = true;
        return tempTableTd;
      } catch (Exception e) {
        String err =
                "Table '" + tableName + "' does not exist for DB '" + default_db + "'";
        HEAVYDBLOGGER.error(err);
        throw new RuntimeException(err);
      }
    }

    // read data from table
    Statement stmt = null;
    ResultSet rs = null;
    try {
      stmt = catConn.createStatement();
      HEAVYDBLOGGER.debug("table id is " + id);
      HEAVYDBLOGGER.debug("table name is " + tableName);
      String query = String.format(
              "SELECT * FROM mapd_columns where tableid = %d and not is_deletedcol order by columnid;",
              id);
      HEAVYDBLOGGER.debug(query);
      rs = stmt.executeQuery(query);
      int skip_physical_cols = 0;
      while (rs.next()) {
        String colName = rs.getString("name");
        HEAVYDBLOGGER.debug("name = " + colName);
        int colType = rs.getInt("coltype");
        HEAVYDBLOGGER.debug("coltype = " + colType);
        int colSubType = rs.getInt("colsubtype");
        HEAVYDBLOGGER.debug("colsubtype = " + colSubType);
        int compression = rs.getInt("compression");
        HEAVYDBLOGGER.debug("compression = " + compression);
        int compression_param = rs.getInt("comp_param");
        HEAVYDBLOGGER.debug("comp_param = " + compression_param);
        int size = rs.getInt("size");
        HEAVYDBLOGGER.debug("size = " + size);
        int colDim = rs.getInt("coldim");
        HEAVYDBLOGGER.debug("coldim = " + colDim);
        int colScale = rs.getInt("colscale");
        HEAVYDBLOGGER.debug("colscale = " + colScale);
        boolean isNotNull = rs.getBoolean("is_notnull");
        HEAVYDBLOGGER.debug("is_notnull = " + isNotNull);
        boolean isSystemCol = rs.getBoolean("is_systemcol");
        HEAVYDBLOGGER.debug("is_systemcol = " + isSystemCol);
        boolean isVirtualCol = rs.getBoolean("is_virtualcol");
        HEAVYDBLOGGER.debug("is_virtualcol = " + isVirtualCol);
        HEAVYDBLOGGER.debug("");
        TColumnType tct = new TColumnType();
        TTypeInfo tti = new TTypeInfo();
        TDatumType tdt;

        if (colType == KARRAY) {
          tti.is_array = true;
          tdt = typeToThrift(colSubType);
        } else {
          tti.is_array = false;
          tdt = typeToThrift(colType);
        }

        tti.nullable = !isNotNull;
        tti.encoding = encodingToThrift(compression);
        tti.comp_param = compression_param;
        tti.size = size;
        tti.type = tdt;
        tti.scale = colScale;
        tti.precision = colDim;

        tct.col_name = colName;
        tct.col_type = tti;
        tct.is_system = isSystemCol;

        if (skip_physical_cols <= 0) skip_physical_cols = get_physical_cols(colType);
        if (is_geometry(colType) || skip_physical_cols-- <= 0) td.addToRow_desc(tct);
      }
    } catch (Exception e) {
      String err = "error trying to read from mapd_columns, error was " + e.getMessage();
      HEAVYDBLOGGER.error(err);
      throw new RuntimeException(err);
    } finally {
      if (rs != null) {
        try {
          rs.close();
        } catch (SQLException ex) {
          String err = "Could not close resultset, error was " + ex.getMessage();
          HEAVYDBLOGGER.error(err);
          throw new RuntimeException(err);
        }
      }
      if (stmt != null) {
        try {
          stmt.close();
        } catch (SQLException ex) {
          String err = "Could not close stmt, error was " + ex.getMessage();
          HEAVYDBLOGGER.error(err);
          throw new RuntimeException(err);
        }
      }
    }
    if (isView(tableName)) {
      td.setView_sqlIsSet(true);
      td.setView_sql(getViewSqlViaSql(id));
    }
    return td;
  }

  private TTableDetails get_table_detail_JSON(String tableName)
          throws IOException, RuntimeException {
    TTableDetails td = new TTableDetails();
    td.getRow_descIterator();

    // open table json file
    final String filePath =
            dataDir + "/" + CATALOG_DIR_NAME + "/" + default_db + "_temp_tables.json";
    HEAVYDBLOGGER.debug("Opening temp table file at " + filePath);

    String tempTablesJsonStr;
    try {
      File tempTablesFile = new File(filePath);
      FileInputStream tempTablesStream = new FileInputStream(tempTablesFile);
      byte[] data = new byte[(int) tempTablesFile.length()];
      tempTablesStream.read(data);
      tempTablesStream.close();

      tempTablesJsonStr = new String(data, "UTF-8");
    } catch (java.io.FileNotFoundException e) {
      throw new RuntimeException("Failed to read temporary tables file.");
    }

    Gson gson = new Gson();
    JsonObject fileParentObject = gson.fromJson(tempTablesJsonStr, JsonObject.class);
    if (fileParentObject == null) {
      throw new IOException("Malformed temporary tables file.");
    }

    JsonObject tableObject = fileParentObject.getAsJsonObject(tableName);
    if (tableObject == null) {
      throw new RuntimeException(
              "Failed to find table " + tableName + " in temporary tables file.");
    }

    String jsonTableName = tableObject.get("name").getAsString();
    // compare by value, not by reference
    assert (tableName.equals(jsonTableName));
    int id = tableObject.get("id").getAsInt();
    HEAVYDBLOGGER.debug("table id is " + id);
    HEAVYDBLOGGER.debug("table name is " + tableName);

    JsonArray jsonColumns = tableObject.getAsJsonArray("columns");
    assert (jsonColumns != null);

    int skip_physical_cols = 0;
    for (JsonElement columnElement : jsonColumns) {
      JsonObject columnObject = columnElement.getAsJsonObject();

      String colName = columnObject.get("name").getAsString();
      HEAVYDBLOGGER.debug("name = " + colName);
      int colType = columnObject.get("coltype").getAsInt();
      HEAVYDBLOGGER.debug("coltype = " + colType);
      int colSubType = columnObject.get("colsubtype").getAsInt();
      HEAVYDBLOGGER.debug("colsubtype = " + colSubType);
      int compression = columnObject.get("compression").getAsInt();
      HEAVYDBLOGGER.debug("compression = " + compression);
      int compression_param = columnObject.get("comp_param").getAsInt();
      HEAVYDBLOGGER.debug("comp_param = " + compression_param);
      int size = columnObject.get("size").getAsInt();
      HEAVYDBLOGGER.debug("size = " + size);
      int colDim = columnObject.get("coldim").getAsInt();
      HEAVYDBLOGGER.debug("coldim = " + colDim);
      int colScale = columnObject.get("colscale").getAsInt();
      HEAVYDBLOGGER.debug("colscale = " + colScale);
      boolean isNotNull = columnObject.get("is_notnull").getAsBoolean();
      HEAVYDBLOGGER.debug("is_notnull = " + isNotNull);
      boolean isSystemCol = columnObject.get("is_systemcol").getAsBoolean();
      HEAVYDBLOGGER.debug("is_systemcol = " + isSystemCol);
      boolean isVirtualCol = columnObject.get("is_virtualcol").getAsBoolean();
      HEAVYDBLOGGER.debug("is_virtualcol = " + isVirtualCol);
      boolean isDeletedCol = columnObject.get("is_deletedcol").getAsBoolean();
      HEAVYDBLOGGER.debug("is_deletedcol = " + isDeletedCol);
      HEAVYDBLOGGER.debug("");

      if (isDeletedCol) {
        HEAVYDBLOGGER.debug("Skipping deleted column.");
        continue;
      }

      TColumnType tct = new TColumnType();
      TTypeInfo tti = new TTypeInfo();
      TDatumType tdt;

      if (colType == KARRAY) {
        tti.is_array = true;
        tdt = typeToThrift(colSubType);
      } else {
        tti.is_array = false;
        tdt = typeToThrift(colType);
      }

      tti.nullable = !isNotNull;
      tti.encoding = encodingToThrift(compression);
      tti.comp_param = compression_param;
      tti.size = size;
      tti.type = tdt;
      tti.scale = colScale;
      tti.precision = colDim;

      tct.col_name = colName;
      tct.col_type = tti;
      tct.is_system = isSystemCol;

      if (skip_physical_cols <= 0) skip_physical_cols = get_physical_cols(colType);
      if (is_geometry(colType) || skip_physical_cols-- <= 0) td.addToRow_desc(tct);
    }

    return td;
  }

  private int getTableId(String tableName) {
    Statement stmt = null;
    ResultSet rs = null;
    int tableId = -1;
    try {
      stmt = catConn.createStatement();
      rs = stmt.executeQuery(String.format(
              "SELECT tableid FROM mapd_tables where name = '%s' COLLATE NOCASE;",
              tableName));
      while (rs.next()) {
        tableId = rs.getInt("tableid");
        HEAVYDBLOGGER.debug("tableId = " + tableId);
        HEAVYDBLOGGER.debug("");
      }
      rs.close();
      stmt.close();
    } catch (Exception e) {
      String err = "Error trying to read from metadata table mapd_tables; DB: "
              + default_db + " data dir " + dataDir + ", error was " + e.getMessage();
      HEAVYDBLOGGER.error(err);
      throw new RuntimeException(err);
    } finally {
      if (rs != null) {
        try {
          rs.close();
        } catch (SQLException ex) {
          String err = "Could not close resultset, error was " + ex.getMessage();
          HEAVYDBLOGGER.error(err);
          throw new RuntimeException(err);
        }
      }
      if (stmt != null) {
        try {
          stmt.close();
        } catch (SQLException ex) {
          String err = "Could not close stmt, error was " + ex.getMessage();
          HEAVYDBLOGGER.error(err);
          throw new RuntimeException(err);
        }
      }
    }
    return (tableId);
  }

  private boolean isView(String tableName) {
    Statement stmt;
    ResultSet rs;
    int viewFlag = 0;
    try {
      stmt = catConn.createStatement();
      rs = stmt.executeQuery(String.format(
              "SELECT isview FROM mapd_tables where name = '%s' COLLATE NOCASE;",
              tableName));
      while (rs.next()) {
        viewFlag = rs.getInt("isview");
        HEAVYDBLOGGER.debug("viewFlag = " + viewFlag);
        HEAVYDBLOGGER.debug("");
      }
      rs.close();
      stmt.close();
    } catch (Exception e) {
      String err = "error trying to read from mapd_tables, error was " + e.getMessage();
      HEAVYDBLOGGER.error(err);
      throw new RuntimeException(err);
    }
    return (viewFlag == 1);
  }

  private String getViewSql(String tableName) {
    String sqlText;
    if (dbPort == -1) {
      // use sql
      connectToDBCatalog();
      sqlText = getViewSqlViaSql(getTableId(tableName));
      disconnectFromCatalog();
    } else {
      // use thrift direct to local server
      try {
        TProtocol protocol = null;

        TTransport transport =
                sock_transport_properties.openClientTransport("localhost", dbPort);
        if (!transport.isOpen()) transport.open();
        protocol = new TBinaryProtocol(transport);

        Heavy.Client client = new Heavy.Client(protocol);
        TTableDetails td = client.get_table_details_for_database(
                currentUser.getSession(), tableName, default_db);
        transport.close();

        sqlText = td.getView_sql();

      } catch (TTransportException ex) {
        HEAVYDBLOGGER.error(ex.toString());
        throw new RuntimeException(ex.toString());
      } catch (TDBException ex) {
        HEAVYDBLOGGER.error(ex.getError_msg());
        throw new RuntimeException(ex.getError_msg());
      } catch (TException ex) {
        HEAVYDBLOGGER.error(ex.toString());
        throw new RuntimeException(ex.toString());
      }
    }
    /* return the string without sqlite's trailing semicolon */
    if (sqlText.charAt(sqlText.length() - 1) == ';') {
      return (sqlText.substring(0, sqlText.length() - 1));
    } else {
      return (sqlText);
    }
  }

  // we assume there is already a DB connection here
  private String getViewSqlViaSql(int tableId) {
    Statement stmt;
    ResultSet rs;
    String sqlText = "";
    try {
      stmt = catConn.createStatement();
      rs = stmt.executeQuery(String.format(
              "SELECT sql FROM mapd_views where tableid = '%s' COLLATE NOCASE;",
              tableId));
      while (rs.next()) {
        sqlText = rs.getString("sql");
        HEAVYDBLOGGER.debug("View definition = " + sqlText);
        HEAVYDBLOGGER.debug("");
      }
      rs.close();
      stmt.close();
    } catch (Exception e) {
      String err = "error trying to read from mapd_views, error was " + e.getMessage();
      HEAVYDBLOGGER.error(err);
      throw new RuntimeException(err);
    }
    if (sqlText == null || sqlText.length() == 0) {
      String err = "No view text found";
      HEAVYDBLOGGER.error(err);
      throw new RuntimeException(err);
    }
    return sqlText;
  }

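  /**
   * Maps a numeric catalog type code (the K* constants above) to the Thrift
   * TDatumType enum; returns null for unknown codes. CHAR, VARCHAR, and TEXT
   * all collapse to TDatumType.STR, and NUMERIC collapses to DECIMAL.
   */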
  private TDatumType typeToThrift(int type) {
    switch (type) {
      case KBOOLEAN:
        return TDatumType.BOOL;
      case KTINYINT:
        return TDatumType.TINYINT;
      case KSMALLINT:
        return TDatumType.SMALLINT;
      case KINT:
        return TDatumType.INT;
      case KBIGINT:
        return TDatumType.BIGINT;
      case KFLOAT:
        return TDatumType.FLOAT;
      case KNUMERIC:
      case KDECIMAL:
        return TDatumType.DECIMAL;
      case KDOUBLE:
        return TDatumType.DOUBLE;
      case KTEXT:
      case KVARCHAR:
      case KCHAR:
        return TDatumType.STR;
      case KTIME:
        return TDatumType.TIME;
      case KTIMESTAMP:
        return TDatumType.TIMESTAMP;
      case KDATE:
        return TDatumType.DATE;
      case KINTERVAL_DAY_TIME:
        return TDatumType.INTERVAL_DAY_TIME;
      case KINTERVAL_YEAR_MONTH:
        return TDatumType.INTERVAL_YEAR_MONTH;
      case KPOINT:
        return TDatumType.POINT;
      case KMULTIPOINT:
        return TDatumType.MULTIPOINT;
      case KLINESTRING:
        return TDatumType.LINESTRING;
      case KMULTILINESTRING:
        return TDatumType.MULTILINESTRING;
      case KPOLYGON:
        return TDatumType.POLYGON;
      case KMULTIPOLYGON:
        return TDatumType.MULTIPOLYGON;
      default:
        return null;
    }
  }

  private TEncodingType encodingToThrift(int comp) {
    switch (comp) {
      case 0:
        return TEncodingType.NONE;
      case 1:
        return TEncodingType.FIXED;
      case 2:
        return TEncodingType.RL;
      case 3:
        return TEncodingType.DIFF;
      case 4:
        return TEncodingType.DICT;
      case 5:
        return TEncodingType.SPARSE;
      case 6:
        return TEncodingType.GEOINT;
      case 7:
        return TEncodingType.DATE_IN_DAYS;
      default:
        return null;
    }
  }

  private void populateDatabases() {
    // TODO 13 Mar 2021 MAT
    // This probably has to come across from the server on first start-up
    // rather than be lazily instantiated here, as a user may not be able to
    // see all schemas and this sets it for the life of the server.
    // Proceeding this way as a WIP.
    if (dbPort == 0) {
      // seems to be a condition that is expected
      // for FSI testing
      return;
    }
    if (dbPort == -1) {
      // use sql
      connectToCatalog("system_catalog"); // hardcoded sys catalog
      Set<String> dbNames = getDatabases_SQL();
      disconnectFromCatalog();
      for (String dbName : dbNames) {
        Set<String> ts = new HashSet<String>();
        DATABASE_TO_TABLES.putIfAbsent(dbName, ts);
      }
      return;
    }
    // use thrift direct to local server
    try {
      TProtocol protocol = null;
      TTransport transport =
              sock_transport_properties.openClientTransport("localhost", dbPort);
      if (!transport.isOpen()) transport.open();
      protocol = new TBinaryProtocol(transport);

      Heavy.Client client = new Heavy.Client(protocol);

      List<TDBInfo> dbList = client.get_databases(currentUser.getSession());
      for (TDBInfo dbInfo : dbList) {
        Set<String> ts = new HashSet<String>();
        DATABASE_TO_TABLES.putIfAbsent(dbInfo.db_name, ts);
      }
      transport.close();

    } catch (TTransportException ex) {
      HEAVYDBLOGGER.error("TTransportException on port [" + dbPort + "]");
      HEAVYDBLOGGER.error(ex.toString());
      throw new RuntimeException(ex.toString());
    } catch (TDBException ex) {
      HEAVYDBLOGGER.error(ex.getError_msg());
      throw new RuntimeException(ex.getError_msg());
    } catch (TException ex) {
      HEAVYDBLOGGER.error(ex.toString());
      throw new RuntimeException(ex.toString());
    }
  }

  private Set<String> getDatabases_SQL() {
    Set<String> dbSet = new HashSet<String>();
    Statement stmt = null;
    ResultSet rs = null;
    try {
      stmt = catConn.createStatement();

      // get the databases
      rs = stmt.executeQuery("SELECT name FROM mapd_databases ");
      while (rs.next()) {
        dbSet.add(rs.getString("name"));
        HEAVYDBLOGGER.debug("Object name = " + rs.getString("name"));
      }
      rs.close();
      stmt.close();

    } catch (Exception e) {
      String err = "error trying to get all the databases, error was " + e.getMessage();
      HEAVYDBLOGGER.error(err);
      throw new RuntimeException(err);
    }
    return dbSet;
  }

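  /**
   * Invalidates cached metadata after a DDL change. An empty table name means
   * an entire database was dropped (or created), so every cached entry for
   * that schema is evicted; otherwise only the named table is evicted. Cached
   * views in the schema are always evicted, since their SQL may reference the
   * changed table.
   */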
  public void updateMetaData(String schema, String table) {
    // Check if a table is specified; if not, we are dropping an entire DB,
    // so we need to remove all tables for that DB.
    if (table.equals("")) {
      // Drop the DB and all its tables:
      // iterate through all entries and remove those matching the schema.
      Set<List<String>> all = new HashSet<>(DB_TABLE_DETAILS.keySet());
      for (List<String> keys : all) {
        if (keys.get(0).equals(schema.toUpperCase())) {
          HEAVYDBLOGGER.debug(
                  "removing all for schema " + keys.get(0) + " table " + keys.get(1));
          DB_TABLE_DETAILS.remove(keys);
        }
      }
    } else {
      HEAVYDBLOGGER.debug("removing schema " + schema.toUpperCase() + " table "
              + table.toUpperCase());
      DB_TABLE_DETAILS.remove(
              ImmutableList.of(schema.toUpperCase(), table.toUpperCase()));
    }
    // Invalidate views
    Set<List<String>> all = new HashSet<>(DB_TABLE_DETAILS.keySet());
    for (List<String> keys : all) {
      if (keys.get(0).equals(schema.toUpperCase())) {
        Table ttable = DB_TABLE_DETAILS.get(keys);
        if (ttable instanceof HeavyDBView) {
          HEAVYDBLOGGER.debug(
                  "removing view in schema " + keys.get(0) + " view " + keys.get(1));
          DB_TABLE_DETAILS.remove(keys);
        }
      }
    }
    // Could be a removal or an add request for a DB
    Set<String> mSet = DATABASE_TO_TABLES.get(schema.toUpperCase());
    if (mSet != null) {
      HEAVYDBLOGGER.debug("removing schema " + schema.toUpperCase());
      DATABASE_TO_TABLES.remove(schema.toUpperCase());
    } else {
      // add an empty database descriptor for the new DB; it will be lazily
      // populated when required
      Set<String> ts = new HashSet<String>();
      DATABASE_TO_TABLES.putIfAbsent(schema.toUpperCase(), ts);
    }
  }
}