OmniSciDB  c1a53651b2
com.mapd.utility.SQLImporter Class Reference

Static Public Member Functions

static void main (String[] args)
 

Protected Attributes

String session = null
 
Heavy.Client client = null
 

Package Functions

void doWork (String[] args)
 
void executeQuery ()
 

Package Attributes

Db_vendor_types vendor_types = null
 

Static Package Attributes

static final Logger LOGGER = LoggerFactory.getLogger(SQLImporter.class)
 

Private Member Functions

void run_init (Connection conn)
 
void help (Options options)
 
void checkDBTable (Connection otherdb_conn, ResultSetMetaData md) throws SQLException
 
void verifyColumnSignaturesMatch (Connection otherdb_conn, List< TColumnType > dstColumns, ResultSetMetaData srcColumns) throws SQLException
 
void createDBTable (Connection otherdb_conn, ResultSetMetaData metaData)
 
void createDBConnection ()
 
List< TColumnType > getColumnInfo (String tName)
 
boolean tableExists (String tName)
 
void executeDBCommand (String sql)
 
String getColType (int cType, int precision, int scale)
 
TColumn setupBinaryColumn (int i, ResultSetMetaData md, int bufferSize) throws SQLException
 
void setColValue (ResultSet rs, TColumn col, int columnType, int colNum, int scale, String colTypeName) throws SQLException
 
void resetBinaryColumn (int i, ResultSetMetaData md, int bufferSize, TColumn col) throws SQLException
 

Private Attributes

CommandLine cmd = null
 
DateTimeUtils dateTimeUtils
 

Detailed Description

Definition at line 322 of file SQLImporter.java.
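
SQLImporter copies the result set of an arbitrary SQL query from a JDBC source database into a HEAVY.AI (formerly OmniSciDB) table, creating or validating the target table along the way. A hypothetical programmatic invocation is sketched below; the long option names ("user", "passwd", "database", "targetTable", "jdbcConnect", "sqlStmt", ...) come from the getOptionValue() calls documented on this page, but the short flag spellings are defined by SQLImporter_args, which is not shown here, so treat them as assumptions.

// Hypothetical driver for the importer. Only the long option names are taken
// from the code on this page; the short flag spellings are assumptions.
public class ImportExample {
  public static void main(String[] args) {
    com.mapd.utility.SQLImporter.main(new String[] {
        "-u", "admin", // target DB user ("user")
        "-p", "HyperInteractive", // target DB password ("passwd")
        "-db", "omnisci", // target database ("database")
        "-t", "trips", // target table ("targetTable")
        "-su", "source_user", // source JDBC user ("sourceUser")
        "-sp", "source_password", // source JDBC password ("sourcePasswd")
        "-c", "jdbc:postgresql://localhost/source_db", // "jdbcConnect"
        "-ss", "select * from trips"}); // query whose results are copied ("sqlStmt")
  }
}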

Member Function Documentation

void com.mapd.utility.SQLImporter.checkDBTable(Connection otherdb_conn, ResultSetMetaData md) throws SQLException [inline, private]

Definition at line 545 of file SQLImporter.java.

References com.mapd.utility.SQLImporter.cmd, com.mapd.utility.SQLImporter.createDBConnection(), com.mapd.utility.SQLImporter.createDBTable(), com.mapd.utility.SQLImporter.executeDBCommand(), com.mapd.utility.SQLImporter.getColumnInfo(), com.mapd.utility.SQLImporter.tableExists(), and com.mapd.utility.SQLImporter.verifyColumnSignaturesMatch().

Referenced by com.mapd.utility.SQLImporter.executeQuery().

{
  String tName = cmd.getOptionValue("targetTable");

  if (tableExists(tName)) {
    // check if we want to truncate
    if (cmd.hasOption("truncate")) {
      executeDBCommand("Drop table " + tName);
      createDBTable(otherdb_conn, md);
    } else {
      List<TColumnType> columnInfo = getColumnInfo(tName);
      verifyColumnSignaturesMatch(otherdb_conn, columnInfo, md);
    }
  } else {
    createDBTable(otherdb_conn, md);
  }
}
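
Despite its name, the truncate option does not issue a SQL TRUNCATE: the existing target table is dropped and recreated from the source metadata.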


void com.mapd.utility.SQLImporter.createDBConnection() [inline, private]

Definition at line 737 of file SQLImporter.java.

References com.mapd.utility.SQLImporter.client, com.mapd.utility.SQLImporter.cmd, and com.mapd.utility.SQLImporter.session.

Referenced by com.mapd.utility.SQLImporter.checkDBTable().

{
  TTransport transport = null;
  TProtocol protocol = new TBinaryProtocol(transport);
  int port = Integer.valueOf(cmd.getOptionValue("port", "6274"));
  String server = cmd.getOptionValue("server", "localhost");
  try {
    // Uses default certificate stores.
    boolean load_trust_store = cmd.hasOption("https");
    SockTransportProperties skT = null;
    if (cmd.hasOption("https")) {
      skT = SockTransportProperties.getEncryptedClientDefaultTrustStore(
              !cmd.hasOption("insecure"));
      transport = skT.openHttpsClientTransport(server, port);
      transport.open();
      protocol = new TJSONProtocol(transport);
    } else if (cmd.hasOption("http")) {
      skT = SockTransportProperties.getUnencryptedClient();
      transport = skT.openHttpClientTransport(server, port);
      protocol = new TJSONProtocol(transport);
    } else {
      skT = SockTransportProperties.getUnencryptedClient();
      transport = skT.openClientTransport(server, port);
      transport.open();
      protocol = new TBinaryProtocol(transport);
    }

    client = new Heavy.Client(protocol);
    // This if will be useless until PKI signon
    if (cmd.hasOption("user")) {
      session = client.connect(cmd.getOptionValue("user", "admin"),
              cmd.getOptionValue("passwd", "HyperInteractive"),
              cmd.getOptionValue("database", "omnisci"));
    }
    LOGGER.debug("Connected session is " + session);

  } catch (TTransportException ex) {
    LOGGER.error("Connection failed - " + ex.toString());
    exit(1);
  } catch (TDBException ex) {
    LOGGER.error("Connection failed - " + ex.getError_msg());
    exit(2);
  } catch (TException ex) {
    LOGGER.error("Connection failed - " + ex.toString());
    exit(3);
  } catch (Exception ex) {
    LOGGER.error("General exception - " + ex.toString());
    exit(4);
  }
}
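
Each scheme selects a matching transport/protocol pair: https and http use TJSONProtocol over the HTTP client transports, while the default path opens a plain socket with TBinaryProtocol. The TBinaryProtocol constructed over the null transport at the top is dead code, since protocol is reassigned on every branch before use.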


void com.mapd.utility.SQLImporter.createDBTable(Connection otherdb_conn, ResultSetMetaData metaData) [inline, private]

Definition at line 695 of file SQLImporter.java.

References com.mapd.utility.SQLImporter.cmd, com.mapd.utility.SQLImporter.executeDBCommand(), and com.mapd.utility.SQLImporter.getColType().

Referenced by com.mapd.utility.SQLImporter.checkDBTable().

{
  StringBuilder sb = new StringBuilder();
  sb.append("Create table ").append(cmd.getOptionValue("targetTable")).append("(");

  // Now iterate the metadata
  try {
    for (int i = 1; i <= metaData.getColumnCount(); i++) {
      if (i > 1) {
        sb.append(",");
      }
      LOGGER.debug("Column name is " + metaData.getColumnName(i));
      LOGGER.debug("Column type is " + metaData.getColumnTypeName(i));
      LOGGER.debug("Column type is " + metaData.getColumnType(i));

      sb.append(metaData.getColumnName(i)).append(" ");
      int col_type = metaData.getColumnType(i);
      if (col_type == java.sql.Types.OTHER) {
        Db_vendor_types.GisType type =
                vendor_types.find_gis_type(otherdb_conn, metaData, i);
        sb.append(Db_vendor_types.gis_type_to_str(type));
      } else {
        sb.append(getColType(metaData.getColumnType(i),
                metaData.getPrecision(i),
                metaData.getScale(i)));
      }
    }
    sb.append(")");

    if (Integer.valueOf(cmd.getOptionValue("fragmentSize", "0")) > 0) {
      sb.append(" with (fragment_size = ");
      sb.append(cmd.getOptionValue("fragmentSize", "0"));
      sb.append(")");
    }

  } catch (SQLException ex) {
    LOGGER.error("Error processing the metadata - " + ex.toString());
    exit(1);
  }

  executeDBCommand(sb.toString());
}
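
For a hypothetical source table trips(trip_id BIGINT, fare NUMERIC(10,2), pickup TIMESTAMP) imported with fragmentSize set to 2000000, the statement assembled above would read:

Create table trips(trip_id BIGINT,fare NUMERIC(10,2),pickup TIMESTAMP) with (fragment_size = 2000000)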


void com.mapd.utility.SQLImporter.doWork(String[] args) [inline, package]

Definition at line 338 of file SQLImporter.java.

References com.mapd.utility.SQLImporter.cmd and com.mapd.utility.SQLImporter.executeQuery().

{
  // create Options object

  SQLImporter_args s_args = new SQLImporter_args();

  try {
    cmd = s_args.parse(args);
  } catch (ParseException ex) {
    LOGGER.error(ex.getLocalizedMessage());
    s_args.printHelpMessage();
    exit(0);
  }
  executeQuery();
}


void com.mapd.utility.SQLImporter.executeDBCommand(String sql) [inline, private]

Definition at line 822 of file SQLImporter.java.

References com.mapd.utility.SQLImporter.session.

Referenced by com.mapd.utility.SQLImporter.checkDBTable(), and com.mapd.utility.SQLImporter.createDBTable().

{
  LOGGER.info("Run Command - " + sql);

  try {
    TQueryResult sqlResult = client.sql_execute(session, sql + ";", true, null, -1, -1);
  } catch (TDBException ex) {
    LOGGER.error("SQL Execute failed - " + ex.getError_msg());
    exit(1);
  } catch (TException ex) {
    LOGGER.error("SQL Execute failed - " + ex.toString());
    exit(1);
  }
}


void com.mapd.utility.SQLImporter.executeQuery() [inline, package]

Definition at line 353 of file SQLImporter.java.

References com.mapd.utility.SQLImporter.checkDBTable(), com.mapd.utility.SQLImporter.cmd, com.mapd.utility.db_vendors.Db_vendor_types.isAutoCommitDisabledRequired(), com.mapd.utility.SQLImporter.resetBinaryColumn(), com.mapd.utility.SQLImporter.run_init(), com.mapd.utility.SQLImporter.session, com.mapd.utility.SQLImporter.setColValue(), com.mapd.utility.SQLImporter.setupBinaryColumn(), and com.mapd.utility.SQLImporter.vendor_types.

Referenced by com.mapd.utility.SQLImporter.doWork().

{
  Connection conn = null;
  Statement stmt = null;

  long totalTime = 0;

  try {
    // Open a connection
    LOGGER.info("Connecting to database url :" + cmd.getOptionValue("jdbcConnect"));
    conn = DriverManager.getConnection(cmd.getOptionValue("jdbcConnect"),
            cmd.getOptionValue("sourceUser"),
            cmd.getOptionValue("sourcePasswd"));
    vendor_types = Db_vendor_types.Db_vendor_factory(cmd.getOptionValue("jdbcConnect"));
    long startTime = System.currentTimeMillis();

    // run init file script on target DB if present
    if (cmd.hasOption("initializeFile")) {
      run_init(conn);
    }

    try {
      if (vendor_types.isAutoCommitDisabledRequired()) {
        conn.setAutoCommit(false);
      }
    } catch (SQLException se) {
      LOGGER.warn(
              "SQLException when attempting to setAutoCommit to false, jdbc driver probably doesn't support it. Error is "
              + se.toString());
    }

    // Execute a query
    stmt = conn.createStatement();

    int bufferSize = Integer.valueOf(cmd.getOptionValue("bufferSize", "10000"));
    // set the jdbc fetch buffer size to reduce the amount of records being moved to
    // java from postgres
    stmt.setFetchSize(bufferSize);
    long timer;

    ResultSet rs = stmt.executeQuery(cmd.getOptionValue("sqlStmt"));

    // check if table already exists and is compatible in HEAVYAI with the query
    // metadata
    ResultSetMetaData md = rs.getMetaData();
    checkDBTable(conn, md);

    timer = System.currentTimeMillis();

    long resultCount = 0;
    int bufferCount = 0;
    long total = 0;

    List<TColumn> cols = new ArrayList(md.getColumnCount());
    for (int i = 1; i <= md.getColumnCount(); i++) {
      TColumn col = setupBinaryColumn(i, md, bufferSize);
      cols.add(col);
    }

    boolean assignRenderGroups = !cmd.hasOption("noPolyRenderGroups");

    // read data from old DB
    while (rs.next()) {
      for (int i = 1; i <= md.getColumnCount(); i++) {
        setColValue(rs,
                cols.get(i - 1),
                md.getColumnType(i),
                i,
                md.getScale(i),
                md.getColumnTypeName(i));
      }
      resultCount++;
      bufferCount++;
      if (bufferCount == bufferSize) {
        bufferCount = 0;
        // send the buffer to HEAVY.AI
        if (assignRenderGroups) {
          client.load_table_binary_columnar_polys(
                  session, cmd.getOptionValue("targetTable"), cols, null, true);
        } else {
          client.load_table_binary_columnar(
                  session, cmd.getOptionValue("targetTable"), cols, null);
        }
        // recreate columnar store for use
        for (int i = 1; i <= md.getColumnCount(); i++) {
          resetBinaryColumn(i, md, bufferSize, cols.get(i - 1));
        }

        if (resultCount % 100000 == 0) {
          LOGGER.info("Imported " + resultCount + " records");
        }
      }
    }
    if (bufferCount > 0) {
      // send the LAST buffer to HEAVY.AI
      if (assignRenderGroups) {
        client.load_table_binary_columnar_polys(
                session, cmd.getOptionValue("targetTable"), cols, null, true);
      } else {
        client.load_table_binary_columnar(
                session, cmd.getOptionValue("targetTable"), cols, null);
      }
      bufferCount = 0;
    }

    // dump render group assignment data immediately
    if (assignRenderGroups) {
      client.load_table_binary_columnar_polys(
              session, cmd.getOptionValue("targetTable"), null, null, false);
    }

    LOGGER.info("result set count is " + resultCount + " read time is "
            + (System.currentTimeMillis() - timer) + "ms");

    // Clean-up environment
    rs.close();
    stmt.close();
    conn.close();

    totalTime = System.currentTimeMillis() - startTime;
  } catch (SQLException se) {
    LOGGER.error("SQLException - " + se.toString());
    se.printStackTrace();
  } catch (TDBException ex) {
    LOGGER.error("TDBException - " + ex.getError_msg());
    ex.printStackTrace();
  } catch (TException ex) {
    LOGGER.error("TException failed - " + ex.toString());
    ex.printStackTrace();
  } finally {
    // finally block used to close resources
    try {
      if (stmt != null) {
        stmt.close();
      }
    } catch (SQLException se2) {
    } // nothing we can do
    try {
      if (conn != null) {
        conn.close();
      }
    } catch (SQLException se) {
      LOGGER.error("SQLException in close - " + se.toString());
      se.printStackTrace();
    }
    try {
      if (session != null) {
        client.disconnect(session);
      }
    } catch (TDBException ex) {
      LOGGER.error("TDBException - in finalization " + ex.getError_msg());
      ex.printStackTrace();
    } catch (TException ex) {
      LOGGER.error("TException - in finalization " + ex.toString());
      ex.printStackTrace();
    }
  }
}
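
The core of this method is a batch-and-flush loop over columnar buffers. Below is a condensed sketch of that pattern, assuming an already-open Heavy.Client and session from the Thrift bindings and reusing this class's setupBinaryColumn(), setColValue(), and resetBinaryColumn(); render-group handling and error paths are omitted.

// Condensed sketch of the batching pattern, written as if inside SQLImporter.
void loadInBatches(ResultSet rs, String table, int bufferSize) throws Exception {
  ResultSetMetaData md = rs.getMetaData();
  List<TColumn> cols = new ArrayList<>(md.getColumnCount());
  for (int i = 1; i <= md.getColumnCount(); i++) {
    cols.add(setupBinaryColumn(i, md, bufferSize)); // pre-size columnar buffers
  }
  int pending = 0;
  while (rs.next()) {
    for (int i = 1; i <= md.getColumnCount(); i++) {
      setColValue(rs, cols.get(i - 1), md.getColumnType(i), i, md.getScale(i),
              md.getColumnTypeName(i));
    }
    if (++pending == bufferSize) { // flush a full buffer, then reuse it
      client.load_table_binary_columnar(session, table, cols, null);
      for (int i = 1; i <= md.getColumnCount(); i++) {
        resetBinaryColumn(i, md, bufferSize, cols.get(i - 1));
      }
      pending = 0;
    }
  }
  if (pending > 0) { // flush the final partial buffer
    client.load_table_binary_columnar(session, table, cols, null);
  }
}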


String com.mapd.utility.SQLImporter.getColType(int cType, int precision, int scale) [inline, private]

Definition at line 836 of file SQLImporter.java.

Referenced by com.mapd.utility.SQLImporter.createDBTable().

{
  // Note - if cType is OTHER an earlier call will have been made
  // to try and work out the db vendor's specific type.
  if (precision > 19) {
    precision = 19;
  }
  if (scale > 19) {
    scale = 18;
  }
  switch (cType) {
    case java.sql.Types.TINYINT:
      return ("TINYINT");
    case java.sql.Types.SMALLINT:
      return ("SMALLINT");
    case java.sql.Types.INTEGER:
      return ("INTEGER");
    case java.sql.Types.BIGINT:
      return ("BIGINT");
    case java.sql.Types.FLOAT:
      return ("FLOAT");
    case java.sql.Types.DECIMAL:
      return ("DECIMAL(" + precision + "," + scale + ")");
    case java.sql.Types.DOUBLE:
      return ("DOUBLE");
    case java.sql.Types.REAL:
      return ("REAL");
    case java.sql.Types.NUMERIC:
      return ("NUMERIC(" + precision + "," + scale + ")");
    case java.sql.Types.TIME:
      return ("TIME");
    case java.sql.Types.TIMESTAMP:
      return ("TIMESTAMP");
    case java.sql.Types.DATE:
      return ("DATE");
    case java.sql.Types.BOOLEAN:
    case java.sql.Types.BIT: // deal with postgres treating boolean as bit... this will bite me
      return ("BOOLEAN");
    case java.sql.Types.NVARCHAR:
    case java.sql.Types.VARCHAR:
    case java.sql.Types.NCHAR:
    case java.sql.Types.CHAR:
    case java.sql.Types.LONGVARCHAR:
    case java.sql.Types.LONGNVARCHAR:
      return ("TEXT ENCODING DICT");
    default:
      throw new AssertionError("Column type " + cType + " not Supported");
  }
}
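
Note the clamping at the top: precision is capped at 19, presumably because the server stores DECIMAL values in 64 bits, so a source NUMERIC(38,4) arrives as NUMERIC(19,4). A scale above 19 is reduced to 18, not 19; the source gives no rationale for the asymmetry.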


List<TColumnType> com.mapd.utility.SQLImporter.getColumnInfo(String tName) [inline, private]

Definition at line 787 of file SQLImporter.java.

References com.mapd.utility.SQLImporter.session.

Referenced by com.mapd.utility.SQLImporter.checkDBTable().

{
  LOGGER.debug("Getting columns for " + tName);
  List<TColumnType> row_descriptor = null;
  try {
    TTableDetails table_details = client.get_table_details(session, tName);
    row_descriptor = table_details.row_desc;
  } catch (TDBException ex) {
    LOGGER.error("column check failed - " + ex.getError_msg());
    exit(3);
  } catch (TException ex) {
    LOGGER.error("column check failed - " + ex.toString());
    exit(3);
  }
  return row_descriptor;
}


void com.mapd.utility.SQLImporter.help(Options options) [inline, private]

Definition at line 538 of file SQLImporter.java.

{
  // automatically generate the help statement
  HelpFormatter formatter = new HelpFormatter();
  formatter.setOptionComparator(null); // get options in the order they are created
  formatter.printHelp("SQLImporter", options);
}
static void com.mapd.utility.SQLImporter.main(String[] args) [inline, static]

Definition at line 333 of file SQLImporter.java.


{
  SQLImporter sq = new SQLImporter();
  sq.doWork(args);
}
void com.mapd.utility.SQLImporter.resetBinaryColumn(int i, ResultSetMetaData md, int bufferSize, TColumn col) throws SQLException [inline, private]

Definition at line 1058 of file SQLImporter.java.

Referenced by com.mapd.utility.SQLImporter.executeQuery().

{
  col.nulls.clear();

  switch (md.getColumnType(i)) {
    case java.sql.Types.TINYINT:
    case java.sql.Types.SMALLINT:
    case java.sql.Types.INTEGER:
    case java.sql.Types.BIGINT:
    case java.sql.Types.TIME:
    case java.sql.Types.TIMESTAMP:
    case java.sql.Types.BIT: // deal with postgres treating boolean as bit... this will bite me
    case java.sql.Types.BOOLEAN:
    case java.sql.Types.DATE:
    case java.sql.Types.DECIMAL:
    case java.sql.Types.NUMERIC:
      col.data.int_col.clear();
      break;

    case java.sql.Types.FLOAT:
    case java.sql.Types.DOUBLE:
    case java.sql.Types.REAL:
      col.data.real_col.clear();
      break;

    case java.sql.Types.NVARCHAR:
    case java.sql.Types.VARCHAR:
    case java.sql.Types.NCHAR:
    case java.sql.Types.CHAR:
    case java.sql.Types.LONGVARCHAR:
    case java.sql.Types.LONGNVARCHAR:
    case java.sql.Types.OTHER:
      col.data.str_col.clear();
      break;
    default:
      throw new AssertionError("Column type " + md.getColumnType(i) + " not Supported");
  }
}
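
Because the buffers are cleared rather than reallocated, the ArrayList capacity reserved by setupBinaryColumn() is retained and each batch reuses the same storage.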


void com.mapd.utility.SQLImporter.run_init(Connection conn) [inline, private]

Definition at line 511 of file SQLImporter.java.

References com.mapd.utility.SQLImporter.cmd.

Referenced by com.mapd.utility.SQLImporter.executeQuery().

{
  // attempt to open file
  String line = "";
  try {
    BufferedReader reader =
            new BufferedReader(new FileReader(cmd.getOptionValue("initializeFile")));
    Statement stmt = conn.createStatement();
    while ((line = reader.readLine()) != null) {
      if (line.isEmpty()) {
        continue;
      }
      LOGGER.info("Running : " + line);
      stmt.execute(line);
    }
    stmt.close();
    reader.close();
  } catch (IOException e) {
    LOGGER.error("Exception occurred trying to read initialize file: "
            + cmd.getOptionValue("initializeFile"));
    exit(1);
  } catch (SQLException e) {
    LOGGER.error(
            "Exception occurred trying to execute initialize file entry : " + line);
    exit(1);
  }
}
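
Statements are executed one per line on the connection passed in, so each statement must fit on a single line; blank lines are skipped, and there is no provision for comments or multi-line statements.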


void com.mapd.utility.SQLImporter.setColValue(ResultSet rs, TColumn col, int columnType, int colNum, int scale, String colTypeName) throws SQLException [inline, private]

Definition at line 932 of file SQLImporter.java.


Referenced by com.mapd.utility.SQLImporter.executeQuery().

{
  switch (columnType) {
    case java.sql.Types.BIT: // deal with postgres treating boolean as bit... this will bite me
    case java.sql.Types.BOOLEAN:
      Boolean b = rs.getBoolean(colNum);
      if (rs.wasNull()) {
        col.nulls.add(Boolean.TRUE);
        col.data.int_col.add(0L);
      } else {
        col.nulls.add(Boolean.FALSE);
        col.data.int_col.add(b ? 1L : 0L);
      }
      break;

    case java.sql.Types.DECIMAL:
    case java.sql.Types.NUMERIC:
      BigDecimal bd = rs.getBigDecimal(colNum);
      if (rs.wasNull()) {
        col.nulls.add(Boolean.TRUE);
        col.data.int_col.add(0L);
      } else {
        col.nulls.add(Boolean.FALSE);
        col.data.int_col.add(bd.multiply(new BigDecimal(pow(10L, scale))).longValue());
      }
      break;

    case java.sql.Types.TINYINT:
    case java.sql.Types.SMALLINT:
    case java.sql.Types.INTEGER:
    case java.sql.Types.BIGINT:
      Long l = rs.getLong(colNum);
      if (rs.wasNull()) {
        col.nulls.add(Boolean.TRUE);
        col.data.int_col.add(new Long(0));
      } else {
        col.nulls.add(Boolean.FALSE);
        col.data.int_col.add(l);
      }
      break;

    case java.sql.Types.TIME:
      Time t = rs.getTime(colNum);
      if (rs.wasNull()) {
        col.nulls.add(Boolean.TRUE);
        col.data.int_col.add(0L);
      } else {
        col.data.int_col.add(dateTimeUtils.getSecondsFromMilliseconds(t.getTime()));
        col.nulls.add(Boolean.FALSE);
      }
      break;
    case java.sql.Types.TIMESTAMP:
      Timestamp ts = rs.getTimestamp(colNum);
      if (rs.wasNull()) {
        col.nulls.add(Boolean.TRUE);
        col.data.int_col.add(0L);
      } else {
        col.data.int_col.add(dateTimeUtils.getSecondsFromMilliseconds(ts.getTime()));
        col.nulls.add(Boolean.FALSE);
      }
      break;
    case java.sql.Types.DATE:
      Date d = rs.getDate(colNum);
      if (rs.wasNull()) {
        col.nulls.add(Boolean.TRUE);
        col.data.int_col.add(0L);
      } else {
        col.data.int_col.add(dateTimeUtils.getSecondsFromMilliseconds(d.getTime()));
        col.nulls.add(Boolean.FALSE);
      }
      break;
    case java.sql.Types.FLOAT:
    case java.sql.Types.DOUBLE:
    case java.sql.Types.REAL:
      Double db = rs.getDouble(colNum);
      if (rs.wasNull()) {
        col.nulls.add(Boolean.TRUE);
        col.data.real_col.add(new Double(0));
      } else {
        col.nulls.add(Boolean.FALSE);
        col.data.real_col.add(db);
      }
      break;

    case java.sql.Types.NVARCHAR:
    case java.sql.Types.VARCHAR:
    case java.sql.Types.NCHAR:
    case java.sql.Types.CHAR:
    case java.sql.Types.LONGVARCHAR:
    case java.sql.Types.LONGNVARCHAR:
      String strVal = rs.getString(colNum);
      if (rs.wasNull()) {
        col.nulls.add(Boolean.TRUE);
        col.data.str_col.add("");
      } else {
        col.data.str_col.add(strVal);
        col.nulls.add(Boolean.FALSE);
      }
      break;
    case java.sql.Types.OTHER:
      Object objVal = rs.getObject(colNum);
      if (rs.wasNull()) {
        col.nulls.add(Boolean.TRUE);
        col.data.str_col.add("");
      } else {
        col.data.str_col.add(vendor_types.get_wkt(rs, colNum, colTypeName));
        col.nulls.add(Boolean.FALSE);
      }
      break;
    default:
      throw new AssertionError("Column type " + columnType + " not Supported");
  }
}
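
Worth calling out in the DECIMAL/NUMERIC branch: the value is shipped to the server as a scaled 64-bit integer, value x 10^scale truncated to long (pow here is presumably the statically imported java.lang.Math.pow). A minimal standalone illustration:

import java.math.BigDecimal;

public class DecimalEncoding {
  public static void main(String[] args) {
    // 12.34 at scale 2 is sent over the wire as the long 1234
    BigDecimal bd = new BigDecimal("12.34");
    int scale = 2;
    long encoded = bd.multiply(new BigDecimal(Math.pow(10, scale))).longValue();
    System.out.println(encoded); // prints 1234
  }
}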


TColumn com.mapd.utility.SQLImporter.setupBinaryColumn(int i, ResultSetMetaData md, int bufferSize) throws SQLException [inline, private]

Definition at line 886 of file SQLImporter.java.

Referenced by com.mapd.utility.SQLImporter.executeQuery().

{
  TColumn col = new TColumn();

  col.nulls = new ArrayList<Boolean>(bufferSize);

  col.data = new TColumnData();

  switch (md.getColumnType(i)) {
    case java.sql.Types.TINYINT:
    case java.sql.Types.SMALLINT:
    case java.sql.Types.INTEGER:
    case java.sql.Types.BIGINT:
    case java.sql.Types.TIME:
    case java.sql.Types.TIMESTAMP:
    case java.sql.Types.BIT: // deal with postgres treating boolean as bit... this will bite me
    case java.sql.Types.BOOLEAN:
    case java.sql.Types.DATE:
    case java.sql.Types.DECIMAL:
    case java.sql.Types.NUMERIC:
      col.data.int_col = new ArrayList<Long>(bufferSize);
      break;

    case java.sql.Types.FLOAT:
    case java.sql.Types.DOUBLE:
    case java.sql.Types.REAL:
      col.data.real_col = new ArrayList<Double>(bufferSize);
      break;

    case java.sql.Types.NVARCHAR:
    case java.sql.Types.VARCHAR:
    case java.sql.Types.NCHAR:
    case java.sql.Types.CHAR:
    case java.sql.Types.LONGVARCHAR:
    case java.sql.Types.LONGNVARCHAR:
    case java.sql.Types.OTHER:
      col.data.str_col = new ArrayList<String>(bufferSize);
      break;

    default:
      throw new AssertionError("Column type " + md.getColumnType(i) + " not Supported");
  }
  return col;
}
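
Only one of int_col, real_col, or str_col is allocated per column: integer-like types (including temporal and boolean values, per the case labels above) travel as 64-bit longs, floating-point types as doubles, and everything else, geometry included, as strings.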


boolean com.mapd.utility.SQLImporter.tableExists(String tName) [inline, private]

Definition at line 803 of file SQLImporter.java.

References com.mapd.utility.SQLImporter.session.

Referenced by com.mapd.utility.SQLImporter.checkDBTable().

{
  LOGGER.debug("Check for table " + tName);
  try {
    List<String> recv_get_tables = client.get_tables(session);
    for (String s : recv_get_tables) {
      if (s.equals(tName)) {
        return true;
      }
    }
  } catch (TDBException ex) {
    LOGGER.error("Table check failed - " + ex.getError_msg());
    exit(3);
  } catch (TException ex) {
    LOGGER.error("Table check failed - " + ex.toString());
    exit(3);
  }
  return false;
}
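
Note the lookup fetches the full table list and compares with String.equals(), so the target table name is matched case-sensitively, unlike the column-name comparison in verifyColumnSignaturesMatch(), which uses equalsIgnoreCase().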


void com.mapd.utility.SQLImporter.verifyColumnSignaturesMatch(Connection otherdb_conn, List<TColumnType> dstColumns, ResultSetMetaData srcColumns) throws SQLException [inline, private]

Definition at line 564 of file SQLImporter.java.

References com.mapd.utility.SQLImporter.cmd.

Referenced by com.mapd.utility.SQLImporter.checkDBTable().

{
  if (srcColumns.getColumnCount() != dstColumns.size()) {
    LOGGER.error("Column counts do not match: Destination " + dstColumns.size()
            + " versus Source " + srcColumns.getColumnCount());
    exit(1);
  }
  for (int i = 1; i <= dstColumns.size(); ++i) {
    if (!dstColumns.get(i - 1).getCol_name().equalsIgnoreCase(
                srcColumns.getColumnName(i))) {
      LOGGER.error(
              "Destination table does not have matching column in same order for column number "
              + i + " destination column name is " + dstColumns.get(i - 1).col_name
              + " versus target column " + srcColumns.getColumnName(i));
      exit(1);
    }
    TDatumType dstType = dstColumns.get(i - 1).getCol_type().getType();
    int dstPrecision = dstColumns.get(i - 1).getCol_type().getPrecision();
    int dstScale = dstColumns.get(i - 1).getCol_type().getScale();
    int srcType = srcColumns.getColumnType(i);
    int srcPrecision = srcColumns.getPrecision(i);
    int srcScale = srcColumns.getScale(i);

    boolean match = false;
    switch (srcType) {
      case java.sql.Types.TINYINT:
        match |= dstType == TDatumType.TINYINT;
        // NOTE: it's okay to import a smaller type to a bigger one,
        // so we just fall through and try to match the next type.
        // But the order of case statements is important here!
      case java.sql.Types.SMALLINT:
        match |= dstType == TDatumType.SMALLINT;
      case java.sql.Types.INTEGER:
        match |= dstType == TDatumType.INT;
      case java.sql.Types.BIGINT:
        match |= dstType == TDatumType.BIGINT;
        if (cmd.hasOption("AllowIntegerNarrowing")) {
          match |= dstType == TDatumType.TINYINT || dstType == TDatumType.SMALLINT
                  || dstType == TDatumType.INT;
        }
        break;
      case java.sql.Types.DECIMAL:
      case java.sql.Types.NUMERIC:
        match = dstType == TDatumType.DECIMAL && dstPrecision == srcPrecision
                && dstScale == srcScale;
        break;
      case java.sql.Types.FLOAT:
      case java.sql.Types.REAL:
        match |= dstType == TDatumType.FLOAT;
        // Fall through and try double
      case java.sql.Types.DOUBLE:
        match |= dstType == TDatumType.DOUBLE;
        if (cmd.hasOption("AllowDoubleToFloat")) {
          match |= dstType == TDatumType.FLOAT;
        }
        break;
      case java.sql.Types.TIME:
        match = dstType == TDatumType.TIME;
        break;
      case java.sql.Types.TIMESTAMP:
        match = dstType == TDatumType.TIMESTAMP;
        break;
      case java.sql.Types.DATE:
        match = dstType == TDatumType.DATE;
        break;
      case java.sql.Types.BOOLEAN:
      case java.sql.Types.BIT: // deal with postgres treating boolean as bit... this will bite me
        match = dstType == TDatumType.BOOL;
        break;
      case java.sql.Types.NVARCHAR:
      case java.sql.Types.VARCHAR:
      case java.sql.Types.NCHAR:
      case java.sql.Types.CHAR:
      case java.sql.Types.LONGVARCHAR:
      case java.sql.Types.LONGNVARCHAR:
        match = (dstType == TDatumType.STR || dstType == TDatumType.POINT
                || dstType == TDatumType.POLYGON || dstType == TDatumType.MULTIPOLYGON
                || dstType == TDatumType.LINESTRING
                || dstType == TDatumType.MULTILINESTRING
                || dstType == TDatumType.MULTIPOINT);
        break;
      case java.sql.Types.OTHER:
        // NOTE: subtypes (geography vs geometry vs none) are ignored here because
        // they make no difference to OmniSciDB at the moment
        Db_vendor_types.GisType gisType =
                vendor_types.find_gis_type(otherdb_conn, srcColumns, i);
        if (gisType.srid != dstScale) {
          match = false;
          break;
        }
        switch (dstType) {
          case POINT:
            match = gisType.type.equalsIgnoreCase("POINT");
            break;
          case MULTIPOINT:
            match = gisType.type.equalsIgnoreCase("MULTIPOINT");
            break;
          case LINESTRING:
            match = gisType.type.equalsIgnoreCase("LINESTRING");
            break;
          case MULTILINESTRING:
            match = gisType.type.equalsIgnoreCase("MULTILINESTRING");
            break;
          case POLYGON:
            match = gisType.type.equalsIgnoreCase("POLYGON");
            break;
          case MULTIPOLYGON:
            match = gisType.type.equalsIgnoreCase("MULTIPOLYGON");
            break;
          default:
            LOGGER.error("Column type " + JDBCType.valueOf(srcType).getName()
                    + " not Supported");
            exit(1);
        }
        break;
      default:
        LOGGER.error("Column type " + JDBCType.valueOf(srcType).getName()
                + " not Supported");
        exit(1);
    }
    if (!match) {
      LOGGER.error("Source and destination types for column "
              + srcColumns.getColumnName(i)
              + " do not match. Please make sure that type, precision and scale are exactly the same");
      exit(1);
    }
  }
}
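
The integer and floating-point cases rely on deliberate switch fall-through: a source TINYINT is accepted against a destination TINYINT, SMALLINT, INT, or BIGINT, whereas a source BIGINT matches only BIGINT unless AllowIntegerNarrowing is set; likewise FLOAT/REAL fall through to DOUBLE, with AllowDoubleToFloat enabling the narrowing direction.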


Member Data Documentation

Heavy.Client com.mapd.utility.SQLImporter.client = null [protected]

Definition at line 324 of file SQLImporter.java.

Referenced by com.mapd.utility.SQLImporter.createDBConnection().

DateTimeUtils com.mapd.utility.SQLImporter.dateTimeUtils [private]
Initial value:
= (milliseconds) -> {
return milliseconds / 1000;
}

Definition at line 327 of file SQLImporter.java.

final Logger com.mapd.utility.SQLImporter.LOGGER = LoggerFactory.getLogger(SQLImporter.class) [static, package]

Definition at line 326 of file SQLImporter.java.

String com.mapd.utility.SQLImporter.session = null [protected]

Definition at line 323 of file SQLImporter.java.

Referenced by com.mapd.utility.SQLImporter.createDBConnection(), com.mapd.utility.SQLImporter.executeDBCommand(), com.mapd.utility.SQLImporter.executeQuery(), com.mapd.utility.SQLImporter.getColumnInfo(), and com.mapd.utility.SQLImporter.tableExists().

Db_vendor_types com.mapd.utility.SQLImporter.vendor_types = null [package]

Definition at line 331 of file SQLImporter.java.

Referenced by com.mapd.utility.SQLImporter.executeQuery().


The documentation for this class was generated from the following file:

SQLImporter.java