OmniSciDB  a667adc9c8
 All Classes Namespaces Files Functions Variables Typedefs Enumerations Enumerator Friends Macros Pages
com.mapd.utility.SQLImporter Class Reference
+ Collaboration diagram for com.mapd.utility.SQLImporter:

Static Public Member Functions

static void main (String[] args)
 

Protected Attributes

String session = null
 
OmniSci.Client client = null
 

Package Functions

void doWork (String[] args)
 
void executeQuery ()
 

Package Attributes

Db_vendor_types vendor_types = null
 

Static Package Attributes

static final Logger LOGGER = LoggerFactory.getLogger(SQLImporter.class)
 

Private Member Functions

void run_init (Connection conn)
 
void help (Options options)
 
void checkMapDTable (Connection otherdb_conn, ResultSetMetaData md) throws SQLException
 
void verifyColumnSignaturesMatch (Connection otherdb_conn, List< TColumnType > dstColumns, ResultSetMetaData srcColumns) throws SQLException
 
void createMapDTable (Connection otherdb_conn, ResultSetMetaData metaData)
 
void createMapDConnection ()
 
List< TColumnType > getColumnInfo (String tName)
 
boolean tableExists (String tName)
 
void executeMapDCommand (String sql)
 
String getColType (int cType, int precision, int scale)
 
TColumn setupBinaryColumn (int i, ResultSetMetaData md, int bufferSize) throws SQLException
 
void setColValue (ResultSet rs, TColumn col, int columnType, int colNum, int scale, String colTypeName) throws SQLException
 
void resetBinaryColumn (int i, ResultSetMetaData md, int bufferSize, TColumn col) throws SQLException
 

Private Attributes

CommandLine cmd = null
 
DateTimeUtils dateTimeUtils
 

Detailed Description

Definition at line 307 of file SQLImporter.java.

Member Function Documentation

void com.mapd.utility.SQLImporter.checkMapDTable ( Connection  otherdb_conn,
ResultSetMetaData  md 
) throws SQLException
inlineprivate

Definition at line 516 of file SQLImporter.java.

References com.mapd.utility.SQLImporter.cmd, com.mapd.utility.SQLImporter.createMapDConnection(), com.mapd.utility.SQLImporter.createMapDTable(), com.mapd.utility.SQLImporter.executeMapDCommand(), com.mapd.utility.SQLImporter.getColumnInfo(), com.mapd.utility.SQLImporter.tableExists(), and com.mapd.utility.SQLImporter.verifyColumnSignaturesMatch().

Referenced by com.mapd.utility.SQLImporter.executeQuery().

517  {
519  String tName = cmd.getOptionValue("targetTable");
520 
521  if (tableExists(tName)) {
522  // check if we want to truncate
523  if (cmd.hasOption("truncate")) {
524  executeMapDCommand("Drop table " + tName);
525  createMapDTable(otherdb_conn, md);
526  } else {
527  List<TColumnType> columnInfo = getColumnInfo(tName);
528  verifyColumnSignaturesMatch(otherdb_conn, columnInfo, md);
529  }
530  } else {
531  createMapDTable(otherdb_conn, md);
532  }
533  }
List< TColumnType > getColumnInfo(String tName)
void createMapDTable(Connection otherdb_conn, ResultSetMetaData metaData)
void verifyColumnSignaturesMatch(Connection otherdb_conn, List< TColumnType > dstColumns, ResultSetMetaData srcColumns)
boolean tableExists(String tName)
void executeMapDCommand(String sql)

+ Here is the call graph for this function:

+ Here is the caller graph for this function:

void com.mapd.utility.SQLImporter.createMapDConnection ( )
inlineprivate

Definition at line 696 of file SQLImporter.java.

References com.mapd.utility.SQLImporter.client, com.mapd.utility.SQLImporter.cmd, and com.mapd.utility.SQLImporter.session.

Referenced by com.mapd.utility.SQLImporter.checkMapDTable().

696  {
697  TTransport transport = null;
698  TProtocol protocol = new TBinaryProtocol(transport);
699  int port = Integer.valueOf(cmd.getOptionValue("port", "6274"));
700  String server = cmd.getOptionValue("server", "localhost");
701  try {
702  // Uses default certificate stores.
703  boolean load_trust_store = cmd.hasOption("https");
704  SockTransportProperties skT = null;
705  if (cmd.hasOption("https")) {
706  skT = SockTransportProperties.getEncryptedClientDefaultTrustStore(
707  !cmd.hasOption("insecure"));
708  transport = skT.openHttpsClientTransport(server, port);
709  transport.open();
710  protocol = new TJSONProtocol(transport);
711  } else if (cmd.hasOption("http")) {
712  skT = SockTransportProperties.getUnencryptedClient();
713  transport = skT.openHttpClientTransport(server, port);
714  protocol = new TJSONProtocol(transport);
715  } else {
716  skT = SockTransportProperties.getUnencryptedClient();
717  transport = skT.openClientTransport(server, port);
718  transport.open();
719  protocol = new TBinaryProtocol(transport);
720  }
721 
722  client = new OmniSci.Client(protocol);
723  // This if will be useless until PKI signon
724  if (cmd.hasOption("user")) {
725  session = client.connect(cmd.getOptionValue("user", "admin"),
726  cmd.getOptionValue("passwd", "HyperInteractive"),
727  cmd.getOptionValue("database", "omnisci"));
728  }
729  LOGGER.debug("Connected session is " + session);
730 
731  } catch (TTransportException ex) {
732  LOGGER.error("Connection failed - " + ex.toString());
733  exit(1);
734  } catch (TOmniSciException ex) {
735  LOGGER.error("Connection failed - " + ex.toString());
736  exit(2);
737  } catch (TException ex) {
738  LOGGER.error("Connection failed - " + ex.toString());
739  exit(3);
740  } catch (Exception ex) {
741  LOGGER.error("General exception - " + ex.toString());
742  exit(4);
743  }
744  }

+ Here is the caller graph for this function:

void com.mapd.utility.SQLImporter.createMapDTable ( Connection  otherdb_conn,
ResultSetMetaData  metaData 
)
inlineprivate

Definition at line 654 of file SQLImporter.java.

References File_Namespace.append(), com.mapd.utility.SQLImporter.cmd, com.mapd.utility.SQLImporter.executeMapDCommand(), com.mapd.utility.SQLImporter.getColType(), i, Integer, and run_benchmark_import.type.

Referenced by com.mapd.utility.SQLImporter.checkMapDTable().

654  {
655  StringBuilder sb = new StringBuilder();
656  sb.append("Create table ").append(cmd.getOptionValue("targetTable")).append("(");
657 
658  // Now iterate the metadata
659  try {
660  for (int i = 1; i <= metaData.getColumnCount(); i++) {
661  if (i > 1) {
662  sb.append(",");
663  }
664  LOGGER.debug("Column name is " + metaData.getColumnName(i));
665  LOGGER.debug("Column type is " + metaData.getColumnTypeName(i));
666  LOGGER.debug("Column type is " + metaData.getColumnType(i));
667 
668  sb.append(metaData.getColumnName(i)).append(" ");
669  int col_type = metaData.getColumnType(i);
670  if (col_type == java.sql.Types.OTHER) {
671  Db_vendor_types.GisType type =
672  vendor_types.find_gis_type(otherdb_conn, metaData, i);
673  sb.append(Db_vendor_types.gis_type_to_str(type));
674  } else {
675  sb.append(getColType(metaData.getColumnType(i),
676  metaData.getPrecision(i),
677  metaData.getScale(i)));
678  }
679  }
680  sb.append(")");
681 
682  if (Integer.valueOf(cmd.getOptionValue("fragmentSize", "0")) > 0) {
683  sb.append(" with (fragment_size = ");
684  sb.append(cmd.getOptionValue("fragmentSize", "0"));
685  sb.append(")");
686  }
687 
688  } catch (SQLException ex) {
689  LOGGER.error("Error processing the metadata - " + ex.toString());
690  exit(1);
691  }
692 
693  executeMapDCommand(sb.toString());
694  }
String getColType(int cType, int precision, int scale)
size_t append(FILE *f, const size_t size, int8_t *buf)
Appends the specified number of bytes to the end of the file f from buf.
Definition: File.cpp:159
void executeMapDCommand(String sql)

+ Here is the call graph for this function:

+ Here is the caller graph for this function:

void com.mapd.utility.SQLImporter.doWork ( String[]  args)
inlinepackage

Definition at line 323 of file SQLImporter.java.

References run_benchmark_import.args, com.mapd.utility.SQLImporter.cmd, and com.mapd.utility.SQLImporter.executeQuery().

323  {
324  // create Options object
325 
326  SQLImporter_args s_args = new SQLImporter_args();
327 
328  try {
329  cmd = s_args.parse(args);
330  } catch (ParseException ex) {
331  LOGGER.error(ex.getLocalizedMessage());
332  s_args.printHelpMessage();
333  exit(0);
334  }
335  executeQuery();
336  }

+ Here is the call graph for this function:

void com.mapd.utility.SQLImporter.executeMapDCommand ( String  sql)
inlineprivate

Definition at line 781 of file SQLImporter.java.

References com.mapd.utility.SQLImporter.session.

Referenced by com.mapd.utility.SQLImporter.checkMapDTable(), and com.mapd.utility.SQLImporter.createMapDTable().

781  {
782  LOGGER.info("Run Command - " + sql);
783 
784  try {
785  TQueryResult sqlResult = client.sql_execute(session, sql + ";", true, null, -1, -1);
786  } catch (TOmniSciException ex) {
787  LOGGER.error("SQL Execute failed - " + ex.toString());
788  exit(1);
789  } catch (TException ex) {
790  LOGGER.error("SQL Execute failed - " + ex.toString());
791  exit(1);
792  }
793  }

+ Here is the caller graph for this function:

void com.mapd.utility.SQLImporter.executeQuery ( )
inlinepackage

Definition at line 338 of file SQLImporter.java.

References com.mapd.utility.SQLImporter.checkMapDTable(), com.mapd.utility.SQLImporter.cmd, i, com.mapd.utility.db_vendors.Db_vendor_types.isAutoCommitDisabledRequired(), com.mapd.utility.SQLImporter.resetBinaryColumn(), com.mapd.utility.SQLImporter.run_init(), com.mapd.utility.SQLImporter.session, com.mapd.utility.SQLImporter.setColValue(), com.mapd.utility.SQLImporter.setupBinaryColumn(), and com.mapd.utility.SQLImporter.vendor_types.

Referenced by com.mapd.utility.SQLImporter.doWork().

338  {
339  Connection conn = null;
340  Statement stmt = null;
341 
342  long totalTime = 0;
343 
344  try {
345  // Open a connection
346  LOGGER.info("Connecting to database url :" + cmd.getOptionValue("jdbcConnect"));
347  conn = DriverManager.getConnection(cmd.getOptionValue("jdbcConnect"),
348  cmd.getOptionValue("sourceUser"),
349  cmd.getOptionValue("sourcePasswd"));
350  vendor_types = Db_vendor_types.Db_vendor_factory(cmd.getOptionValue("jdbcConnect"));
351  long startTime = System.currentTimeMillis();
352 
 353  // run init file script on target DB if present
354  if (cmd.hasOption("initializeFile")) {
355  run_init(conn);
356  }
357 
358  try {
360  conn.setAutoCommit(false);
361  }
362  } catch (SQLException se) {
363  LOGGER.warn(
364  "SQLException when attempting to setAutoCommit to false, jdbc driver probably doesnt support it. Error is "
365  + se.toString());
366  }
367 
368  // Execute a query
369  stmt = conn.createStatement();
370 
371  int bufferSize = Integer.valueOf(cmd.getOptionValue("bufferSize", "10000"));
372  // set the jdbc fetch buffer size to reduce the amount of records being moved to
 373  // Java from postgres
374  stmt.setFetchSize(bufferSize);
375  long timer;
376 
377  ResultSet rs = stmt.executeQuery(cmd.getOptionValue("sqlStmt"));
378 
379  // check if table already exists and is compatible in OmniSci with the query
380  // metadata
381  ResultSetMetaData md = rs.getMetaData();
382  checkMapDTable(conn, md);
383 
384  timer = System.currentTimeMillis();
385 
386  long resultCount = 0;
387  int bufferCount = 0;
388  long total = 0;
389 
390  List<TColumn> cols = new ArrayList(md.getColumnCount());
391  for (int i = 1; i <= md.getColumnCount(); i++) {
392  TColumn col = setupBinaryColumn(i, md, bufferSize);
393  cols.add(col);
394  }
395 
396  // read data from old DB
397  while (rs.next()) {
398  for (int i = 1; i <= md.getColumnCount(); i++) {
399  setColValue(rs,
400  cols.get(i - 1),
401  md.getColumnType(i),
402  i,
403  md.getScale(i),
404  md.getColumnTypeName(i));
405  }
406  resultCount++;
407  bufferCount++;
408  if (bufferCount == bufferSize) {
409  bufferCount = 0;
410  // send the buffer to mapD
411  client.load_table_binary_columnar_polys(
412  session, cmd.getOptionValue("targetTable"), cols, null, true);
413  // recreate columnar store for use
414  for (int i = 1; i <= md.getColumnCount(); i++) {
415  resetBinaryColumn(i, md, bufferSize, cols.get(i - 1));
416  }
417 
418  if (resultCount % 100000 == 0) {
419  LOGGER.info("Imported " + resultCount + " records");
420  }
421  }
422  }
423  if (bufferCount > 0) {
424  // send the LAST buffer to mapD
425  client.load_table_binary_columnar_polys(
426  session, cmd.getOptionValue("targetTable"), cols, null, true);
427  bufferCount = 0;
428  }
429 
430  // dump render group assignment data immediately
431  client.load_table_binary_columnar_polys(
432  session, cmd.getOptionValue("targetTable"), null, null, false);
433 
434  LOGGER.info("result set count is " + resultCount + " read time is "
435  + (System.currentTimeMillis() - timer) + "ms");
436 
437  // Clean-up environment
438  rs.close();
439  stmt.close();
440  conn.close();
441 
442  totalTime = System.currentTimeMillis() - startTime;
443  } catch (SQLException se) {
444  LOGGER.error("SQLException - " + se.toString());
445  se.printStackTrace();
446  } catch (TOmniSciException ex) {
447  LOGGER.error("TOmniSciException - " + ex.toString());
448  ex.printStackTrace();
449  } catch (TException ex) {
450  LOGGER.error("TException failed - " + ex.toString());
451  ex.printStackTrace();
452  } finally {
453  // finally block used to close resources
454  try {
455  if (stmt != null) {
456  stmt.close();
457  }
458  } catch (SQLException se2) {
459  } // nothing we can do
460  try {
461  if (conn != null) {
462  conn.close();
463  }
464  } catch (SQLException se) {
465  LOGGER.error("SQlException in close - " + se.toString());
466  se.printStackTrace();
467  }
468  try {
469  if (session != null) {
470  client.disconnect(session);
471  }
472  } catch (TOmniSciException ex) {
473  LOGGER.error("TOmniSciException - in finalization " + ex.toString());
474  ex.printStackTrace();
475  } catch (TException ex) {
476  LOGGER.error("TException - in finalization" + ex.toString());
477  ex.printStackTrace();
478  }
479  }
480  }
void resetBinaryColumn(int i, ResultSetMetaData md, int bufferSize, TColumn col)
void checkMapDTable(Connection otherdb_conn, ResultSetMetaData md)
void setColValue(ResultSet rs, TColumn col, int columnType, int colNum, int scale, String colTypeName)
TColumn setupBinaryColumn(int i, ResultSetMetaData md, int bufferSize)
void run_init(Connection conn)

+ Here is the call graph for this function:

+ Here is the caller graph for this function:

String com.mapd.utility.SQLImporter.getColType ( int  cType,
int  precision,
int  scale 
)
inlineprivate

Definition at line 795 of file SQLImporter.java.

Referenced by com.mapd.utility.SQLImporter.createMapDTable().

795  {
 796  // Note - if cType is OTHER an earlier call will have been made
 797  // to try and work out the db vendor's specific type.
798  if (precision > 19) {
799  precision = 19;
800  }
801  if (scale > 19) {
802  scale = 18;
803  }
804  switch (cType) {
805  case java.sql.Types.TINYINT:
806  return ("TINYINT");
807  case java.sql.Types.SMALLINT:
808  return ("SMALLINT");
809  case java.sql.Types.INTEGER:
810  return ("INTEGER");
811  case java.sql.Types.BIGINT:
812  return ("BIGINT");
813  case java.sql.Types.FLOAT:
814  return ("FLOAT");
815  case java.sql.Types.DECIMAL:
816  return ("DECIMAL(" + precision + "," + scale + ")");
817  case java.sql.Types.DOUBLE:
818  return ("DOUBLE");
819  case java.sql.Types.REAL:
820  return ("REAL");
821  case java.sql.Types.NUMERIC:
822  return ("NUMERIC(" + precision + "," + scale + ")");
823  case java.sql.Types.TIME:
824  return ("TIME");
825  case java.sql.Types.TIMESTAMP:
826  return ("TIMESTAMP");
827  case java.sql.Types.DATE:
828  return ("DATE");
829  case java.sql.Types.BOOLEAN:
830  case java.sql.Types
 831  .BIT: // deal with postgres treating boolean as bit... this will bite me
832  return ("BOOLEAN");
833  case java.sql.Types.NVARCHAR:
834  case java.sql.Types.VARCHAR:
835  case java.sql.Types.NCHAR:
836  case java.sql.Types.CHAR:
837  case java.sql.Types.LONGVARCHAR:
838  case java.sql.Types.LONGNVARCHAR:
839  return ("TEXT ENCODING DICT");
840  default:
841  throw new AssertionError("Column type " + cType + " not Supported");
842  }
843  }

+ Here is the caller graph for this function:

List<TColumnType> com.mapd.utility.SQLImporter.getColumnInfo ( String  tName)
inlineprivate

Definition at line 746 of file SQLImporter.java.

References com.mapd.utility.SQLImporter.session.

Referenced by com.mapd.utility.SQLImporter.checkMapDTable().

746  {
747  LOGGER.debug("Getting columns for " + tName);
748  List<TColumnType> row_descriptor = null;
749  try {
750  TTableDetails table_details = client.get_table_details(session, tName);
751  row_descriptor = table_details.row_desc;
752  } catch (TOmniSciException ex) {
753  LOGGER.error("column check failed - " + ex.toString());
754  exit(3);
755  } catch (TException ex) {
756  LOGGER.error("column check failed - " + ex.toString());
757  exit(3);
758  }
759  return row_descriptor;
760  }

+ Here is the caller graph for this function:

void com.mapd.utility.SQLImporter.help ( Options  options)
inlineprivate

Definition at line 509 of file SQLImporter.java.

509  {
510  // automatically generate the help statement
511  HelpFormatter formatter = new HelpFormatter();
512  formatter.setOptionComparator(null); // get options in the order they are created
513  formatter.printHelp("SQLImporter", options);
514  }
static void com.mapd.utility.SQLImporter.main ( String[]  args)
inlinestatic

Definition at line 318 of file SQLImporter.java.

References run_benchmark_import.args.

318  {
319  SQLImporter sq = new SQLImporter();
320  sq.doWork(args);
321  }
void com.mapd.utility.SQLImporter.resetBinaryColumn ( int  i,
ResultSetMetaData  md,
int  bufferSize,
TColumn  col 
) throws SQLException
inlineprivate

Definition at line 1016 of file SQLImporter.java.

References i.

Referenced by com.mapd.utility.SQLImporter.executeQuery().

1017  {
1018  col.nulls.clear();
1019 
1020  switch (md.getColumnType(i)) {
1021  case java.sql.Types.TINYINT:
1022  case java.sql.Types.SMALLINT:
1023  case java.sql.Types.INTEGER:
1024  case java.sql.Types.BIGINT:
1025  case java.sql.Types.TIME:
1026  case java.sql.Types.TIMESTAMP:
1027  case java.sql.Types
1028  .BIT: // deal with postgres treating boolean as bit... this will bite me
1029  case java.sql.Types.BOOLEAN:
1030  case java.sql.Types.DATE:
1031  case java.sql.Types.DECIMAL:
1032  case java.sql.Types.NUMERIC:
1033  col.data.int_col.clear();
1034  break;
1035 
1036  case java.sql.Types.FLOAT:
1037  case java.sql.Types.DOUBLE:
1038  case java.sql.Types.REAL:
1039  col.data.real_col.clear();
1040  break;
1041 
1042  case java.sql.Types.NVARCHAR:
1043  case java.sql.Types.VARCHAR:
1044  case java.sql.Types.NCHAR:
1045  case java.sql.Types.CHAR:
1046  case java.sql.Types.LONGVARCHAR:
1047  case java.sql.Types.LONGNVARCHAR:
1048  case java.sql.Types.OTHER:
1049  col.data.str_col.clear();
1050  break;
1051  default:
1052  throw new AssertionError("Column type " + md.getColumnType(i) + " not Supported");
1053  }
1054  }

+ Here is the caller graph for this function:

void com.mapd.utility.SQLImporter.run_init ( Connection  conn)
inlineprivate

Definition at line 482 of file SQLImporter.java.

References com.mapd.utility.SQLImporter.cmd, and generate_TableFunctionsFactory_init.line.

Referenced by com.mapd.utility.SQLImporter.executeQuery().

482  {
483  // attempt to open file
484  String line = "";
485  try {
486  BufferedReader reader =
487  new BufferedReader(new FileReader(cmd.getOptionValue("initializeFile")));
488  Statement stmt = conn.createStatement();
489  while ((line = reader.readLine()) != null) {
490  if (line.isEmpty()) {
491  continue;
492  }
493  LOGGER.info("Running : " + line);
494  stmt.execute(line);
495  }
496  stmt.close();
497  reader.close();
498  } catch (IOException e) {
499  LOGGER.error("Exception occurred trying to read initialize file: "
500  + cmd.getOptionValue("initFile"));
501  exit(1);
502  } catch (SQLException e) {
503  LOGGER.error(
504  "Exception occurred trying to execute initialize file entry : " + line);
505  exit(1);
506  }
507  }

+ Here is the caller graph for this function:

void com.mapd.utility.SQLImporter.setColValue ( ResultSet  rs,
TColumn  col,
int  columnType,
int  colNum,
int  scale,
String  colTypeName 
) throws SQLException
inlineprivate

Definition at line 891 of file SQLImporter.java.

References test_fsi.d, omnisci.dtypes.Date, Double, t, omnisci.dtypes.Time, and omnisci.dtypes.Timestamp.

Referenced by com.mapd.utility.SQLImporter.executeQuery().

896  {
897  switch (columnType) {
898  case java.sql.Types
 899  .BIT: // deal with postgres treating boolean as bit... this will bite me
900  case java.sql.Types.BOOLEAN:
901  Boolean b = rs.getBoolean(colNum);
902  if (rs.wasNull()) {
903  col.nulls.add(Boolean.TRUE);
904  col.data.int_col.add(0L);
905  } else {
906  col.nulls.add(Boolean.FALSE);
907  col.data.int_col.add(b ? 1L : 0L);
908  }
909  break;
910 
911  case java.sql.Types.DECIMAL:
912  case java.sql.Types.NUMERIC:
913  BigDecimal bd = rs.getBigDecimal(colNum);
914  if (rs.wasNull()) {
915  col.nulls.add(Boolean.TRUE);
916  col.data.int_col.add(0L);
917  } else {
918  col.nulls.add(Boolean.FALSE);
919  col.data.int_col.add(bd.multiply(new BigDecimal(pow(10L, scale))).longValue());
920  }
921  break;
922 
923  case java.sql.Types.TINYINT:
924  case java.sql.Types.SMALLINT:
925  case java.sql.Types.INTEGER:
926  case java.sql.Types.BIGINT:
927  Long l = rs.getLong(colNum);
928  if (rs.wasNull()) {
929  col.nulls.add(Boolean.TRUE);
930  col.data.int_col.add(new Long(0));
931  } else {
932  col.nulls.add(Boolean.FALSE);
933  col.data.int_col.add(l);
934  }
935  break;
936 
937  case java.sql.Types.TIME:
938  Time t = rs.getTime(colNum);
939  if (rs.wasNull()) {
940  col.nulls.add(Boolean.TRUE);
941  col.data.int_col.add(0L);
942 
943  } else {
944  col.data.int_col.add(dateTimeUtils.getSecondsFromMilliseconds(t.getTime()));
945  col.nulls.add(Boolean.FALSE);
946  }
947 
948  break;
949  case java.sql.Types.TIMESTAMP:
950  Timestamp ts = rs.getTimestamp(colNum);
951  if (rs.wasNull()) {
952  col.nulls.add(Boolean.TRUE);
953  col.data.int_col.add(0L);
954 
955  } else {
956  col.data.int_col.add(dateTimeUtils.getSecondsFromMilliseconds(ts.getTime()));
957  col.nulls.add(Boolean.FALSE);
958  }
959 
960  break;
961  case java.sql.Types.DATE:
962  Date d = rs.getDate(colNum);
963  if (rs.wasNull()) {
964  col.nulls.add(Boolean.TRUE);
965  col.data.int_col.add(0L);
966 
967  } else {
968  col.data.int_col.add(dateTimeUtils.getSecondsFromMilliseconds(d.getTime()));
969  col.nulls.add(Boolean.FALSE);
970  }
971  break;
972  case java.sql.Types.FLOAT:
973  case java.sql.Types.DOUBLE:
974  case java.sql.Types.REAL:
975  Double db = rs.getDouble(colNum);
976  if (rs.wasNull()) {
977  col.nulls.add(Boolean.TRUE);
978  col.data.real_col.add(new Double(0));
979 
980  } else {
981  col.nulls.add(Boolean.FALSE);
982  col.data.real_col.add(db);
983  }
984  break;
985 
986  case java.sql.Types.NVARCHAR:
987  case java.sql.Types.VARCHAR:
988  case java.sql.Types.NCHAR:
989  case java.sql.Types.CHAR:
990  case java.sql.Types.LONGVARCHAR:
991  case java.sql.Types.LONGNVARCHAR:
992  String strVal = rs.getString(colNum);
993  if (rs.wasNull()) {
994  col.nulls.add(Boolean.TRUE);
995  col.data.str_col.add("");
996 
997  } else {
998  col.data.str_col.add(strVal);
999  col.nulls.add(Boolean.FALSE);
1000  }
1001  break;
1002  case java.sql.Types.OTHER:
1003  if (rs.wasNull()) {
1004  col.nulls.add(Boolean.TRUE);
1005  col.data.str_col.add("");
1006  } else {
1007  col.data.str_col.add(vendor_types.get_wkt(rs, colNum, colTypeName));
1008  col.nulls.add(Boolean.FALSE);
1009  }
1010  break;
1011  default:
1012  throw new AssertionError("Column type " + columnType + " not Supported");
1013  }
1014  }
tuple d
Definition: test_fsi.py:9
char * t

+ Here is the caller graph for this function:

TColumn com.mapd.utility.SQLImporter.setupBinaryColumn ( int  i,
ResultSetMetaData  md,
int  bufferSize 
) throws SQLException
inlineprivate

Definition at line 845 of file SQLImporter.java.

References i.

Referenced by com.mapd.utility.SQLImporter.executeQuery().

846  {
847  TColumn col = new TColumn();
848 
849  col.nulls = new ArrayList<Boolean>(bufferSize);
850 
851  col.data = new TColumnData();
852 
853  switch (md.getColumnType(i)) {
854  case java.sql.Types.TINYINT:
855  case java.sql.Types.SMALLINT:
856  case java.sql.Types.INTEGER:
857  case java.sql.Types.BIGINT:
858  case java.sql.Types.TIME:
859  case java.sql.Types.TIMESTAMP:
860  case java.sql.Types
 861  .BIT: // deal with postgres treating boolean as bit... this will bite me
862  case java.sql.Types.BOOLEAN:
863  case java.sql.Types.DATE:
864  case java.sql.Types.DECIMAL:
865  case java.sql.Types.NUMERIC:
866  col.data.int_col = new ArrayList<Long>(bufferSize);
867  break;
868 
869  case java.sql.Types.FLOAT:
870  case java.sql.Types.DOUBLE:
871  case java.sql.Types.REAL:
872  col.data.real_col = new ArrayList<Double>(bufferSize);
873  break;
874 
875  case java.sql.Types.NVARCHAR:
876  case java.sql.Types.VARCHAR:
877  case java.sql.Types.NCHAR:
878  case java.sql.Types.CHAR:
879  case java.sql.Types.LONGVARCHAR:
880  case java.sql.Types.LONGNVARCHAR:
881  case java.sql.Types.OTHER:
882  col.data.str_col = new ArrayList<String>(bufferSize);
883  break;
884 
885  default:
886  throw new AssertionError("Column type " + md.getColumnType(i) + " not Supported");
887  }
888  return col;
889  }

+ Here is the caller graph for this function:

boolean com.mapd.utility.SQLImporter.tableExists ( String  tName)
inlineprivate

Definition at line 762 of file SQLImporter.java.

References com.mapd.utility.SQLImporter.session.

Referenced by com.mapd.utility.SQLImporter.checkMapDTable().

762  {
763  LOGGER.debug("Check for table " + tName);
764  try {
765  List<String> recv_get_tables = client.get_tables(session);
766  for (String s : recv_get_tables) {
767  if (s.equals(tName)) {
768  return true;
769  }
770  }
771  } catch (TOmniSciException ex) {
772  LOGGER.error("Table check failed - " + ex.toString());
773  exit(3);
774  } catch (TException ex) {
775  LOGGER.error("Table check failed - " + ex.toString());
776  exit(3);
777  }
778  return false;
779  }

+ Here is the caller graph for this function:

void com.mapd.utility.SQLImporter.verifyColumnSignaturesMatch ( Connection  otherdb_conn,
List< TColumnType >  dstColumns,
ResultSetMetaData  srcColumns 
) throws SQLException
inlineprivate

Definition at line 535 of file SQLImporter.java.

References BIGINT, com.mapd.utility.SQLImporter.cmd, DATE, DOUBLE, FLOAT, i, LINESTRING, MULTIPOLYGON, POINT, POLYGON, SMALLINT, TIME, TIMESTAMP, and TINYINT.

Referenced by com.mapd.utility.SQLImporter.checkMapDTable().

537  {
538  if (srcColumns.getColumnCount() != dstColumns.size()) {
539  LOGGER.error("Table sizes do not match: Destination " + dstColumns.size()
540  + " versus Source " + srcColumns.getColumnCount());
541  exit(1);
542  }
543  for (int i = 1; i <= dstColumns.size(); ++i) {
544  if (!dstColumns.get(i - 1).getCol_name().equalsIgnoreCase(
545  srcColumns.getColumnName(i))) {
546  LOGGER.error(
547  "Destination table does not have matching column in same order for column number "
548  + i + " destination column name is " + dstColumns.get(i - 1).col_name
549  + " versus target column " + srcColumns.getColumnName(i));
550  exit(1);
551  }
552  TDatumType dstType = dstColumns.get(i - 1).getCol_type().getType();
553  int dstPrecision = dstColumns.get(i - 1).getCol_type().getPrecision();
554  int dstScale = dstColumns.get(i - 1).getCol_type().getScale();
555  int srcType = srcColumns.getColumnType(i);
556  int srcPrecision = srcColumns.getPrecision(i);
557  int srcScale = srcColumns.getScale(i);
558 
559  boolean match = false;
560  switch (srcType) {
561  case java.sql.Types.TINYINT:
562  match |= dstType == TDatumType.TINYINT;
563  // NOTE: it's okay to import smaller type to a bigger one,
564  // so we just fall through and try to match the next type.
565  // But the order of case statements is important here!
566  case java.sql.Types.SMALLINT:
567  match |= dstType == TDatumType.SMALLINT;
568  case java.sql.Types.INTEGER:
569  match |= dstType == TDatumType.INT;
570  case java.sql.Types.BIGINT:
571  match |= dstType == TDatumType.BIGINT;
572  break;
573  case java.sql.Types.DECIMAL:
574  case java.sql.Types.NUMERIC:
575  match = dstType == TDatumType.DECIMAL && dstPrecision == srcPrecision
576  && dstScale == srcScale;
577  break;
578  case java.sql.Types.FLOAT:
579  case java.sql.Types.REAL:
580  match |= dstType == TDatumType.FLOAT;
581  // Fall through and try double
582  case java.sql.Types.DOUBLE:
583  match |= dstType == TDatumType.DOUBLE;
584  if (cmd.hasOption("AllowDoubleToFloat")) {
585  match |= dstType == TDatumType.FLOAT;
586  }
587  break;
588  case java.sql.Types.TIME:
589  match = dstType == TDatumType.TIME;
590  break;
591  case java.sql.Types.TIMESTAMP:
592  match = dstType == TDatumType.TIMESTAMP;
593  break;
594  case java.sql.Types.DATE:
595  match = dstType == TDatumType.DATE;
596  break;
597  case java.sql.Types.BOOLEAN:
598  case java.sql.Types
599  .BIT: // deal with postgres treating boolean as bit... this will bite me
600  match = dstType == TDatumType.BOOL;
601  break;
602  case java.sql.Types.NVARCHAR:
603  case java.sql.Types.VARCHAR:
604  case java.sql.Types.NCHAR:
605  case java.sql.Types.CHAR:
606  case java.sql.Types.LONGVARCHAR:
607  case java.sql.Types.LONGNVARCHAR:
608  match = (dstType == TDatumType.STR || dstType == TDatumType.POINT
609  || dstType == TDatumType.POLYGON || dstType == TDatumType.MULTIPOLYGON
610  || dstType == TDatumType.LINESTRING);
611  break;
612  case java.sql.Types.OTHER:
 613  // NOTE: I ignore subtypes (geography vs geometry vs none) here just because
614  // it makes no difference for OmniSciDB at the moment
615  Db_vendor_types.GisType gisType =
616  vendor_types.find_gis_type(otherdb_conn, srcColumns, i);
617  if (gisType.srid != dstScale) {
618  match = false;
619  break;
620  }
621  switch (dstType) {
622  case POINT:
623  match = gisType.type.equalsIgnoreCase("POINT");
624  break;
625  case LINESTRING:
626  match = gisType.type.equalsIgnoreCase("LINESTRING");
627  break;
628  case POLYGON:
629  match = gisType.type.equalsIgnoreCase("POLYGON");
630  break;
631  case MULTIPOLYGON:
632  match = gisType.type.equalsIgnoreCase("MULTIPOLYGON");
633  break;
634  default:
635  LOGGER.error("Column type " + JDBCType.valueOf(srcType).getName()
636  + " not Supported");
637  exit(1);
638  }
639  break;
640  default:
641  LOGGER.error("Column type " + JDBCType.valueOf(srcType).getName()
642  + " not Supported");
643  exit(1);
644  }
645  if (!match) {
646  LOGGER.error("Source and destination types for column "
647  + srcColumns.getColumnName(i)
648  + " do not match. Please make sure that type, precision and scale are exactly the same");
649  exit(1);
650  }
651  }
652  }
#define LINESTRING
#define SMALLINT
#define DOUBLE
#define BIGINT
#define DATE
#define MULTIPOLYGON
#define POINT
#define TIME
#define TINYINT
#define TIMESTAMP
#define POLYGON
#define FLOAT

+ Here is the caller graph for this function:

Member Data Documentation

OmniSci.Client com.mapd.utility.SQLImporter.client = null
protected

Definition at line 309 of file SQLImporter.java.

Referenced by com.mapd.utility.SQLImporter.createMapDConnection().

DateTimeUtils com.mapd.utility.SQLImporter.dateTimeUtils
private
Initial value:
= (milliseconds) -> {
return milliseconds / 1000;
}

Definition at line 312 of file SQLImporter.java.

final Logger com.mapd.utility.SQLImporter.LOGGER = LoggerFactory.getLogger(SQLImporter.class)
staticpackage

Definition at line 311 of file SQLImporter.java.

String com.mapd.utility.SQLImporter.session = null
protected

Definition at line 308 of file SQLImporter.java.

Referenced by com.mapd.utility.SQLImporter.createMapDConnection(), com.mapd.utility.SQLImporter.executeMapDCommand(), com.mapd.utility.SQLImporter.executeQuery(), com.mapd.utility.SQLImporter.getColumnInfo(), omnisci.thrift.OmniSci.disconnect_args.read(), omnisci.thrift.OmniSci.switch_database_args.read(), omnisci.thrift.OmniSci.clone_session_args.read(), omnisci.thrift.OmniSci.get_server_status_args.read(), omnisci.thrift.OmniSci.get_status_args.read(), omnisci.thrift.OmniSci.get_hardware_info_args.read(), omnisci.thrift.OmniSci.get_tables_args.read(), omnisci.thrift.OmniSci.get_physical_tables_args.read(), omnisci.thrift.OmniSci.get_views_args.read(), omnisci.thrift.OmniSci.get_tables_meta_args.read(), omnisci.thrift.OmniSci.get_table_details_args.read(), omnisci.thrift.OmniSci.get_internal_table_details_args.read(), omnisci.thrift.OmniSci.get_users_args.read(), omnisci.thrift.OmniSci.get_databases_args.read(), omnisci.thrift.OmniSci.start_heap_profile_args.read(), omnisci.thrift.OmniSci.stop_heap_profile_args.read(), omnisci.thrift.OmniSci.get_heap_profile_args.read(), omnisci.thrift.OmniSci.get_memory_args.read(), omnisci.thrift.OmniSci.clear_cpu_memory_args.read(), omnisci.thrift.OmniSci.clear_gpu_memory_args.read(), omnisci.thrift.OmniSci.set_table_epoch_args.read(), omnisci.thrift.OmniSci.set_table_epoch_by_name_args.read(), omnisci.thrift.OmniSci.get_table_epoch_args.read(), omnisci.thrift.OmniSci.get_table_epoch_by_name_args.read(), omnisci.thrift.OmniSci.get_table_epochs_args.read(), omnisci.thrift.OmniSci.set_table_epochs_args.read(), omnisci.thrift.OmniSci.get_session_info_args.read(), omnisci.thrift.OmniSci.sql_execute_args.read(), omnisci.thrift.OmniSci.sql_execute_df_args.read(), omnisci.thrift.OmniSci.sql_execute_gdf_args.read(), omnisci.thrift.OmniSci.deallocate_df_args.read(), omnisci.thrift.OmniSci.sql_validate_args.read(), omnisci.thrift.OmniSci.get_completion_hints_args.read(), omnisci.thrift.OmniSci.set_execution_mode_args.read(), 
omnisci.thrift.OmniSci.render_vega_args.read(), omnisci.thrift.OmniSci.get_result_row_for_pixel_args.read(), omnisci.thrift.OmniSci.get_dashboard_args.read(), omnisci.thrift.OmniSci.get_dashboards_args.read(), omnisci.thrift.OmniSci.create_dashboard_args.read(), omnisci.thrift.OmniSci.replace_dashboard_args.read(), omnisci.thrift.OmniSci.delete_dashboard_args.read(), omnisci.thrift.OmniSci.share_dashboards_args.read(), omnisci.thrift.OmniSci.delete_dashboards_args.read(), omnisci.thrift.OmniSci.share_dashboard_args.read(), omnisci.thrift.OmniSci.unshare_dashboard_args.read(), omnisci.thrift.OmniSci.unshare_dashboards_args.read(), omnisci.thrift.OmniSci.get_dashboard_grantees_args.read(), omnisci.thrift.OmniSci.get_link_view_args.read(), omnisci.thrift.OmniSci.create_link_args.read(), omnisci.thrift.OmniSci.load_table_binary_args.read(), omnisci.thrift.OmniSci.load_table_binary_columnar_args.read(), omnisci.thrift.OmniSci.load_table_binary_arrow_args.read(), omnisci.thrift.OmniSci.load_table_args.read(), omnisci.thrift.OmniSci.detect_column_types_args.read(), omnisci.thrift.OmniSci.create_table_args.read(), omnisci.thrift.OmniSci.import_table_args.read(), omnisci.thrift.OmniSci.import_geo_table_args.read(), omnisci.thrift.OmniSci.import_table_status_args.read(), omnisci.thrift.OmniSci.get_first_geo_file_in_archive_args.read(), omnisci.thrift.OmniSci.get_all_files_in_archive_args.read(), omnisci.thrift.OmniSci.get_layers_in_geo_file_args.read(), omnisci.thrift.OmniSci.query_get_outer_fragment_count_args.read(), omnisci.thrift.OmniSci.check_table_consistency_args.read(), omnisci.thrift.OmniSci.start_render_query_args.read(), omnisci.thrift.OmniSci.insert_data_args.read(), omnisci.thrift.OmniSci.checkpoint_args.read(), omnisci.thrift.OmniSci.get_roles_args.read(), omnisci.thrift.OmniSci.get_db_objects_for_grantee_args.read(), omnisci.thrift.OmniSci.get_db_object_privs_args.read(), omnisci.thrift.OmniSci.get_all_roles_for_user_args.read(), 
omnisci.thrift.OmniSci.has_role_args.read(), omnisci.thrift.OmniSci.has_object_privilege_args.read(), omnisci.thrift.OmniSci.set_license_key_args.read(), omnisci.thrift.OmniSci.get_license_claims_args.read(), omnisci.thrift.OmniSci.get_device_parameters_args.read(), omnisci.thrift.OmniSci.register_runtime_extension_functions_args.read(), com.mapd.utility.SQLImporter.tableExists(), omnisci.thrift.OmniSci.disconnect_args.write(), omnisci.thrift.OmniSci.switch_database_args.write(), omnisci.thrift.OmniSci.clone_session_args.write(), omnisci.thrift.OmniSci.get_server_status_args.write(), omnisci.thrift.OmniSci.get_status_args.write(), omnisci.thrift.OmniSci.get_hardware_info_args.write(), omnisci.thrift.OmniSci.get_tables_args.write(), omnisci.thrift.OmniSci.get_physical_tables_args.write(), omnisci.thrift.OmniSci.get_views_args.write(), omnisci.thrift.OmniSci.get_tables_meta_args.write(), omnisci.thrift.OmniSci.get_table_details_args.write(), omnisci.thrift.OmniSci.get_internal_table_details_args.write(), omnisci.thrift.OmniSci.get_users_args.write(), omnisci.thrift.OmniSci.get_databases_args.write(), omnisci.thrift.OmniSci.start_heap_profile_args.write(), omnisci.thrift.OmniSci.stop_heap_profile_args.write(), omnisci.thrift.OmniSci.get_heap_profile_args.write(), omnisci.thrift.OmniSci.get_memory_args.write(), omnisci.thrift.OmniSci.clear_cpu_memory_args.write(), omnisci.thrift.OmniSci.clear_gpu_memory_args.write(), omnisci.thrift.OmniSci.set_table_epoch_args.write(), omnisci.thrift.OmniSci.set_table_epoch_by_name_args.write(), omnisci.thrift.OmniSci.get_table_epoch_args.write(), omnisci.thrift.OmniSci.get_table_epoch_by_name_args.write(), omnisci.thrift.OmniSci.get_table_epochs_args.write(), omnisci.thrift.OmniSci.set_table_epochs_args.write(), omnisci.thrift.OmniSci.get_session_info_args.write(), omnisci.thrift.OmniSci.sql_execute_args.write(), omnisci.thrift.OmniSci.sql_execute_df_args.write(), omnisci.thrift.OmniSci.sql_execute_gdf_args.write(), 
omnisci.thrift.OmniSci.deallocate_df_args.write(), omnisci.thrift.OmniSci.sql_validate_args.write(), omnisci.thrift.OmniSci.get_completion_hints_args.write(), omnisci.thrift.OmniSci.set_execution_mode_args.write(), omnisci.thrift.OmniSci.render_vega_args.write(), omnisci.thrift.OmniSci.get_result_row_for_pixel_args.write(), omnisci.thrift.OmniSci.get_dashboard_args.write(), omnisci.thrift.OmniSci.get_dashboards_args.write(), omnisci.thrift.OmniSci.create_dashboard_args.write(), omnisci.thrift.OmniSci.replace_dashboard_args.write(), omnisci.thrift.OmniSci.delete_dashboard_args.write(), omnisci.thrift.OmniSci.share_dashboards_args.write(), omnisci.thrift.OmniSci.delete_dashboards_args.write(), omnisci.thrift.OmniSci.share_dashboard_args.write(), omnisci.thrift.OmniSci.unshare_dashboard_args.write(), omnisci.thrift.OmniSci.unshare_dashboards_args.write(), omnisci.thrift.OmniSci.get_dashboard_grantees_args.write(), omnisci.thrift.OmniSci.get_link_view_args.write(), omnisci.thrift.OmniSci.create_link_args.write(), omnisci.thrift.OmniSci.load_table_binary_args.write(), omnisci.thrift.OmniSci.load_table_binary_columnar_args.write(), omnisci.thrift.OmniSci.load_table_binary_arrow_args.write(), omnisci.thrift.OmniSci.load_table_args.write(), omnisci.thrift.OmniSci.detect_column_types_args.write(), omnisci.thrift.OmniSci.create_table_args.write(), omnisci.thrift.OmniSci.import_table_args.write(), omnisci.thrift.OmniSci.import_geo_table_args.write(), omnisci.thrift.OmniSci.import_table_status_args.write(), omnisci.thrift.OmniSci.get_first_geo_file_in_archive_args.write(), omnisci.thrift.OmniSci.get_all_files_in_archive_args.write(), omnisci.thrift.OmniSci.get_layers_in_geo_file_args.write(), omnisci.thrift.OmniSci.query_get_outer_fragment_count_args.write(), omnisci.thrift.OmniSci.check_table_consistency_args.write(), omnisci.thrift.OmniSci.start_render_query_args.write(), omnisci.thrift.OmniSci.insert_data_args.write(), omnisci.thrift.OmniSci.checkpoint_args.write(), 
omnisci.thrift.OmniSci.get_roles_args.write(), omnisci.thrift.OmniSci.get_db_objects_for_grantee_args.write(), omnisci.thrift.OmniSci.get_db_object_privs_args.write(), omnisci.thrift.OmniSci.get_all_roles_for_user_args.write(), omnisci.thrift.OmniSci.has_role_args.write(), omnisci.thrift.OmniSci.has_object_privilege_args.write(), omnisci.thrift.OmniSci.set_license_key_args.write(), omnisci.thrift.OmniSci.get_license_claims_args.write(), omnisci.thrift.OmniSci.get_device_parameters_args.write(), and omnisci.thrift.OmniSci.register_runtime_extension_functions_args.write().

Db_vendor_types com.mapd.utility.SQLImporter.vendor_types = null
package

Definition at line 316 of file SQLImporter.java.

Referenced by com.mapd.utility.SQLImporter.executeQuery().


The documentation for this class was generated from the following file: