OmniSciDB  95562058bd
 All Classes Namespaces Files Functions Variables Typedefs Enumerations Enumerator Friends Macros Pages
SQLImporter.java
Go to the documentation of this file.
1 /*
2  * Copyright 2017 MapD Technologies, Inc.
3  *
4  * Licensed under the Apache License, Version 2.0 (the "License");
5  * you may not use this file except in compliance with the License.
6  * You may obtain a copy of the License at
7  *
8  * http://www.apache.org/licenses/LICENSE-2.0
9  *
10  * Unless required by applicable law or agreed to in writing, software
11  * distributed under the License is distributed on an "AS IS" BASIS,
12  * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13  * See the License for the specific language governing permissions and
14  * limitations under the License.
15  */
16 package com.mapd.utility;
17 
18 import static java.lang.Math.pow;
19 import static java.lang.System.exit;
20 
23 import com.omnisci.thrift.server.*;
24 
25 import org.apache.commons.cli.*;
26 import org.apache.thrift.TException;
27 import org.apache.thrift.protocol.TBinaryProtocol;
28 import org.apache.thrift.protocol.TJSONProtocol;
29 import org.apache.thrift.protocol.TProtocol;
30 import org.apache.thrift.transport.TSocket;
31 import org.apache.thrift.transport.TTransport;
32 import org.apache.thrift.transport.TTransportException;
33 import org.slf4j.Logger;
34 import org.slf4j.LoggerFactory;
35 
36 import java.io.BufferedReader;
37 import java.io.FileReader;
38 import java.io.IOException;
39 import java.math.BigDecimal;
40 import java.security.KeyStore;
41 import java.sql.*;
42 import java.time.*;
43 import java.util.ArrayList;
44 import java.util.List;
45 
/**
 * Conversion hook from JDBC epoch-millisecond values to the epoch seconds
 * stored for TIME/TIMESTAMP/DATE columns (see the lambda field in SQLImporter,
 * which divides by 1000).
 */
interface DateTimeUtils {
  /** Converts an epoch-milliseconds value to epoch seconds. */
  long getSecondsFromMilliseconds(long milliseconds);
}
49 
50 class MutuallyExlusiveOptionsException extends ParseException {
51  protected MutuallyExlusiveOptionsException(String message) {
52  super(message);
53  }
54 
55  public static MutuallyExlusiveOptionsException create(String errMsg, String[] strings) {
56  StringBuffer sb = new StringBuffer(
57  "Mutually exclusive options used. " + errMsg + ". Options provided [");
58  for (String s : strings) {
59  sb.append(s);
60  sb.append(" ");
61  }
62  sb.setCharAt(sb.length() - 1, ']');
63  return new MutuallyExlusiveOptionsException(sb.toString());
64  }
65 }
67  private Options options = new Options();
68 
69  void printVersion() {
70  System.out.println("SQLImporter Version 4.6.0");
71  }
72 
74  StringBuffer sb = new StringBuffer("\nSQLImporter ");
75  // Ready for PKI auth
76  // sb.append("(-u <userid> -p <password> | --client-cert <key store filename>
77  sb.append("-u <userid> -p <password> [(--binary|--http|--https [--insecure])]\n");
78  sb.append("-s <omnisci server host> -db <omnisci db> --port <omnisci server port>\n");
79  // sb.append("([--ca-trust-store <ca trust store file name>]
80  // --ca-trust-store-password
81  // <trust store password> | --insecure)\n");
82  sb.append(
83  "[-d <other database JDBC drive class>] -c <other database JDBC connection string>\n");
84  sb.append(
85  "-su <other database user> -sp <other database user password> -su <other database sql statement>\n");
86  sb.append(
87  "-t <OmniSci target table> -b <transfer buffer size> -f <table fragment size>\n");
88  sb.append("[-tr] -i <init commands file>\n");
89  sb.append("\nSQLImporter -h | --help\n\n");
90 
91  HelpFormatter formatter = new HelpFormatter();
92  // Forces help to print out options in order they were added rather
93  // than in alphabetical order
94  formatter.setOptionComparator(null);
95  int help_width = 100;
96  formatter.printHelp(help_width, sb.toString(), "", options, "");
97  }
98 
  // Option-table construction (the enclosing constructor's signature line was
  // lost in extraction). Registers every flag the importer understands.
  // NOTE(review): parse() later checks cmd.hasOption("version"), but no
  // "version" option is registered here - confirm whether --version was dropped.
  options.addOption("r", true, "Row Load Limit");

  // OmniSci authentication options
  options.addOption(Option.builder("h").desc("help message").longOpt("help").build());
  options.addOption(
          Option.builder("u").hasArg().desc("OmniSci User").longOpt("user").build());
  options.addOption(Option.builder("p")
                            .hasArg()
                            .desc("OmniSci Password")
                            .longOpt("passwd")
                            .build());
  // OmniSci transport options - grouped so binary/http/https are mutually exclusive
  OptionGroup transport_grp = new OptionGroup();
  transport_grp.addOption(Option.builder()
                                  .desc("use binary transport to connect to OmniSci ")
                                  .longOpt("binary")
                                  .build());
  transport_grp.addOption(Option.builder()
                                  .desc("use http transport to connect to OmniSci ")
                                  .longOpt("http")
                                  .build());
  transport_grp.addOption(Option.builder()
                                  .desc("use https transport to connect to OmniSci ")
                                  .longOpt("https")
                                  .build());
  options.addOptionGroup(transport_grp);

  // OmniSci database server details
  options.addOption(Option.builder("s")
                            .hasArg()
                            .desc("OmniSci Server")
                            .longOpt("server")
                            .build());
  options.addOption(Option.builder("db")
                            .hasArg()
                            .desc("OmniSci Database")
                            .longOpt("database")
                            .build());
  options.addOption(
          Option.builder().hasArg().desc("OmniSci Port").longOpt("port").build());

  // OmniSci server authentication options (TLS trust store)
  options.addOption(Option.builder()
                            .hasArg()
                            .desc("CA certificate trust store")
                            .longOpt("ca-trust-store")
                            .build());
  options.addOption(Option.builder()
                            .hasArg()
                            .desc("CA certificate trust store password")
                            .longOpt("ca-trust-store-passwd")
                            .build());
  // NOTE(review): "Inseure" is a typo ("Insecure") in user-visible help text;
  // left untouched here since this is documentation-only.
  options.addOption(
          Option.builder()
                  .desc("Inseure TLS - do not validate server OmniSci server credentials")
                  .longOpt("insecure")
                  .build());

  // Source ("other") database connection details; most are required
  options.addOption(Option.builder("d")
                            .hasArg()
                            .desc("JDBC driver class")
                            .longOpt("driver")
                            .build());
  options.addOption(Option.builder("c")
                            .hasArg()
                            .desc("JDBC Connection string")
                            .longOpt("jdbcConnect")
                            .required()
                            .build());
  options.addOption(Option.builder("su")
                            .hasArg()
                            .desc("Source User")
                            .longOpt("sourceUser")
                            .required()
                            .build());
  options.addOption(Option.builder("sp")
                            .hasArg()
                            .desc("Source Password")
                            .longOpt("sourcePasswd")
                            .required()
                            .build());
  options.addOption(Option.builder("ss")
                            .hasArg()
                            .desc("SQL Select statement")
                            .longOpt("sqlStmt")
                            .required()
                            .build());

  // Target table in OmniSci
  options.addOption(Option.builder("t")
                            .hasArg()
                            .desc("OmniSci Target Table")
                            .longOpt("targetTable")
                            .required()
                            .build());

  // Batch size for load_table_binary_columnar and optional fragment size DDL
  options.addOption(Option.builder("b")
                            .hasArg()
                            .desc("transfer buffer size")
                            .longOpt("bufferSize")
                            .build());
  options.addOption(Option.builder("f")
                            .hasArg()
                            .desc("table fragment size")
                            .longOpt("fragmentSize")
                            .build());

  options.addOption(Option.builder("tr")
                            .desc("Truncate table if it exists")
                            .longOpt("truncate")
                            .build());
  options.addOption(Option.builder("i")
                            .hasArg()
                            .desc("File containing init command for DB")
                            .longOpt("initializeFile")
                            .build());
  }
217 
218  private Option setOptionRequired(Option option) {
219  option.setRequired(true);
220  return option;
221  }
222 
223  public CommandLine parse(String[] args) throws ParseException {
224  CommandLineParser clp = new DefaultParser() {
225  public CommandLine parse(Options options, String[] strings) throws ParseException {
226  Options helpOptions = new Options();
227  helpOptions.addOption(
228  Option.builder("h").desc("help message").longOpt("help").build());
229  try {
230  CommandLine cmd = super.parse(helpOptions, strings);
231  } catch (UnrecognizedOptionException uE) {
232  }
233  if (cmd.hasOption("help")) {
235  exit(0);
236  }
237  if (cmd.hasOption("version")) {
238  printVersion();
239  exit(0);
240  }
241  cmd = super.parse(options, strings);
242  if (!cmd.hasOption("user") && !cmd.hasOption("client-cert")) {
243  throw new MissingArgumentException(
244  "Must supply either an OmniSci db user or a user certificate");
245  }
246  // if user supplied must have password and visa versa
247  if (cmd.hasOption("user") || cmd.hasOption("passwd")) {
248  options.addOption(setOptionRequired(options.getOption("user")));
249  options.addOption(setOptionRequired(options.getOption("passwd")));
250  super.parse(options, strings);
251  }
252 
253  // FUTURE USE FOR USER Auth if user client-cert supplied must have client-key
254  // and
255  // visa versa
256  if (false) {
257  if (cmd.hasOption("client-cert") || cmd.hasOption("client-key")) {
258  options.addOption(setOptionRequired(options.getOption("ca-trust-store")));
259  options.addOption(
260  setOptionRequired(options.getOption("ca-trust-store-password")));
261  super.parse(options, strings);
262  }
263  if (options.getOption("user").isRequired()
264  && options.getOption("client-key").isRequired()) {
266  MutuallyExlusiveOptionsException.create(
267  "user/password can not be use with client-cert/client-key",
268  strings);
269  throw meo;
270  }
271 
272  if (cmd.hasOption("http")
273  || cmd.hasOption("binary")
274  && (cmd.hasOption("client-cert")
275  || cmd.hasOption("client-key"))) {
276  MutuallyExlusiveOptionsException meo = MutuallyExlusiveOptionsException.create(
277  "http|binary can not be use with ca-cert|client-cert|client-key",
278  strings);
279  }
280  }
281 
282  if (cmd.hasOption("insecure") && !cmd.hasOption("https")) {
283  MutuallyExlusiveOptionsException meo = MutuallyExlusiveOptionsException.create(
284  "insecure can only be use with https", strings);
285  throw meo;
286  }
287 
288  return cmd;
289  }
290 
291  public CommandLine parse(Options options, String[] strings, boolean b)
292  throws ParseException {
293  return null;
294  }
295  };
296  return clp.parse(options, args);
297  }
298 }
299 
300 public class SQLImporter {
  // OmniSci session token returned by client.connect(); null until connected.
  protected String session = null;
  // Thrift client for the OmniSci server; created in createMapDConnection().
  protected OmniSci.Client client = null;
  // Parsed command line; populated in doWork() and read throughout.
  private CommandLine cmd = null;
  final static Logger LOGGER = LoggerFactory.getLogger(SQLImporter.class);
  // Converts JDBC epoch milliseconds to the epoch seconds OmniSci stores.
  private DateTimeUtils dateTimeUtils = (milliseconds) -> {
    return milliseconds / 1000;
  };
308 
310 
311  public static void main(String[] args) {
312  SQLImporter sq = new SQLImporter();
313  sq.doWork(args);
314  }
315 
316  void doWork(String[] args) {
317  // create Options object
318 
319  SQLImporter_args s_args = new SQLImporter_args();
320 
321  try {
322  cmd = s_args.parse(args);
323  } catch (ParseException ex) {
324  LOGGER.error(ex.getLocalizedMessage());
325  s_args.printHelpMessage();
326  exit(0);
327  }
328  executeQuery();
329  }
330 
331  void executeQuery() {
332  Connection conn = null;
333  Statement stmt = null;
334 
335  long totalTime = 0;
336 
337  try {
338  // Open a connection
339  LOGGER.info("Connecting to database url :" + cmd.getOptionValue("jdbcConnect"));
340  conn = DriverManager.getConnection(cmd.getOptionValue("jdbcConnect"),
341  cmd.getOptionValue("sourceUser"),
342  cmd.getOptionValue("sourcePasswd"));
343  vendor_types = Db_vendor_types.Db_vendor_factory(cmd.getOptionValue("jdbcConnect"));
344  long startTime = System.currentTimeMillis();
345 
346  // run init file script on targe DB if present
347  if (cmd.hasOption("initializeFile")) {
348  run_init(conn);
349  }
350 
351  try {
353  conn.setAutoCommit(false);
354  }
355  } catch (SQLException se) {
356  LOGGER.warn(
357  "SQLException when attempting to setAutoCommit to false, jdbc driver probably doesnt support it. Error is "
358  + se.toString());
359  }
360 
361  // Execute a query
362  stmt = conn.createStatement();
363 
364  int bufferSize = Integer.valueOf(cmd.getOptionValue("bufferSize", "10000"));
365  // set the jdbc fetch buffer size to reduce the amount of records being moved to
366  // java from postgress
367  stmt.setFetchSize(bufferSize);
368  long timer;
369 
370  ResultSet rs = stmt.executeQuery(cmd.getOptionValue("sqlStmt"));
371 
372  // check if table already exists and is compatible in OmniSci with the query
373  // metadata
374  ResultSetMetaData md = rs.getMetaData();
375  checkMapDTable(conn, md);
376 
377  timer = System.currentTimeMillis();
378 
379  long resultCount = 0;
380  int bufferCount = 0;
381  long total = 0;
382 
383  List<TColumn> cols = new ArrayList(md.getColumnCount());
384  for (int i = 1; i <= md.getColumnCount(); i++) {
385  TColumn col = setupBinaryColumn(i, md, bufferSize);
386  cols.add(col);
387  }
388 
389  // read data from old DB
390  while (rs.next()) {
391  for (int i = 1; i <= md.getColumnCount(); i++) {
392  setColValue(rs,
393  cols.get(i - 1),
394  md.getColumnType(i),
395  i,
396  md.getScale(i),
397  md.getColumnTypeName(i));
398  }
399  resultCount++;
400  bufferCount++;
401  if (bufferCount == bufferSize) {
402  bufferCount = 0;
403  // send the buffer to mapD
404  client.load_table_binary_columnar(
405  session, cmd.getOptionValue("targetTable"), cols); // old
406  // recreate columnar store for use
407  for (int i = 1; i <= md.getColumnCount(); i++) {
408  resetBinaryColumn(i, md, bufferSize, cols.get(i - 1));
409  }
410 
411  if (resultCount % 100000 == 0) {
412  LOGGER.info("Imported " + resultCount + " records");
413  }
414  }
415  }
416  if (bufferCount > 0) {
417  // send the LAST buffer to mapD
418  client.load_table_binary_columnar(
419  session, cmd.getOptionValue("targetTable"), cols);
420  bufferCount = 0;
421  }
422  LOGGER.info("result set count is " + resultCount + " read time is "
423  + (System.currentTimeMillis() - timer) + "ms");
424 
425  // Clean-up environment
426  rs.close();
427  stmt.close();
428 
429  totalTime = System.currentTimeMillis() - startTime;
430  conn.close();
431  } catch (SQLException se) {
432  LOGGER.error("SQLException - " + se.toString());
433  se.printStackTrace();
434  } catch (TOmniSciException ex) {
435  LOGGER.error("TOmniSciException - " + ex.toString());
436  ex.printStackTrace();
437  } catch (TException ex) {
438  LOGGER.error("TException failed - " + ex.toString());
439  ex.printStackTrace();
440  } finally {
441  // finally block used to close resources
442  try {
443  if (stmt != null) {
444  stmt.close();
445  }
446  } catch (SQLException se2) {
447  } // nothing we can do
448  try {
449  if (conn != null) {
450  conn.close();
451  }
452  } catch (SQLException se) {
453  LOGGER.error("SQlException in close - " + se.toString());
454  se.printStackTrace();
455  } // end finally try
456  } // end try
457  }
458 
459  private void run_init(Connection conn) {
460  // attempt to open file
461  String line = "";
462  try {
463  BufferedReader reader =
464  new BufferedReader(new FileReader(cmd.getOptionValue("initializeFile")));
465  Statement stmt = conn.createStatement();
466  while ((line = reader.readLine()) != null) {
467  if (line.isEmpty()) {
468  continue;
469  }
470  LOGGER.info("Running : " + line);
471  stmt.execute(line);
472  }
473  stmt.close();
474  reader.close();
475  } catch (IOException e) {
476  LOGGER.error("Exception occurred trying to read initialize file: "
477  + cmd.getOptionValue("initFile"));
478  exit(1);
479  } catch (SQLException e) {
480  LOGGER.error(
481  "Exception occurred trying to execute initialize file entry : " + line);
482  exit(1);
483  }
484  }
485 
486  private void help(Options options) {
487  // automatically generate the help statement
488  HelpFormatter formatter = new HelpFormatter();
489  formatter.setOptionComparator(null); // get options in the order they are created
490  formatter.printHelp("SQLImporter", options);
491  }
492 
493  private void checkMapDTable(Connection otherdb_conn, ResultSetMetaData md)
494  throws SQLException {
496  String tName = cmd.getOptionValue("targetTable");
497 
498  if (tableExists(tName)) {
499  // check if we want to truncate
500  if (cmd.hasOption("truncate")) {
501  executeMapDCommand("Drop table " + tName);
502  createMapDTable(otherdb_conn, md);
503  } else {
504  List<TColumnType> columnInfo = getColumnInfo(tName);
505  verifyColumnSignaturesMatch(otherdb_conn, columnInfo, md);
506  }
507  } else {
508  createMapDTable(otherdb_conn, md);
509  }
510  }
511 
  /**
   * Verifies that the existing OmniSci table's columns line up with the source
   * query: same column count, same names (case-insensitive, same order), and
   * compatible types. A source integer type may map to any wider integer
   * destination (the switch deliberately falls through). Exits the process on
   * any mismatch.
   *
   * @param otherdb_conn source connection, used only to resolve vendor GIS types
   * @param dstColumns column descriptors of the existing OmniSci table
   * @param srcColumns metadata of the source result set
   * @throws SQLException on source metadata access failure
   */
  private void verifyColumnSignaturesMatch(Connection otherdb_conn,
          List<TColumnType> dstColumns,
          ResultSetMetaData srcColumns) throws SQLException {
    // Column counts must agree before any per-column comparison.
    if (srcColumns.getColumnCount() != dstColumns.size()) {
      LOGGER.error("Table sizes do not match: Destination " + dstColumns.size()
              + " versus Source " + srcColumns.getColumnCount());
      exit(1);
    }
    for (int i = 1; i <= dstColumns.size(); ++i) {
      // Names must match positionally (case-insensitive).
      if (!dstColumns.get(i - 1).getCol_name().equalsIgnoreCase(
                  srcColumns.getColumnName(i))) {
        LOGGER.error(
                "Destination table does not have matching column in same order for column number"
                + i + " destination column name is " + dstColumns.get(i - 1).col_name
                + " versus Select " + srcColumns.getColumnName(i));
        exit(1);
      }
      TDatumType dstType = dstColumns.get(i - 1).getCol_type().getType();
      int dstPrecision = dstColumns.get(i - 1).getCol_type().getPrecision();
      int dstScale = dstColumns.get(i - 1).getCol_type().getScale();
      int srcType = srcColumns.getColumnType(i);
      int srcPrecision = srcColumns.getPrecision(i);
      int srcScale = srcColumns.getScale(i);

      boolean match = false;
      switch (srcType) {
        case java.sql.Types.TINYINT:
          match |= dstType == TDatumType.TINYINT;
          // NOTE: it's okay to import smaller type to a bigger one,
          // so we just fall through and try to match the next type.
          // But the order of case statements is important here!
        case java.sql.Types.SMALLINT:
          match |= dstType == TDatumType.SMALLINT;
        case java.sql.Types.INTEGER:
          match |= dstType == TDatumType.INT;
        case java.sql.Types.BIGINT:
          match |= dstType == TDatumType.BIGINT;
          break;
        case java.sql.Types.DECIMAL:
        case java.sql.Types.NUMERIC:
          // Decimals must match precision and scale exactly, not just type.
          match = dstType == TDatumType.DECIMAL && dstPrecision == srcPrecision
                  && dstScale == srcScale;
          break;
        case java.sql.Types.FLOAT:
        case java.sql.Types.REAL:
          match |= dstType == TDatumType.FLOAT;
          // Fall through and try double
        case java.sql.Types.DOUBLE:
          match |= dstType == TDatumType.DOUBLE;
          break;
        case java.sql.Types.TIME:
          match = dstType == TDatumType.TIME;
          break;
        case java.sql.Types.TIMESTAMP:
          match = dstType == TDatumType.TIMESTAMP;
          break;
        case java.sql.Types.DATE:
          match = dstType == TDatumType.DATE;
          break;
        case java.sql.Types.BOOLEAN:
        case java.sql.Types
                .BIT: // deal with postgres treating boolean as bit... this will bite me
          match = dstType == TDatumType.BOOL;
          break;
        case java.sql.Types.NVARCHAR:
        case java.sql.Types.VARCHAR:
        case java.sql.Types.NCHAR:
        case java.sql.Types.CHAR:
        case java.sql.Types.LONGVARCHAR:
        case java.sql.Types.LONGNVARCHAR:
          match = dstType == TDatumType.STR;
          break;
        case java.sql.Types.OTHER:
          // Vendor-specific column (GIS): compare SRID (carried in dstScale)
          // and the geometry kind reported by the vendor adapter.
          // NOTE: I ignore subtypes (geography vs geopetry vs none) here just because it
          // makes no difference for OmniSciDB at the moment
          Db_vendor_types.GisType gisType =
                  vendor_types.find_gis_type(otherdb_conn, srcColumns, i);
          if (gisType.srid != dstScale) {
            match = false;
            break;
          }
          switch (dstType) {
            case POINT:
              match = gisType.type.equalsIgnoreCase("POINT");
              break;
            case LINESTRING:
              match = gisType.type.equalsIgnoreCase("LINESTRING");
              break;
            case POLYGON:
              match = gisType.type.equalsIgnoreCase("POLYGON");
              break;
            case MULTIPOLYGON:
              match = gisType.type.equalsIgnoreCase("MULTIPOLYGON");
              break;
            default:
              LOGGER.error("Column type " + JDBCType.valueOf(srcType).getName()
                      + " not Supported");
              exit(1);
          }
          break;
        default:
          LOGGER.error("Column type " + JDBCType.valueOf(srcType).getName()
                  + " not Supported");
          exit(1);
      }
      if (!match) {
        LOGGER.error("Source and destination types for column "
                + srcColumns.getColumnName(i)
                + " do not match. Please make sure that type, precision and scale are exactly the same");
        exit(1);
      }
    }
  }
625 
626  private void createMapDTable(Connection otherdb_conn, ResultSetMetaData metaData) {
627  StringBuilder sb = new StringBuilder();
628  sb.append("Create table ").append(cmd.getOptionValue("targetTable")).append("(");
629 
630  // Now iterate the metadata
631  try {
632  for (int i = 1; i <= metaData.getColumnCount(); i++) {
633  if (i > 1) {
634  sb.append(",");
635  }
636  LOGGER.debug("Column name is " + metaData.getColumnName(i));
637  LOGGER.debug("Column type is " + metaData.getColumnTypeName(i));
638  LOGGER.debug("Column type is " + metaData.getColumnType(i));
639 
640  sb.append(metaData.getColumnName(i)).append(" ");
641  int col_type = metaData.getColumnType(i);
642  if (col_type == java.sql.Types.OTHER) {
643  Db_vendor_types.GisType type =
644  vendor_types.find_gis_type(otherdb_conn, metaData, i);
645  sb.append(Db_vendor_types.gis_type_to_str(type));
646  } else {
647  sb.append(getColType(metaData.getColumnType(i),
648  metaData.getPrecision(i),
649  metaData.getScale(i)));
650  }
651  }
652  sb.append(")");
653 
654  if (Integer.valueOf(cmd.getOptionValue("fragmentSize", "0")) > 0) {
655  sb.append(" with (fragment_size = ");
656  sb.append(cmd.getOptionValue("fragmentSize", "0"));
657  sb.append(")");
658  }
659 
660  } catch (SQLException ex) {
661  LOGGER.error("Error processing the metadata - " + ex.toString());
662  exit(1);
663  }
664 
665  executeMapDCommand(sb.toString());
666  }
667 
668  private void createMapDConnection() {
669  TTransport transport = null;
670  TProtocol protocol = new TBinaryProtocol(transport);
671  int port = Integer.valueOf(cmd.getOptionValue("port", "6274"));
672  String server = cmd.getOptionValue("server", "localhost");
673  try {
674  // Uses default certificate stores.
675  boolean load_trust_store = cmd.hasOption("https");
676  SockTransportProperties skT = null;
677  if (cmd.hasOption("https")) {
678  skT = SockTransportProperties.getEncryptedClientDefaultTrustStore(
679  !cmd.hasOption("insecure"));
680  transport = skT.openHttpsClientTransport(server, port);
681  transport.open();
682  protocol = new TJSONProtocol(transport);
683  } else if (cmd.hasOption("http")) {
684  skT = SockTransportProperties.getUnencryptedClient();
685  transport = skT.openHttpClientTransport(server, port);
686  protocol = new TJSONProtocol(transport);
687  } else {
688  skT = SockTransportProperties.getUnencryptedClient();
689  transport = skT.openClientTransport(server, port);
690  transport.open();
691  protocol = new TBinaryProtocol(transport);
692  }
693 
694  client = new OmniSci.Client(protocol);
695  // This if will be useless until PKI signon
696  if (cmd.hasOption("user")) {
697  session = client.connect(cmd.getOptionValue("user", "admin"),
698  cmd.getOptionValue("passwd", "HyperInteractive"),
699  cmd.getOptionValue("database", "omnisci"));
700  }
701  LOGGER.debug("Connected session is " + session);
702 
703  } catch (TTransportException ex) {
704  LOGGER.error("Connection failed - " + ex.toString());
705  exit(1);
706  } catch (TOmniSciException ex) {
707  LOGGER.error("Connection failed - " + ex.toString());
708  exit(2);
709  } catch (TException ex) {
710  LOGGER.error("Connection failed - " + ex.toString());
711  exit(3);
712  } catch (Exception ex) {
713  LOGGER.error("General exception - " + ex.toString());
714  exit(4);
715  }
716  }
717 
718  private List<TColumnType> getColumnInfo(String tName) {
719  LOGGER.debug("Getting columns for " + tName);
720  List<TColumnType> row_descriptor = null;
721  try {
722  TTableDetails table_details = client.get_table_details(session, tName);
723  row_descriptor = table_details.row_desc;
724  } catch (TOmniSciException ex) {
725  LOGGER.error("column check failed - " + ex.toString());
726  exit(3);
727  } catch (TException ex) {
728  LOGGER.error("column check failed - " + ex.toString());
729  exit(3);
730  }
731  return row_descriptor;
732  }
733 
734  private boolean tableExists(String tName) {
735  LOGGER.debug("Check for table " + tName);
736  try {
737  List<String> recv_get_tables = client.get_tables(session);
738  for (String s : recv_get_tables) {
739  if (s.equals(tName)) {
740  return true;
741  }
742  }
743  } catch (TOmniSciException ex) {
744  LOGGER.error("Table check failed - " + ex.toString());
745  exit(3);
746  } catch (TException ex) {
747  LOGGER.error("Table check failed - " + ex.toString());
748  exit(3);
749  }
750  return false;
751  }
752 
753  private void executeMapDCommand(String sql) {
754  LOGGER.info(" run comamnd :" + sql);
755 
756  try {
757  TQueryResult sqlResult = client.sql_execute(session, sql + ";", true, null, -1, -1);
758  } catch (TOmniSciException ex) {
759  LOGGER.error("SQL Execute failed - " + ex.toString());
760  exit(1);
761  } catch (TException ex) {
762  LOGGER.error("SQL Execute failed - " + ex.toString());
763  exit(1);
764  }
765  }
766 
767  private String getColType(int cType, int precision, int scale) {
768  // Note - if cType is OTHER a earlier call will have been made
769  // to try and work out the db vendors specific type.
770  if (precision > 19) {
771  precision = 19;
772  }
773  if (scale > 19) {
774  scale = 18;
775  }
776  switch (cType) {
777  case java.sql.Types.TINYINT:
778  return ("TINYINT");
779  case java.sql.Types.SMALLINT:
780  return ("SMALLINT");
781  case java.sql.Types.INTEGER:
782  return ("INTEGER");
783  case java.sql.Types.BIGINT:
784  return ("BIGINT");
785  case java.sql.Types.FLOAT:
786  return ("FLOAT");
787  case java.sql.Types.DECIMAL:
788  return ("DECIMAL(" + precision + "," + scale + ")");
789  case java.sql.Types.DOUBLE:
790  return ("DOUBLE");
791  case java.sql.Types.REAL:
792  return ("REAL");
793  case java.sql.Types.NUMERIC:
794  return ("NUMERIC(" + precision + "," + scale + ")");
795  case java.sql.Types.TIME:
796  return ("TIME");
797  case java.sql.Types.TIMESTAMP:
798  return ("TIMESTAMP");
799  case java.sql.Types.DATE:
800  return ("DATE");
801  case java.sql.Types.BOOLEAN:
802  case java.sql.Types
803  .BIT: // deal with postgress treating boolean as bit... this will bite me
804  return ("BOOLEAN");
805  case java.sql.Types.NVARCHAR:
806  case java.sql.Types.VARCHAR:
807  case java.sql.Types.NCHAR:
808  case java.sql.Types.CHAR:
809  case java.sql.Types.LONGVARCHAR:
810  case java.sql.Types.LONGNVARCHAR:
811  return ("TEXT ENCODING DICT");
812  default:
813  throw new AssertionError("Column type " + cType + " not Supported");
814  }
815  }
816 
817  private TColumn setupBinaryColumn(int i, ResultSetMetaData md, int bufferSize)
818  throws SQLException {
819  TColumn col = new TColumn();
820 
821  col.nulls = new ArrayList<Boolean>(bufferSize);
822 
823  col.data = new TColumnData();
824 
825  switch (md.getColumnType(i)) {
826  case java.sql.Types.TINYINT:
827  case java.sql.Types.SMALLINT:
828  case java.sql.Types.INTEGER:
829  case java.sql.Types.BIGINT:
830  case java.sql.Types.TIME:
831  case java.sql.Types.TIMESTAMP:
832  case java.sql.Types
833  .BIT: // deal with postgress treating boolean as bit... this will bite me
834  case java.sql.Types.BOOLEAN:
835  case java.sql.Types.DATE:
836  case java.sql.Types.DECIMAL:
837  case java.sql.Types.NUMERIC:
838  col.data.int_col = new ArrayList<Long>(bufferSize);
839  break;
840 
841  case java.sql.Types.FLOAT:
842  case java.sql.Types.DOUBLE:
843  case java.sql.Types.REAL:
844  col.data.real_col = new ArrayList<Double>(bufferSize);
845  break;
846 
847  case java.sql.Types.NVARCHAR:
848  case java.sql.Types.VARCHAR:
849  case java.sql.Types.NCHAR:
850  case java.sql.Types.CHAR:
851  case java.sql.Types.LONGVARCHAR:
852  case java.sql.Types.LONGNVARCHAR:
853  case java.sql.Types.OTHER:
854  col.data.str_col = new ArrayList<String>(bufferSize);
855  break;
856 
857  default:
858  throw new AssertionError("Column type " + md.getColumnType(i) + " not Supported");
859  }
860  return col;
861  }
862 
  /**
   * Reads column colNum of the current result-set row and appends it to the
   * matching TColumn buffer, recording a null flag (with a type-appropriate
   * placeholder value) when the source value was SQL NULL.
   *
   * @param rs source result set positioned on the current row
   * @param col destination columnar buffer for this column
   * @param columnType JDBC type constant from java.sql.Types
   * @param colNum 1-based column index
   * @param scale decimal scale, used to convert DECIMAL/NUMERIC to unscaled longs
   * @param colTypeName vendor type name, used for OTHER (GIS) columns
   * @throws SQLException on result-set access failure
   */
  private void setColValue(ResultSet rs,
          TColumn col,
          int columnType,
          int colNum,
          int scale,
          String colTypeName) throws SQLException {
    switch (columnType) {
      case java.sql.Types
              .BIT: // deal with postgress treating boolean as bit... this will bite me
      case java.sql.Types.BOOLEAN:
        // Booleans are stored as 0/1 longs.
        Boolean b = rs.getBoolean(colNum);
        if (rs.wasNull()) {
          col.nulls.add(Boolean.TRUE);
          col.data.int_col.add(0L);
        } else {
          col.nulls.add(Boolean.FALSE);
          col.data.int_col.add(b ? 1L : 0L);
        }
        break;

      case java.sql.Types.DECIMAL:
      case java.sql.Types.NUMERIC:
        // Decimals are shipped as unscaled longs: value * 10^scale.
        // NOTE(review): pow() is double-valued, so extreme scales/values could
        // lose precision versus BigDecimal.movePointRight - confirm acceptable.
        BigDecimal bd = rs.getBigDecimal(colNum);
        if (rs.wasNull()) {
          col.nulls.add(Boolean.TRUE);
          col.data.int_col.add(0L);
        } else {
          col.nulls.add(Boolean.FALSE);
          col.data.int_col.add(bd.multiply(new BigDecimal(pow(10L, scale))).longValue());
        }
        break;

      case java.sql.Types.TINYINT:
      case java.sql.Types.SMALLINT:
      case java.sql.Types.INTEGER:
      case java.sql.Types.BIGINT:
        Long l = rs.getLong(colNum);
        if (rs.wasNull()) {
          col.nulls.add(Boolean.TRUE);
          col.data.int_col.add(new Long(0));
        } else {
          col.nulls.add(Boolean.FALSE);
          col.data.int_col.add(l);
        }
        break;

      case java.sql.Types.TIME:
        // Temporal types are stored as epoch seconds (milliseconds / 1000).
        Time t = rs.getTime(colNum);
        if (rs.wasNull()) {
          col.nulls.add(Boolean.TRUE);
          col.data.int_col.add(0L);

        } else {
          col.data.int_col.add(dateTimeUtils.getSecondsFromMilliseconds(t.getTime()));
          col.nulls.add(Boolean.FALSE);
        }

        break;
      case java.sql.Types.TIMESTAMP:
        Timestamp ts = rs.getTimestamp(colNum);
        if (rs.wasNull()) {
          col.nulls.add(Boolean.TRUE);
          col.data.int_col.add(0L);

        } else {
          col.data.int_col.add(dateTimeUtils.getSecondsFromMilliseconds(ts.getTime()));
          col.nulls.add(Boolean.FALSE);
        }

        break;
      case java.sql.Types.DATE:
        Date d = rs.getDate(colNum);
        if (rs.wasNull()) {
          col.nulls.add(Boolean.TRUE);
          col.data.int_col.add(0L);

        } else {
          col.data.int_col.add(dateTimeUtils.getSecondsFromMilliseconds(d.getTime()));
          col.nulls.add(Boolean.FALSE);
        }
        break;
      case java.sql.Types.FLOAT:
      case java.sql.Types.DOUBLE:
      case java.sql.Types.REAL:
        Double db = rs.getDouble(colNum);
        if (rs.wasNull()) {
          col.nulls.add(Boolean.TRUE);
          col.data.real_col.add(new Double(0));

        } else {
          col.nulls.add(Boolean.FALSE);
          col.data.real_col.add(db);
        }
        break;

      case java.sql.Types.NVARCHAR:
      case java.sql.Types.VARCHAR:
      case java.sql.Types.NCHAR:
      case java.sql.Types.CHAR:
      case java.sql.Types.LONGVARCHAR:
      case java.sql.Types.LONGNVARCHAR:
        String strVal = rs.getString(colNum);
        if (rs.wasNull()) {
          col.nulls.add(Boolean.TRUE);
          col.data.str_col.add("");

        } else {
          col.data.str_col.add(strVal);
          col.nulls.add(Boolean.FALSE);
        }
        break;
      case java.sql.Types.OTHER:
        // Vendor (GIS) columns are fetched as WKT strings via the vendor adapter.
        // NOTE(review): JDBC wasNull() reflects the LAST column read, but here
        // it is consulted before any getter for this column - this looks like
        // it tests the previous column's null-ness; confirm against upstream.
        if (rs.wasNull()) {
          col.nulls.add(Boolean.TRUE);
          col.data.str_col.add("");
        } else {
          col.data.str_col.add(vendor_types.get_wkt(rs, colNum, colTypeName));
          col.nulls.add(Boolean.FALSE);
        }
        break;
      default:
        throw new AssertionError("Column type " + columnType + " not Supported");
    }
  }
987 
988  private void resetBinaryColumn(int i, ResultSetMetaData md, int bufferSize, TColumn col)
989  throws SQLException {
990  col.nulls.clear();
991 
992  switch (md.getColumnType(i)) {
993  case java.sql.Types.TINYINT:
994  case java.sql.Types.SMALLINT:
995  case java.sql.Types.INTEGER:
996  case java.sql.Types.BIGINT:
997  case java.sql.Types.TIME:
998  case java.sql.Types.TIMESTAMP:
999  case java.sql.Types
1000  .BIT: // deal with postgress treating boolean as bit... this will bite me
1001  case java.sql.Types.BOOLEAN:
1002  case java.sql.Types.DATE:
1003  case java.sql.Types.DECIMAL:
1004  case java.sql.Types.NUMERIC:
1005  col.data.int_col.clear();
1006  break;
1007 
1008  case java.sql.Types.FLOAT:
1009  case java.sql.Types.DOUBLE:
1010  case java.sql.Types.REAL:
1011  col.data.real_col.clear();
1012  break;
1013 
1014  case java.sql.Types.NVARCHAR:
1015  case java.sql.Types.VARCHAR:
1016  case java.sql.Types.NCHAR:
1017  case java.sql.Types.CHAR:
1018  case java.sql.Types.LONGVARCHAR:
1019  case java.sql.Types.LONGNVARCHAR:
1020  case java.sql.Types.OTHER:
1021  col.data.str_col.clear();
1022  break;
1023  default:
1024  throw new AssertionError("Column type " + md.getColumnType(i) + " not Supported");
1025  }
1026  }
1027 }
void resetBinaryColumn(int i, ResultSetMetaData md, int bufferSize, TColumn col)
Option setOptionRequired(Option option)
String getColType(int cType, int precision, int scale)
void checkMapDTable(Connection otherdb_conn, ResultSetMetaData md)
tuple line
Definition: parse_ast.py:10
void help(Options options)
static void main(String[] args)
void doWork(String[] args)
void setColValue(ResultSet rs, TColumn col, int columnType, int colNum, int scale, String colTypeName)
List< TColumnType > getColumnInfo(String tName)
CommandLine parse(String[] args)
size_t append(FILE *f, const size_t size, int8_t *buf)
Appends the specified number of bytes to the end of the file f from buf.
Definition: File.cpp:140
void createMapDTable(Connection otherdb_conn, ResultSetMetaData metaData)
void verifyColumnSignaturesMatch(Connection otherdb_conn, List< TColumnType > dstColumns, ResultSetMetaData srcColumns)
static final Logger LOGGER
boolean tableExists(String tName)
long getSecondsFromMilliseconds(long milliseconds)
void executeMapDCommand(String sql)
TColumn setupBinaryColumn(int i, ResultSetMetaData md, int bufferSize)
static MutuallyExlusiveOptionsException create(String errMsg, String[] strings)
void run_init(Connection conn)