OmniSciDB  04ee39c94c
run-benchmark Namespace Reference

Functions

def get_connection (kwargs)
 
def validate_query_file (kwargs)
 
def execute_query (kwargs)
 
def calculate_query_times (kwargs)
 
def get_mem_usage (kwargs)
 
def json_format_handler (x)
 

Variables

 parser = ArgumentParser()
 
 optional = parser._action_groups.pop()
 
 required = parser.add_argument_group("required arguments")
 
 action
 
 help
 
 dest
 
 default
 
 type
 
 True
 
 args = parser.parse_args()
 
 level
 
 source_db_user = args.user
 
 source_db_passwd = args.passwd
 
 source_db_server = args.server
 
 source_db_port = args.port
 
 source_db_name = args.name
 
 source_table = args.table
 
 label = args.label
 
 queries_dir = args.queries_dir
 
 iterations = int(args.iterations)
 
 gpu_count = args.gpu_count
 
 gpu_name = args.gpu_name
 
 no_gather_conn_gpu_info = args.no_gather_conn_gpu_info
 
 gather_nvml_gpu_info = args.gather_nvml_gpu_info
 
 no_gather_nvml_gpu_info = args.no_gather_nvml_gpu_info
 
 machine_name = args.machine_name
 
 machine_uname = args.machine_uname
 
 destinations = args.destination.split(",")
 
bool valid_destination_set = True
 
 dest_db_user = args.dest_user
 
 dest_db_passwd = args.dest_passwd
 
 dest_db_server = args.dest_server
 
 dest_db_port = args.dest_port
 
 dest_db_name = args.dest_name
 
 dest_table = args.dest_table
 
 dest_table_schema_file = args.dest_table_schema_file
 
 output_file_json = args.output_file_json
 
 output_file_jenkins = args.output_file_jenkins
 
 output_tag_jenkins = args.output_tag_jenkins
 
def con
 
 run_guid = str(uuid.uuid4())
 
 run_timestamp = datetime.datetime.now()
 
 run_connection = str(con)
 
string run_driver = ""
 
def run_version = con._client.get_version()
 
def run_version_short = run_version.split("-")[0]
 
 conn_machine_name = re.search(r"@(.*?):", run_connection).group(1)
 
 conn_gpu_count = None
 
 source_db_gpu_count = None
 
 source_db_gpu_mem = None
 
string source_db_gpu_driver_ver = ""
 
string source_db_gpu_name = ""
 
def conn_hardware_info = con._client.get_hardware_info(con._session)
 
 handle = pynvml.nvmlDeviceGetHandleByIndex(i)
 
 local_uname = os.uname()
 
 run_machine_name = machine_name
 
 run_machine_uname = machine_uname
 
list query_list = []
 
 query_filename
 
 query_mapdql = query_filepath.read().replace("\n", " ")
 
 query_id = query["name"].rsplit(".")[0]
 
list query_results = []
 
 query_total_start_time = timeit.default_timer()
 
def pre_query_cpu_mem_usage = get_mem_usage(con=con, mem_type="cpu")
 
def pre_query_gpu_mem_usage = get_mem_usage(con=con, mem_type="gpu")
 
def query_result
 
def post_query_cpu_mem_usage = get_mem_usage(con=con, mem_type="cpu")
 
def post_query_gpu_mem_usage = get_mem_usage(con=con, mem_type="gpu")
 
 query_cpu_mem_usage
 
 query_gpu_mem_usage
 
string query_error_info = ""
 
 first_execution_time = round(query_result["execution_time"], 1)
 
 first_connect_time = round(query_result["connect_time"], 1)
 
 first_results_iter_time
 
tuple first_total_time
 
 first_cpu_mem_usage = query_cpu_mem_usage
 
 first_gpu_mem_usage = query_gpu_mem_usage
 
 query_total_elapsed_time
 
 execution_times
 
 connect_times
 
 results_iter_times
 
 total_times
 
def result_count = query_result["result_count"]
 
def query_times
 
list succesful_query_list = query_list
 
 query_list_json = json.dumps(query_list, default=json_format_handler, indent=2)
 
 results_df = DataFrame(query_results)
 
def dest_con
 
def tables = dest_con.get_tables()
 
 create_table_sql = table_schema.read().replace("\n", " ")
 
def res = dest_con.execute(create_table_sql)
 
 preserve_index
 
 chunk_size_bytes
 
 col_names_from_schema
 
 file_json_open = open(output_file_json, "w")
 
list jenkins_bench_results = []
 
 jenkins_bench_json
 
 file_jenkins_open = open(output_file_jenkins, "w")
 

Function Documentation

◆ calculate_query_times()

def run-benchmark.calculate_query_times (   kwargs)
  Calculates aggregate query times from all iteration times

  Kwargs:
    total_times(list): List of total time calculations
    execution_times(list): List of execution_time calculations
    results_iter_times(list): List of results_iter_time calculations
    connect_times(list): List of connect_time calculations

  Returns:
    query_execution(dict): Query times
    False(bool): The query failed. Exception should be logged.

Definition at line 151 of file run-benchmark.py.

151 def calculate_query_times(**kwargs):
152  """
153  Calculates aggregate query times from all iteration times
154 
155  Kwargs:
156  total_times(list): List of total time calculations
157  execution_times(list): List of execution_time calculations
158  results_iter_times(list): List of results_iter_time calculations
159  connect_times(list): List of connect_time calculations
160 
161  Returns:
162  query_execution(dict): Query times
163  False(bool): The query failed. Exception should be logged.
164  """
165  return {
166  "total_time_avg": round(numpy.mean(kwargs["total_times"]), 1),
167  "total_time_min": round(numpy.min(kwargs["total_times"]), 1),
168  "total_time_max": round(numpy.max(kwargs["total_times"]), 1),
169  "total_time_85": round(numpy.percentile(kwargs["total_times"], 85), 1),
170  "execution_time_avg": round(numpy.mean(kwargs["execution_times"]), 1),
171  "execution_time_min": round(numpy.min(kwargs["execution_times"]), 1),
172  "execution_time_max": round(numpy.max(kwargs["execution_times"]), 1),
173  "execution_time_85": round(
174  numpy.percentile(kwargs["execution_times"], 85), 1
175  ),
176  "execution_time_25": round(
177  numpy.percentile(kwargs["execution_times"], 25), 1
178  ),
179  "execution_time_std": round(numpy.std(kwargs["execution_times"]), 1),
180  "connect_time_avg": round(numpy.mean(kwargs["connect_times"]), 1),
181  "connect_time_min": round(numpy.min(kwargs["connect_times"]), 1),
182  "connect_time_max": round(numpy.max(kwargs["connect_times"]), 1),
183  "connect_time_85": round(
184  numpy.percentile(kwargs["connect_times"], 85), 1
185  ),
186  "results_iter_time_avg": round(
187  numpy.mean(kwargs["results_iter_times"]), 1
188  ),
189  "results_iter_time_min": round(
190  numpy.min(kwargs["results_iter_times"]), 1
191  ),
192  "results_iter_time_max": round(
193  numpy.max(kwargs["results_iter_times"]), 1
194  ),
195  "results_iter_time_85": round(
196  numpy.percentile(kwargs["results_iter_times"], 85), 1
197  ),
198  }
199 
200 
def calculate_query_times(kwargs)

◆ execute_query()

def run-benchmark.execute_query (   kwargs)
  Executes a query against the connected db using pymapd
  https://pymapd.readthedocs.io/en/latest/usage.html#querying

  Kwargs:
    query_name(str): Name of query
    query_mapdql(str): Query to run
    iteration(int): Iteration number

  Returns:
    query_execution(dict):::
      result_count(int): Number of results returned
      execution_time(float): Time (in ms) that pymapd reports
                             backend spent on query.
      connect_time(float): Time (in ms) for overhead of query, calculated
                           by subtracting backend execution time
                           from time spent on the execution function.
      results_iter_time(float): Time (in ms) it took for
                                pymapd.fetchone() to iterate through all
                                of the results.
      total_time(float): Time (in ms) from adding all above times.
    False(bool): The query failed. Exception should be logged.

Definition at line 70 of file run-benchmark.py.

def execute_query(**kwargs):
    """
    Executes a query against the connected db using pymapd
    https://pymapd.readthedocs.io/en/latest/usage.html#querying

    Relies on the module-level connection object ``con``.

    Kwargs:
      query_name(str): Name of query
      query_mapdql(str): Query to run
      iteration(int): Iteration number

    Returns:
      query_execution(dict):::
        result_count(int): Number of results returned
        execution_time(float): Time (in ms) that pymapd reports
                               backend spent on query.
        connect_time(float): Time (in ms) for overhead of query,
                             calculated by subtracting backend
                             execution time from time spent on the
                             execution function.
        results_iter_time(float): Time (in ms) it took for
                                  pymapd.fetchone() to iterate through
                                  all of the results.
        total_time(float): Time (in ms) from adding all above times.
      False(bool): The query failed. Exception should be logged.
    """
    start_time = timeit.default_timer()
    try:
        # Run the query
        query_result = con.execute(kwargs["query_mapdql"])
        logging.debug(
            "Completed iteration "
            + str(kwargs["iteration"])
            + " of query "
            + kwargs["query_name"]
        )
    except (pymapd.exceptions.ProgrammingError, pymapd.exceptions.Error):
        logging.exception(
            "Error running query "
            + kwargs["query_name"]
            + " during iteration "
            + str(kwargs["iteration"])
        )
        return False

    # Calculate times
    query_elapsed_time = (timeit.default_timer() - start_time) * 1000
    execution_time = query_result._result.execution_time_ms
    connect_time = round((query_elapsed_time - execution_time), 1)

    # Iterate through each result from the query.
    # (Fixed: trailing space so the query name is not fused to the text.)
    logging.debug(
        "Counting results from query "
        + kwargs["query_name"]
        + " iteration "
        + str(kwargs["iteration"])
    )
    result_count = 0
    start_time = timeit.default_timer()
    while query_result.fetchone():
        result_count += 1
    results_iter_time = round(
        ((timeit.default_timer() - start_time) * 1000), 1
    )

    query_execution = {
        "result_count": result_count,
        "execution_time": execution_time,
        "connect_time": connect_time,
        "results_iter_time": results_iter_time,
        "total_time": execution_time + connect_time + results_iter_time,
    }
    # (Fixed: trailing space so the query name is not fused to the text.)
    logging.debug(
        "Execution results for query "
        + kwargs["query_name"]
        + " iteration "
        + str(kwargs["iteration"])
        + ": "
        + str(query_execution)
    )
    return query_execution
149 
150 
def execute_query(kwargs)

◆ get_connection()

def run-benchmark.get_connection (   kwargs)
  Connects to the db using pymapd
  https://pymapd.readthedocs.io/en/latest/usage.html#connecting

  Kwargs:
    db_user(str): DB username
    db_passwd(str): DB password
    db_server(str): DB host
    db_port(int): DB port
    db_name(str): DB name

  Returns:
    con(class): Connection class
    False(bool): The connection failed. Exception should be logged.

Definition at line 16 of file run-benchmark.py.

def get_connection(**kwargs):
    """
    Connects to the db using pymapd
    https://pymapd.readthedocs.io/en/latest/usage.html#connecting

    Kwargs:
      db_user(str): DB username
      db_passwd(str): DB password
      db_server(str): DB host
      db_port(int): DB port
      db_name(str): DB name

    Returns:
      con(class): Connection class
      False(bool): The connection failed. Exception should be logged.
    """
    try:
        logging.debug("Connecting to mapd db...")
        con = pymapd.connect(
            user=kwargs["db_user"],
            password=kwargs["db_passwd"],
            host=kwargs["db_server"],
            port=kwargs["db_port"],
            dbname=kwargs["db_name"],
        )
        # Typo fix in log message: "Succesfully" -> "Successfully"
        logging.info("Successfully connected to mapd db")
        return con
    except (pymapd.exceptions.OperationalError, pymapd.exceptions.Error):
        logging.exception("Error connecting to database.")
        return False
46 
47 
def get_connection(kwargs)

◆ get_mem_usage()

def run-benchmark.get_mem_usage (   kwargs)
  Calculates memory statistics from mapd_server _client.get_memory call

  Kwargs:
    con(class 'pymapd.connection.Connection'): Mapd connection
    mem_type(str): [gpu, cpu] Type of memory to gather metrics for

  Returns:
    ramusage(dict):::
      usedram(float): Amount of memory (in MB) used
      freeram(float): Amount of memory (in MB) free
      totalallocated(float): Total amount of memory (in MB) allocated
      errormessage(str): Error if returned by get_memory call
      rawdata(list): Raw data returned from get_memory call

Definition at line 201 of file run-benchmark.py.

def get_mem_usage(**kwargs):
    """
    Calculates memory statistics from mapd_server _client.get_memory call.

    Kwargs:
      con(class 'pymapd.connection.Connection'): Mapd connection
      mem_type(str): [gpu, cpu] Type of memory to gather metrics for

    Returns:
      ramusage(dict):::
        usedram(float): Amount of memory (in MB) used
        freeram(float): Amount of memory (in MB) free
        totalallocated(float): Total amount of memory (in MB) allocated
        errormessage(str): Error if returned by get_memory call
        (on failure only "errormessage" is present)
    """
    # Initialize up front so the except path cannot raise a NameError
    # when the failure occurs before ramusage was populated (the
    # original assigned ramusage = {} inside the try block).
    ramusage = {}
    try:
        # Bug fix: query the connection passed via kwargs instead of a
        # module-level `con` — the session already comes from
        # kwargs["con"], so the client must too.
        con_mem_data_list = kwargs["con"]._client.get_memory(
            session=kwargs["con"]._session, memory_level=kwargs["mem_type"]
        )
        usedram = 0
        freeram = 0
        for con_mem_data in con_mem_data_list:
            page_size = con_mem_data.page_size
            for node_memory_data in con_mem_data.node_memory_data:
                ram = node_memory_data.num_pages * page_size
                if node_memory_data.is_free:
                    freeram += ram
                else:
                    usedram += ram
        totalallocated = usedram + freeram
        if totalallocated > 0:
            # Convert bytes -> MB for reporting.
            totalallocated = round(totalallocated / 1024 / 1024, 1)
            usedram = round(usedram / 1024 / 1024, 1)
            freeram = round(freeram / 1024 / 1024, 1)
        ramusage["usedram"] = usedram
        ramusage["freeram"] = freeram
        ramusage["totalallocated"] = totalallocated
        ramusage["errormessage"] = ""
    except Exception as e:
        errormessage = "Get memory failed with error: " + str(e)
        logging.error(errormessage)
        ramusage["errormessage"] = errormessage
    return ramusage
248 
249 
def get_mem_usage(kwargs)

◆ json_format_handler()

def run-benchmark.json_format_handler (   x)

Definition at line 250 of file run-benchmark.py.

def json_format_handler(x):
    # json.dumps fallback: converts the two non-JSON-serializable types
    # this script produces (datetime timestamps and numpy int64 values).
    converters = (
        (datetime.datetime, lambda value: value.isoformat()),
        (numpy.int64, int),
    )
    for matched_type, convert in converters:
        if isinstance(x, matched_type):
            return convert(x)
    raise TypeError("Unknown type")
257 
258 
259 # Parse input parameters
def json_format_handler(x)

◆ validate_query_file()

def run-benchmark.validate_query_file (   kwargs)
  Validates query file. Currently only checks the query file name

  Kwargs:
    query_filename(str): Name of query file

  Returns:
    True(bool): Query successfully validated
    False(bool): Query failed validation

Definition at line 48 of file run-benchmark.py.

def validate_query_file(**kwargs):
    """
    Validates a query file. Currently only checks the query file name.

    Kwargs:
      query_filename(str): Name of query file

    Returns:
      True(bool): Query successfully validated
      False(bool): Query failed validation
    """
    # Guard-clause form: accept well-named files immediately, warn and
    # reject anything that does not carry the .sql extension.
    if kwargs["query_filename"].endswith(".sql"):
        return True
    logging.warning(
        "Query filename "
        + kwargs["query_filename"]
        + ' is invalid - does not end in ".sql". Skipping'
    )
    return False
68 
69 
def validate_query_file(kwargs)

Variable Documentation

◆ action

run-benchmark.action

Definition at line 265 of file run-benchmark.py.

◆ args

run-benchmark.args = parser.parse_args()

Definition at line 461 of file run-benchmark.py.

◆ chunk_size_bytes

run-benchmark.chunk_size_bytes

Definition at line 910 of file run-benchmark.py.

◆ col_names_from_schema

run-benchmark.col_names_from_schema

Definition at line 911 of file run-benchmark.py.

◆ con

def run-benchmark.con
Initial value:
2  db_user=source_db_user,
3  db_passwd=source_db_passwd,
4  db_server=source_db_server,
5  db_port=source_db_port,
6  db_name=source_db_name,
7 )
def get_connection(kwargs)

Definition at line 536 of file run-benchmark.py.

◆ conn_gpu_count

def run-benchmark.conn_gpu_count = None

Definition at line 560 of file run-benchmark.py.

◆ conn_hardware_info

def run-benchmark.conn_hardware_info = con._client.get_hardware_info(con._session)

Definition at line 577 of file run-benchmark.py.

◆ conn_machine_name

run-benchmark.conn_machine_name = re.search(r"@(.*?):", run_connection).group(1)

Definition at line 558 of file run-benchmark.py.

◆ connect_times

run-benchmark.connect_times

Definition at line 762 of file run-benchmark.py.

◆ create_table_sql

run-benchmark.create_table_sql = table_schema.read().replace("\n", " ")

Definition at line 891 of file run-benchmark.py.

◆ default

run-benchmark.default

Definition at line 274 of file run-benchmark.py.

◆ dest

run-benchmark.dest

Definition at line 274 of file run-benchmark.py.

◆ dest_con

def run-benchmark.dest_con
Initial value:
2  db_user=dest_db_user,
3  db_passwd=dest_db_passwd,
4  db_server=dest_db_server,
5  db_port=dest_db_port,
6  db_name=dest_db_name,
7  )
def get_connection(kwargs)

Definition at line 873 of file run-benchmark.py.

◆ dest_db_name

run-benchmark.dest_db_name = args.dest_name

Definition at line 503 of file run-benchmark.py.

◆ dest_db_passwd

run-benchmark.dest_db_passwd = args.dest_passwd

Definition at line 495 of file run-benchmark.py.

◆ dest_db_port

run-benchmark.dest_db_port = args.dest_port

Definition at line 502 of file run-benchmark.py.

◆ dest_db_server

run-benchmark.dest_db_server = args.dest_server

Definition at line 501 of file run-benchmark.py.

◆ dest_db_user

run-benchmark.dest_db_user = args.dest_user

Definition at line 494 of file run-benchmark.py.

◆ dest_table

run-benchmark.dest_table = args.dest_table

Definition at line 504 of file run-benchmark.py.

◆ dest_table_schema_file

run-benchmark.dest_table_schema_file = args.dest_table_schema_file

Definition at line 505 of file run-benchmark.py.

◆ destinations

run-benchmark.destinations = args.destination.split(",")

Definition at line 491 of file run-benchmark.py.

◆ execution_times

run-benchmark.execution_times

Definition at line 762 of file run-benchmark.py.

◆ file_jenkins_open

run-benchmark.file_jenkins_open = open(output_file_jenkins, "w")

Definition at line 954 of file run-benchmark.py.

◆ file_json_open

run-benchmark.file_json_open = open(output_file_json, "w")

Definition at line 917 of file run-benchmark.py.

◆ first_connect_time

run-benchmark.first_connect_time = round(query_result["connect_time"], 1)

Definition at line 711 of file run-benchmark.py.

◆ first_cpu_mem_usage

run-benchmark.first_cpu_mem_usage = query_cpu_mem_usage

Definition at line 720 of file run-benchmark.py.

◆ first_execution_time

run-benchmark.first_execution_time = round(query_result["execution_time"], 1)

Definition at line 710 of file run-benchmark.py.

◆ first_gpu_mem_usage

run-benchmark.first_gpu_mem_usage = query_gpu_mem_usage

Definition at line 721 of file run-benchmark.py.

◆ first_results_iter_time

run-benchmark.first_results_iter_time
Initial value:
1 = round(
2  query_result["results_iter_time"], 1
3  )

Definition at line 712 of file run-benchmark.py.

◆ first_total_time

tuple run-benchmark.first_total_time
Initial value:
1 = (
2  first_execution_time
3  + first_connect_time
4  + first_results_iter_time
5  )

Definition at line 715 of file run-benchmark.py.

◆ gather_nvml_gpu_info

run-benchmark.gather_nvml_gpu_info = args.gather_nvml_gpu_info

Definition at line 487 of file run-benchmark.py.

◆ gpu_count

run-benchmark.gpu_count = args.gpu_count

Definition at line 484 of file run-benchmark.py.

Referenced by get_available_gpus(), get_context_count(), and is_unnest().

◆ gpu_name

run-benchmark.gpu_name = args.gpu_name

Definition at line 485 of file run-benchmark.py.

◆ handle

◆ help

run-benchmark.help

Definition at line 265 of file run-benchmark.py.

◆ iterations

◆ jenkins_bench_json

run-benchmark.jenkins_bench_json
Initial value:
1 = json.dumps(
2  {
3  "groups": [
4  {
5  "name": source_table + output_tag_jenkins,
6  "description": "Source table: " + source_table,
7  "tests": jenkins_bench_results,
8  }
9  ]
10  }
11  )

Definition at line 941 of file run-benchmark.py.

◆ jenkins_bench_results

list run-benchmark.jenkins_bench_results = []

Definition at line 923 of file run-benchmark.py.

◆ label

run-benchmark.label = args.label

Definition at line 474 of file run-benchmark.py.

◆ level

run-benchmark.level

Definition at line 463 of file run-benchmark.py.

◆ local_uname

run-benchmark.local_uname = os.uname()

Definition at line 622 of file run-benchmark.py.

◆ machine_name

run-benchmark.machine_name = args.machine_name

Definition at line 489 of file run-benchmark.py.

◆ machine_uname

run-benchmark.machine_uname = args.machine_uname

Definition at line 490 of file run-benchmark.py.

◆ no_gather_conn_gpu_info

run-benchmark.no_gather_conn_gpu_info = args.no_gather_conn_gpu_info

Definition at line 486 of file run-benchmark.py.

◆ no_gather_nvml_gpu_info

bool run-benchmark.no_gather_nvml_gpu_info = args.no_gather_nvml_gpu_info

Definition at line 488 of file run-benchmark.py.

◆ optional

run-benchmark.optional = parser._action_groups.pop()

Definition at line 261 of file run-benchmark.py.

◆ output_file_jenkins

run-benchmark.output_file_jenkins = args.output_file_jenkins

Definition at line 528 of file run-benchmark.py.

◆ output_file_json

run-benchmark.output_file_json = args.output_file_json

Definition at line 515 of file run-benchmark.py.

◆ output_tag_jenkins

run-benchmark.output_tag_jenkins = args.output_tag_jenkins

Definition at line 529 of file run-benchmark.py.

◆ parser

run-benchmark.parser = ArgumentParser()

Definition at line 260 of file run-benchmark.py.

◆ post_query_cpu_mem_usage

def run-benchmark.post_query_cpu_mem_usage = get_mem_usage(con=con, mem_type="cpu")

Definition at line 691 of file run-benchmark.py.

◆ post_query_gpu_mem_usage

def run-benchmark.post_query_gpu_mem_usage = get_mem_usage(con=con, mem_type="gpu")

Definition at line 693 of file run-benchmark.py.

◆ pre_query_cpu_mem_usage

def run-benchmark.pre_query_cpu_mem_usage = get_mem_usage(con=con, mem_type="cpu")

Definition at line 674 of file run-benchmark.py.

◆ pre_query_gpu_mem_usage

def run-benchmark.pre_query_gpu_mem_usage = get_mem_usage(con=con, mem_type="gpu")

Definition at line 676 of file run-benchmark.py.

◆ preserve_index

run-benchmark.preserve_index

Definition at line 909 of file run-benchmark.py.

◆ queries_dir

run-benchmark.queries_dir = args.queries_dir

Definition at line 476 of file run-benchmark.py.

◆ query_cpu_mem_usage

run-benchmark.query_cpu_mem_usage
Initial value:
1 = round(
2  post_query_cpu_mem_usage["usedram"]
3  - pre_query_cpu_mem_usage["usedram"],
4  1,
5  )

Definition at line 695 of file run-benchmark.py.

◆ query_error_info

string run-benchmark.query_error_info = ""

Definition at line 707 of file run-benchmark.py.

◆ query_filename

run-benchmark.query_filename

Definition at line 644 of file run-benchmark.py.

◆ query_gpu_mem_usage

run-benchmark.query_gpu_mem_usage
Initial value:
1 = round(
2  post_query_gpu_mem_usage["usedram"]
3  - pre_query_gpu_mem_usage["usedram"],
4  1,
5  )

Definition at line 700 of file run-benchmark.py.

◆ query_id

run-benchmark.query_id = query["name"].rsplit(".")[0]

◆ query_list

list run-benchmark.query_list = []

Definition at line 639 of file run-benchmark.py.

◆ query_list_json

run-benchmark.query_list_json = json.dumps(query_list, default=json_format_handler, indent=2)

Definition at line 864 of file run-benchmark.py.

◆ query_mapdql

run-benchmark.query_mapdql = query_filepath.read().replace("\n", " ")

Definition at line 649 of file run-benchmark.py.

◆ query_result

def run-benchmark.query_result
Initial value:
2  query_name=query["name"],
3  query_mapdql=query["mapdql"],
4  iteration=iteration,
5  )
def execute_query(kwargs)

Definition at line 684 of file run-benchmark.py.

◆ query_results

list run-benchmark.query_results = []

Definition at line 666 of file run-benchmark.py.

◆ query_times

def run-benchmark.query_times
Initial value:
2  total_times=total_times,
3  execution_times=execution_times,
4  connect_times=connect_times,
5  results_iter_times=results_iter_times,
6  )
def calculate_query_times(kwargs)

Definition at line 778 of file run-benchmark.py.

◆ query_total_elapsed_time

run-benchmark.query_total_elapsed_time
Initial value:
1 = round(
2  ((timeit.default_timer() - query_total_start_time) * 1000), 1
3  )

Definition at line 756 of file run-benchmark.py.

◆ query_total_start_time

run-benchmark.query_total_start_time = timeit.default_timer()

Definition at line 670 of file run-benchmark.py.

◆ required

run-benchmark.required = parser.add_argument_group("required arguments")

Definition at line 262 of file run-benchmark.py.

◆ res

def run-benchmark.res = dest_con.execute(create_table_sql)

Definition at line 900 of file run-benchmark.py.

◆ result_count

def run-benchmark.result_count = query_result["result_count"]

Definition at line 774 of file run-benchmark.py.

Referenced by TEST().

◆ results_df

run-benchmark.results_df = DataFrame(query_results)

Definition at line 870 of file run-benchmark.py.

◆ results_iter_times

run-benchmark.results_iter_times

Definition at line 762 of file run-benchmark.py.

◆ run_connection

run-benchmark.run_connection = str(con)

Definition at line 550 of file run-benchmark.py.

◆ run_driver

string run-benchmark.run_driver = ""

Definition at line 552 of file run-benchmark.py.

◆ run_guid

run-benchmark.run_guid = str(uuid.uuid4())

Definition at line 547 of file run-benchmark.py.

◆ run_machine_name

run-benchmark.run_machine_name = machine_name

Definition at line 624 of file run-benchmark.py.

◆ run_machine_uname

string run-benchmark.run_machine_uname = machine_uname

Definition at line 631 of file run-benchmark.py.

◆ run_timestamp

run-benchmark.run_timestamp = datetime.datetime.now()

Definition at line 549 of file run-benchmark.py.

◆ run_version

def run-benchmark.run_version = con._client.get_version()

Definition at line 553 of file run-benchmark.py.

◆ run_version_short

def run-benchmark.run_version_short = run_version.split("-")[0]

Definition at line 555 of file run-benchmark.py.

◆ source_db_gpu_count

run-benchmark.source_db_gpu_count = None

Definition at line 561 of file run-benchmark.py.

◆ source_db_gpu_driver_ver

run-benchmark.source_db_gpu_driver_ver = ""

Definition at line 563 of file run-benchmark.py.

◆ source_db_gpu_mem

run-benchmark.source_db_gpu_mem = None

Definition at line 562 of file run-benchmark.py.

◆ source_db_gpu_name

run-benchmark.source_db_gpu_name = ""

Definition at line 564 of file run-benchmark.py.

◆ source_db_name

run-benchmark.source_db_name = args.name

Definition at line 472 of file run-benchmark.py.

◆ source_db_passwd

run-benchmark.source_db_passwd = args.passwd

Definition at line 469 of file run-benchmark.py.

◆ source_db_port

run-benchmark.source_db_port = args.port

Definition at line 471 of file run-benchmark.py.

◆ source_db_server

run-benchmark.source_db_server = args.server

Definition at line 470 of file run-benchmark.py.

◆ source_db_user

run-benchmark.source_db_user = args.user

Definition at line 468 of file run-benchmark.py.

◆ source_table

run-benchmark.source_table = args.table

Definition at line 473 of file run-benchmark.py.

◆ succesful_query_list

list run-benchmark.succesful_query_list = query_list

Definition at line 855 of file run-benchmark.py.

◆ tables

def run-benchmark.tables = dest_con.get_tables()

Definition at line 883 of file run-benchmark.py.

◆ total_times

run-benchmark.total_times

Definition at line 762 of file run-benchmark.py.

◆ True

run-benchmark.True

Definition at line 302 of file run-benchmark.py.

◆ type

run-benchmark.type

Definition at line 294 of file run-benchmark.py.

◆ valid_destination_set

bool run-benchmark.valid_destination_set = True

Definition at line 493 of file run-benchmark.py.