OmniSciDB  dfae7c3b14
analyze_benchmark Namespace Reference

Classes

class  BenchAnalyzer
 
class  BenchmarkLoader
 
class  PrettyPrint
 

Functions

def compute_speedup (x, y)
 
def main (argv)
 

Function Documentation

◆ compute_speedup()

def analyze_benchmark.compute_speedup (   x,
  y 
)

Definition at line 136 of file analyze_benchmark.py.

Referenced by analyze_benchmark.BenchAnalyzer.findAnomaliesRatio(), and analyze_benchmark.PrettyPrint.printAttribute().

def compute_speedup(x, y):
    """Return the element-wise speedup ratios x[i] / y[i].

    Args:
        x: sequence of reference measurements.
        y: sequence of sample measurements, same length as x; entries are
           assumed non-zero (a zero sample time would raise ZeroDivisionError).

    Returns:
        A new list containing x[i] / y[i] for each aligned pair.
    """
    # zip() pairs the two sequences lazily; the original built an
    # intermediate list(zip(...)) and appended in a loop for no benefit.
    return [ref / sample for ref, sample in zip(x, y)]
142 
143 
+ Here is the caller graph for this function:

◆ main()

def analyze_benchmark.main (   argv)

Definition at line 250 of file analyze_benchmark.py.

def main(argv):
    """Compare benchmark result directories and report anomalies.

    Parses command-line options, then for every GPU found under the
    reference directory loads its benchmark JSON files, pairs them with
    the same-named files under the sample directory, and prints speedup
    anomalies (and, with -p, the full per-query results).

    Args:
        argv: list of CLI arguments (typically sys.argv[1:]).

    Raises:
        SystemExit: code 2 on bad options, code 0 after printing help.
        AssertionError: if -s/-r are missing or not directories, or
            epsilon > 1.
    """
    try:
        opts, args = getopt.getopt(
            argv,
            "hs:r:e:a:p",
            [
                "help",
                "sample=",
                "reference=",
                "epsilon=",
                "attribute=",
                "print",
            ],
        )
    # Fixed: the exception class is getopt.GetoptError; the original
    # spelled it GetOptError, which raised AttributeError on any bad flag.
    except getopt.GetoptError:
        print(
            "python3 analyze-benchmark.py -s <sample dir> -r <reference dir> -e <epsilon> -a <attribute> -p"
        )
        sys.exit(2)

    dir_artifact_sample = ""
    dir_artifact_ref = ""
    epsilon = 0.05
    # default attribute to use for benchmark comparison
    query_attribute = "query_exec_trimmed_avg"

    to_print = False  # printing all the results, disabled by default

    for opt, arg in opts:
        if opt in ("-h", "--help"):
            # Fixed: the help text claimed the default attribute is
            # query_total_avg; the actual default above is
            # query_exec_trimmed_avg.
            print(
                """
                -s/--sample:\t\t\t directory of the results for the benchmarked sample branch
                -r/--reference:\t\t\t directory of the results for the benchmarked reference branch
                -e/--epsilon:\t\t\t ratio tolerance for reporting results outside this range
                -a/--attribute:\t\t\t attribute to be used for benchmark comparison (default: query_exec_trimmed_avg)
                -p/--print:\t\t\t\t print all the results
                """
            )
            sys.exit()
        elif opt in ("-s", "--sample"):
            dir_artifact_sample = arg
            assert os.path.isdir(dir_artifact_sample)
        elif opt in ("-r", "--reference"):
            dir_artifact_ref = arg
            assert os.path.isdir(dir_artifact_ref)
        elif opt in ("-e", "--epsilon"):
            epsilon = float(arg)
        elif opt in ("-a", "--attribute"):
            query_attribute = arg
        elif opt in ("-p", "--print"):
            to_print = True

    # Fixed: `is not ""` compares object identity, not equality
    # (SyntaxWarning on CPython 3.8+ and not guaranteed to work).
    assert dir_artifact_ref != ""
    assert dir_artifact_sample != ""
    assert epsilon <= 1

    GPU_list_ref = listdir(dir_artifact_ref)
    GPU_list_sample = listdir(dir_artifact_sample)

    for gpu in GPU_list_ref:
        dir_name_ref = dir_artifact_ref + "/" + gpu + "/Benchmarks"
        filename_list_ref = listdir(dir_name_ref)
        # BenchmarkLoader expects the directory name with a trailing slash.
        dir_name_ref += "/"

        refBench = BenchmarkLoader(dir_name_ref, filename_list_ref)

        if gpu in GPU_list_sample:
            dir_name_sample = dir_artifact_sample + "/" + gpu + "/Benchmarks"
            filename_list_sample = listdir(dir_name_sample)
            dir_name_sample += "/"

            sampleBench = BenchmarkLoader(
                dir_name_sample, filename_list_sample
            )
            # Print the GPU/run banner only once per GPU, before the first
            # benchmark file that exists on both sides.
            first_header = True
            for index in range(len(filename_list_ref)):
                refBench.load(filename_list_ref[index])
                if filename_list_ref[index] in filename_list_sample:
                    sampleBench.load(filename_list_ref[index])
                    if first_header:
                        print(
                            "\n+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++"
                        )
                        print("++++ " + sampleBench.getGpuName())
                        print(
                            "++++ reference("
                            + refBench.getFrontAttribute("run_label")
                            + "): "
                            + refBench.getFrontAttribute("run_version")
                        )
                        print(
                            "++++ sample("
                            + sampleBench.getFrontAttribute("run_label")
                            + "): "
                            + sampleBench.getFrontAttribute("run_version")
                        )
                        print(
                            "+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++\n"
                        )
                        first_header = False

                    analyzer = BenchAnalyzer(
                        refBench, sampleBench, query_attribute
                    )
                    analyzer.findAnomaliesRatio(epsilon)
                    if to_print:
                        printer = PrettyPrint(
                            refBench, sampleBench, query_attribute
                        )
                        printer.printAttribute()
                else:
                    print(
                        "No sample results for table "
                        + refBench.getRunTableName()
                        + " were found."
                    )
                print(
                    "======================================================================="
                )

        else:
            print(
                "\n+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++"
            )
            print("++++ No sample results for GPU " + gpu + " were found.")
            print(
                "+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++\n"
            )
381 
382