OmniSciDB  04ee39c94c
analyze-benchmark Namespace Reference

Classes

class  BenchAnalyzer
 
class  BenchmarkLoader
 
class  PrettyPrint
 

Functions

def compute_speedup (x, y)
 
def main (argv)
 

Function Documentation

◆ compute_speedup()

def analyze-benchmark.compute_speedup(x, y)

Definition at line 130 of file analyze-benchmark.py.

Referenced by analyze-benchmark.BenchAnalyzer.findAnomaliesRatio(), and analyze-benchmark.PrettyPrint.printAttribute().

130 def compute_speedup(x, y):
131     result = []
132     zipped = list(zip(x, y))
133     for q in zipped:
134         result.append(q[0] / q[1])
135     return result
136 
137 
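compute_speedup divides the two input sequences element-wise, so when reference timings are passed as x and sample timings as y, each entry of the result is the speedup of the sample run for that query (values above 1 mean the sample branch was faster). A minimal sketch with hypothetical timing values; the function body is copied from the listing above:

# Hypothetical per-query averages in milliseconds for an attribute such as
# "query_total_avg"; the numbers are illustrative only.
reference_times = [120.0, 45.5, 300.2]  # reference branch
sample_times = [100.0, 50.0, 150.1]     # sample branch

def compute_speedup(x, y):
    result = []
    zipped = list(zip(x, y))
    for q in zipped:
        result.append(q[0] / q[1])
    return result

print(compute_speedup(reference_times, sample_times))
# [1.2, 0.91, 2.0] -> queries 1 and 3 sped up, query 2 regressed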

◆ main()

def analyze-benchmark.main(argv)

Definition at line 220 of file analyze-benchmark.py.

220 def main(argv):
221     try:
222         opts, args = getopt.getopt(
223             argv,
224             "hs:r:e:a:p",
225             [
226                 "help",
227                 "sample=",
228                 "reference=",
229                 "epsilon=",
230                 "attribute=",
231                 "print",
232             ],
233         )
234     except getopt.GetoptError:
235         print(
236             "python3 analyze-benchmark.py -s <sample dir> -r <reference dir> -e <epsilon> -a <attribute> -p"
237         )
238         sys.exit(2)
239 
240     dir_artifact_sample = ""
241     dir_artifact_ref = ""
242     epsilon = 0.05
243     query_attribute = (
244         "query_total_avg"
245     )  # default attribute to use for benchmark comparison
246 
247     to_print = False  # printing all the results, disabled by default
248 
249     for opt, arg in opts:
250         if opt in ("-h", "--help"):
251             print(
252                 """
253     -s/--sample:\t\t\t directory of the results for the benchmarked sample branch
254     -r/--reference:\t\t\t directory of the results for the benchmarked reference branch
255     -e/--epsilon:\t\t\t ratio tolerance for reporting results outside this range
256     -a/--attribute:\t\t\t attribute to be used for benchmark comparison (default: query_total_avg)
257     -p/--print:\t\t\t\t print all the results
258     """
259             )
260             sys.exit()
261         else:
262             if opt in ("-s", "--sample"):
263                 dir_artifact_sample = arg
264                 assert os.path.isdir(dir_artifact_sample)
265             elif opt in ("-r", "--reference"):
266                 dir_artifact_ref = arg
267                 assert os.path.isdir(dir_artifact_ref)
268             elif opt in ("-e", "--epsilon"):
269                 epsilon = float(arg)
270             elif opt in ("-a", "--attribute"):
271                 query_attribute = arg
272             elif opt in ("-p", "--print"):
273                 to_print = True
274 
275     assert dir_artifact_ref != ""
276     assert dir_artifact_sample != ""
277     assert epsilon <= 1
278 
279     GPU_list_ref = listdir(dir_artifact_ref)
280     GPU_list_sample = listdir(dir_artifact_sample)
281 
282     for gpu in GPU_list_ref:
283         dir_name_ref = dir_artifact_ref + "/" + gpu + "/Benchmarks"
284         filename_list_ref = listdir(dir_name_ref)
285         dir_name_ref += "/"
286 
287         refBench = BenchmarkLoader(dir_name_ref, filename_list_ref)
288 
289         if gpu in GPU_list_sample:
290             dir_name_sample = dir_artifact_sample + "/" + gpu + "/Benchmarks"
291             filename_list_sample = listdir(dir_name_sample)
292             dir_name_sample += "/"
293 
294             sampleBench = BenchmarkLoader(
295                 dir_name_sample, filename_list_sample
296             )
297             first_header = True
298             for index in range(len(filename_list_ref)):
299                 refBench.load(filename_list_ref[index])
300                 if filename_list_ref[index] in filename_list_sample:
301                     sampleBench.load(filename_list_ref[index])
302                     if first_header:
303                         print(
304                             "\n+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++"
305                         )
306                         print("++++ " + sampleBench.getGpuName())
307                         print(
308                             "++++ reference("
309                             + refBench.getFrontAttribute("run_label")
310                             + "): "
311                             + refBench.getFrontAttribute("run_version")
312                         )
313                         print(
314                             "++++ sample("
315                             + sampleBench.getFrontAttribute("run_label")
316                             + "): "
317                             + sampleBench.getFrontAttribute("run_version")
318                         )
319                         print(
320                             "+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++\n"
321                         )
322                         first_header = False
323 
324                     analyzer = BenchAnalyzer(
325                         refBench, sampleBench, query_attribute
326                     )
327                     analyzer.findAnomaliesRatio(epsilon)
328                     if to_print:
329                         printer = PrettyPrint(
330                             refBench, sampleBench, query_attribute
331                         )
332                         printer.printAttribute()
333                 else:
334                     print(
335                         "No sample results for table "
336                         + refBench.getRunTableName()
337                         + " were found."
338                     )
339                 print(
340                     "======================================================================="
341                 )
342 
343         else:
344             print(
345                 "\n+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++"
346             )
347             print("++++ No sample results for GPU " + gpu + " were found.")
348             print(
349                 "+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++\n"
350             )
351 
352 
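main() expects each artifact directory to be laid out as <dir>/<GPU name>/Benchmarks/<result file> and is normally run from the command line with the flags shown in the usage string above. The sketch below drives the same per-GPU comparison programmatically; it assumes analyze-benchmark.py is importable from the working directory (the hyphen in the module name requires importlib) and that the hypothetical paths shown contain matching benchmark result files for both branches:

import importlib
from os import listdir

# Hypothetical artifact locations; a real run would point at actual benchmark output.
ref_dir = "./reference/NVIDIA_A100/Benchmarks/"
sample_dir = "./sample/NVIDIA_A100/Benchmarks/"

ab = importlib.import_module("analyze-benchmark")

ref_files = listdir(ref_dir)
sample_files = listdir(sample_dir)

ref_bench = ab.BenchmarkLoader(ref_dir, ref_files)
sample_bench = ab.BenchmarkLoader(sample_dir, sample_files)

for name in ref_files:
    if name not in sample_files:
        continue
    ref_bench.load(name)
    sample_bench.load(name)
    # Report queries whose query_total_avg ratio falls outside a 5% tolerance,
    # mirroring the default epsilon used by main().
    ab.BenchAnalyzer(ref_bench, sample_bench, "query_total_avg").findAnomaliesRatio(0.05)
    # Equivalent of passing -p/--print: dump the attribute for every query.
    ab.PrettyPrint(ref_bench, sample_bench, "query_total_avg").printAttribute()

The equivalent command-line invocation is the one printed by the usage message: python3 analyze-benchmark.py -s <sample dir> -r <reference dir> -e <epsilon> -a <attribute> -p.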