diff --git a/perf.py b/perf.py
index 9875fbdb..bb1f5b91 100755
--- a/perf.py
+++ b/perf.py
@@ -177,7 +177,10 @@ def geomean(par):
     l = len(par)
     for i in range(l):
         temp = temp * par[i]
-    temp = temp ** (1.0/l)
+    if l != 0:
+        temp = temp ** (1.0/l)
+    else:
+        temp = 0
     return round(temp, 2)
 
 #takes an answer struct and print it.
@@ -189,18 +192,30 @@ def geomean(par):
 #test[4] - list of absolute results with tasks
 #test[5] - list of absolute time without ISPC (serial)
 #test[1..4] may be empty
-def print_answer(answer):
+def print_answer(answer, target_number):
     filelist = []
     print_debug("--------------------------------------------------------------------------\n", s, perf_log)
     print_debug("test name:\t ISPC speedup: ISPC + tasks speedup: | " +
         " ISPC time: ISPC + tasks time: serial:\n", s, perf_log)
-    filelist.append("test name,ISPC speedup,diff," +
-        "ISPC + tasks speedup,diff,ISPC time,diff,ISPC + tasks time,diff,serial,diff\n")
+    if target_number > 1:
+        if options.output == "":
+            options.output = "targets.csv"
+        filelist.append("test name,ISPC speedup" + "," * target_number + "ISPC + tasks speedup\n")
+        filelist.append("," + options.perf_target + "," + options.perf_target + "\n")
+    else:
+        filelist.append("test name,ISPC speedup,diff," +
+            "ISPC + tasks speedup,diff,ISPC time,diff,ISPC + tasks time,diff,serial,diff\n")
     max_t = [0,0,0,0,0]
     diff_t = [0,0,0,0,0]
-    geomean_t = [0,0,0,0,0]
-    list_of_max = [[],[],[],[],[]]
+    geomean_t = []
+    list_of_max = []
+    for i1 in range(target_number):
+        geomean_t.append([0,0,0,0,0])
+        list_of_max.append([[],[],[],[],[]])
     list_of_compare = [[],[],[],[],[],[]]
+    target_k = 0
+    temp_str_1 = ""
+    temp_str_2 = ""
     for i in range(len(answer)):
         list_of_compare[0].append(answer[i][0])
         for t in range(1,6):
@@ -215,7 +230,7 @@
                 mm = min(answer[i][t])
                 list_of_compare[t].append(mm)
                 max_t[t-1] = '%.2f' % mm
-                list_of_max[t-1].append(mm)
+                list_of_max[i % target_number][t-1].append(mm)
                 diff_t[t-1] = '%.2f' % (max(answer[i][t]) - min(answer[i][t]))
         print_debug("%s:\n" % answer[i][0], s, perf_log)
         print_debug("\t\tmax:\t%5s\t\t%10s\t|min:%10s\t%10s\t%10s\n" %
@@ -227,17 +242,37 @@
                 max_t[t] = ""
             if diff_t[t] == "n/a":
                 diff_t[t] = ""
-        filelist.append(answer[i][0] + "," +
+        if target_number > 1:
+            if target_k == 0:
+                temp_str_1 = answer[i][0] + ","
+                temp_str_2 = ""
+            temp_str_1 += max_t[0] + ","
+            temp_str_2 += max_t[1] + ","
+            target_k = target_k + 1
+            if target_k == target_number:
+                filelist.append(temp_str_1 + temp_str_2[:-1] + "\n")
+                target_k = 0
+        else:
+            filelist.append(answer[i][0] + "," +
             max_t[0] + "," + diff_t[0] + "," + max_t[1] + "," + diff_t[1] + "," +
             max_t[2] + "," + diff_t[2] + "," + max_t[3] + "," + diff_t[3] + "," +
             max_t[4] + "," + diff_t[4] + "\n")
     for i in range(0,5):
-        geomean_t[i] = geomean(list_of_max[i])
+        for i1 in range(target_number):
+            geomean_t[i1][i] = geomean(list_of_max[i1][i])
     print_debug("---------------------------------------------------------------------------------\n", s, perf_log)
     print_debug("Geomean:\t\t%5s\t\t%10s\t|%14s\t%10s\t%10s\n" %
-        (geomean_t[0], geomean_t[1], geomean_t[2], geomean_t[3], geomean_t[4]), s, perf_log)
-    filelist.append("Geomean," + str(geomean_t[0]) + ",," + str(geomean_t[1])
-        + ",," + str(geomean_t[2]) + ",," + str(geomean_t[3]) + ",," + str(geomean_t[4]) + "\n")
+        (geomean_t[0][0], geomean_t[0][1], geomean_t[0][2], geomean_t[0][3], geomean_t[0][4]), s, perf_log)
+    if target_number > 1:
+        temp_str_1 = "Geomean,"
+        temp_str_2 = ""
+        for i in range(target_number):
+            temp_str_1 += str(geomean_t[i][0]) + ","
+            temp_str_2 += str(geomean_t[i][1]) + ","
+        filelist.append(temp_str_1 + temp_str_2[:-1] + "\n")
+    else:
+        filelist.append("Geomean," + str(geomean_t[0][0]) + ",," + str(geomean_t[0][1])
+            + ",," + str(geomean_t[0][2]) + ",," + str(geomean_t[0][3]) + ",," + str(geomean_t[0][4]) + "\n")
     print_file(filelist)
     return list_of_compare
 
@@ -409,8 +444,6 @@ def perf(options1, args):
     while i < length-2:
         # we read name of test
        print_debug("%s" % lines[i], s, perf_log)
-        test = [lines[i][:-1],[],[],[],[],[]]
-        test_ref = [lines[i][:-1],[],[],[],[],[]]
         # read location of test
         folder = lines[i+1]
         folder = folder[:-1]
@@ -424,41 +457,51 @@
         command = lines[i+2]
         command = command[:-1]
         # handle conditional target argument
-        target_str = ""
+        target_str_temp = ""
+        perf_targets = [""]
+        target_number = 1
         if options.perf_target != "":
-            target_str = " ISPC_IA_TARGETS="+options.perf_target
-        if is_windows == False:
-            ex_command_ref = "./ref " + command + " >> " + perf_temp + "_ref"
-            ex_command = "./test " + command + " >> " + perf_temp + "_test"
-            bu_command_ref = "make CXX="+ref_compiler+" CC="+refc_compiler+ " EXAMPLE=ref ISPC="+ispc_ref+target_str+" >> "+build_log+" 2>> "+build_log
-            bu_command = "make CXX="+ref_compiler+" CC="+refc_compiler+ " EXAMPLE=test ISPC="+ispc_test+target_str+" >> "+build_log+" 2>> "+build_log
-            re_command = "make clean >> "+build_log
-        else:
-            ex_command_ref = "x64\\Release\\ref.exe " + command + " >> " + perf_temp + "_ref"
-            ex_command = "x64\\Release1\\test.exe " + command + " >> " + perf_temp + "_test"
-            bu_command_ref = "msbuild /V:m /p:Platform=x64 /p:Configuration=Release /p:TargetDir=.\ /p:TargetName=ref /p:ISPC_compiler=ispc_ref /t:rebuild >> " + build_log
-            bu_command = "msbuild /V:m /p:Platform=x64 /p:Configuration=Release /p:TargetDir=.\ /p:TargetName=test /p:ISPC_compiler=ispc /t:rebuild >> " + build_log
-            re_command = "msbuild /t:clean >> " + build_log
-        commands = [ex_command, bu_command, ex_command_ref, bu_command_ref, re_command]
-        # parsing config parameters
-        next_line = lines[i+3]
-        if next_line[0] == "!": # we should take only one part of test output
-            R = next_line.split(' ')
-            c1 = int(R[1]) #c1 is a number of string which we want to use in test output
-            c2 = int(R[2]) #c2 is total number of strings in test output
-            i = i+1
-        else:
-            c1 = 1
-            c2 = 1
-        next_line = lines[i+3]
-        if next_line[0] == "^": #we should concatenate result of this test with previous one
-            run_test(commands, c1, c2, answer[len(answer)-1], answer_ref[len(answer)-1], False)
-            i = i+1
-        else: #we run this test and append it's result to answer structure
-            run_test(commands, c1, c2, test, test_ref, True)
-            answer.append(test)
-            answer_ref.append(test_ref)
-
+            perf_targets = options.perf_target.split(',')
+            target_str_temp = " ISPC_IA_TARGETS="
+            target_number = len(perf_targets)
+        temp = 0
+        for target_i in range(target_number):
+            test = [lines[i][:-1],[],[],[],[],[]]
+            test_ref = [lines[i][:-1],[],[],[],[],[]]
+            target_str = target_str_temp + perf_targets[target_i]
+            if is_windows == False:
+                ex_command_ref = "./ref " + command + " >> " + perf_temp + "_ref"
+                ex_command = "./test " + command + " >> " + perf_temp + "_test"
+                bu_command_ref = "make CXX="+ref_compiler+" CC="+refc_compiler+ " EXAMPLE=ref ISPC="+ispc_ref+target_str+" >> "+build_log+" 2>> "+build_log
+                bu_command = "make CXX="+ref_compiler+" CC="+refc_compiler+ " EXAMPLE=test ISPC="+ispc_test+target_str+" >> "+build_log+" 2>> "+build_log
+                re_command = "make clean >> "+build_log
+            else:
+                ex_command_ref = "x64\\Release\\ref.exe " + command + " >> " + perf_temp + "_ref"
+                ex_command = "x64\\Release1\\test.exe " + command + " >> " + perf_temp + "_test"
+                bu_command_ref = "msbuild /V:m /p:Platform=x64 /p:Configuration=Release /p:TargetDir=.\ /p:TargetName=ref /p:ISPC_compiler=ispc_ref /t:rebuild >> " + build_log
+                bu_command = "msbuild /V:m /p:Platform=x64 /p:Configuration=Release /p:TargetDir=.\ /p:TargetName=test /p:ISPC_compiler=ispc /t:rebuild >> " + build_log
+                re_command = "msbuild /t:clean >> " + build_log
+            commands = [ex_command, bu_command, ex_command_ref, bu_command_ref, re_command]
+            # parsing config parameters
+            next_line = lines[i+3]
+            if next_line[0] == "!": # we should take only one part of test output
+                R = next_line.split(' ')
+                c1 = int(R[1]) #c1 is a number of string which we want to use in test output
+                c2 = int(R[2]) #c2 is total number of strings in test output
+                temp = 1
+            else:
+                c1 = 1
+                c2 = 1
+            next_line = lines[i+3]
+            if next_line[0] == "^":
+                temp = 1
+            if next_line[0] == "^" and target_number == 1: #we should concatenate result of this test with previous one
+                run_test(commands, c1, c2, answer[len(answer)-1], answer_ref[len(answer)-1], False)
+            else: #we run this test and append it's result to answer structure
+                run_test(commands, c1, c2, test, test_ref, True)
+                answer.append(test)
+                answer_ref.append(test_ref)
+        i = i + temp
         # preparing next loop iteration
         os.chdir(pwd1)
         i+=4
@@ -468,8 +511,10 @@
     common.remove_if_exists(perf_temp+"_ref")
 
     #print collected answer
+    if target_number > 1:
+        s = True
     print_debug("\n\nTEST COMPILER:\n", s, perf_log)
-    A = print_answer(answer)
+    A = print_answer(answer, target_number)
     if options.ref != "":
         print_debug("\n\nREFERENCE COMPILER:\n", s, perf_log)
         B = print_answer(answer_ref)
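Note (not part of the patch): the multi-target CSV layout that print_answer() now emits can be hard to follow from the diff alone. The standalone sketch below mirrors the temp_str_1/temp_str_2 accumulation added above: results arrive target-by-target for each test (in the order the per-target loop in perf() appends them), and one wide row is flushed once target_number entries have been collected. The helper name make_wide_rows and the test names and speedup numbers are invented for illustration only; the target list stands in for a comma-separated -perf_target value.

# Illustrative sketch only -- not part of perf.py or this patch.
def make_wide_rows(results, targets):
    # results: (test_name, ispc_speedup, tasks_speedup) tuples, ordered as
    # perf.py appends them: all targets of test 1, then all targets of test 2.
    target_number = len(targets)
    rows = ["test name,ISPC speedup" + "," * target_number + "ISPC + tasks speedup"]
    rows.append("," + ",".join(targets) + "," + ",".join(targets))
    temp_str_1 = ""  # per-target ISPC speedup columns for the current test
    temp_str_2 = ""  # per-target ISPC + tasks speedup columns
    target_k = 0
    for name, ispc, tasks in results:
        if target_k == 0:          # first target of a new test: start a row
            temp_str_1 = name + ","
            temp_str_2 = ""
        temp_str_1 += str(ispc) + ","
        temp_str_2 += str(tasks) + ","
        target_k += 1
        if target_k == target_number:      # all targets seen: flush the row
            rows.append(temp_str_1 + temp_str_2[:-1])  # drop trailing comma
            target_k = 0
    return rows

if __name__ == "__main__":
    # Hypothetical data: two tests measured on two targets each.
    demo = [("aobench", 5.1, 19.7), ("aobench", 4.8, 18.9),
            ("mandelbrot", 4.2, 15.3), ("mandelbrot", 4.0, 14.8)]
    for row in make_wide_rows(demo, ["avx1-i32x8", "sse4-i32x4"]):
        print(row)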