Commit 5f09dd00 authored by Franck Dary

fixed script to print results

parent f770ba68
@@ -21,6 +21,9 @@ if __name__ == "__main__" :
     model = pathToFile.split("_UD_")[0]
     corpus = pathToFile.split("_UD_")[1].split('.')[0]
+    if corpus not in outputByModelScore :
+      outputByModelScore[corpus] = dict()
     for line in open(pathToFile, "r") :
       for metric in metrics :
         if metric in line and metric[0] == line[0]:
@@ -28,30 +31,31 @@ if __name__ == "__main__" :
           model = model.split('.')[0]
-          if model not in outputByModelScore :
-            outputByModelScore[model] = dict()
-          if splited[0] not in outputByModelScore[model] :
-            outputByModelScore[model][splited[0]] = []
-          outputByModelScore[model][splited[0]].append([corpus, splited[0], splited[3], model])
-  for model in outputByModelScore :
-    for metric in outputByModelScore[model] :
-      score = 0.0
-      for exp in outputByModelScore[model][metric] :
-        score += float(exp[2])
-      score /= len(outputByModelScore[model][metric])
-      standardDeviation = 0.0
-      if len(outputByModelScore[model][metric]) > 1 :
-        for exp in outputByModelScore[model][metric] :
-          standardDeviation += (float(exp[2])-score)**2
-        standardDeviation /= len(outputByModelScore[model][metric])-1
-        standardDeviation = math.sqrt(standardDeviation)
-      if standardDeviation > 0 :
-        score = "%.2f[±%.2f]%%"%(score,standardDeviation)
-      else :
-        score = "%.2f%%"%score
-      output.append(outputByModelScore[model][metric][0])
-      output[-1][2] = score
+          if model not in outputByModelScore[corpus] :
+            outputByModelScore[corpus][model] = dict()
+          if splited[0] not in outputByModelScore[corpus][model] :
+            outputByModelScore[corpus][model][splited[0]] = []
+          outputByModelScore[corpus][model][splited[0]].append([corpus, splited[0], splited[3], model])
+  for corpus in outputByModelScore :
+    for model in outputByModelScore[corpus] :
+      for metric in outputByModelScore[corpus][model] :
+        score = 0.0
+        for exp in outputByModelScore[corpus][model][metric] :
+          score += float(exp[2])
+        score /= len(outputByModelScore[corpus][model][metric])
+        standardDeviation = 0.0
+        if len(outputByModelScore[corpus][model][metric]) > 1 :
+          for exp in outputByModelScore[corpus][model][metric] :
+            standardDeviation += (float(exp[2])-score)**2
+          standardDeviation /= len(outputByModelScore[corpus][model][metric])-1
+          standardDeviation = math.sqrt(standardDeviation)
+        if standardDeviation > 0 :
+          score = "%.2f[±%.2f]%%"%(score,standardDeviation)
+        else :
+          score = "%.2f%%"%score
+        output.append(outputByModelScore[corpus][model][metric][0])
+        output[-1][2] = score
   maxColLens = [0 for _ in range(len(output[0]))]
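For context, the fix adds a corpus level to outputByModelScore, so results are now averaged per (corpus, model, metric) instead of mixing every corpus that shares a model name under one entry. Below is a minimal standalone sketch of the aggregation the new loop performs (this is not the repository script; the rows variable and its example values are hypothetical): a mean over repeated runs and, when more than one run exists, a sample standard deviation (dividing by n-1) printed as mean[±std]%.

import math

# Hypothetical parsed rows: [corpus, metric, score, model], mirroring the
# [corpus, splited[0], splited[3], model] entries the script appends.
rows = [
  ["GSD", "UPOS", "95.10", "baseline"],
  ["GSD", "UPOS", "95.30", "baseline"],
  ["Sequoia", "UPOS", "96.80", "baseline"],
]

# Nest by corpus, then model, then metric, as in the fixed version.
outputByModelScore = dict()
for corpus, metric, score, model in rows :
  byModel = outputByModelScore.setdefault(corpus, dict())
  byMetric = byModel.setdefault(model, dict())
  byMetric.setdefault(metric, []).append([corpus, metric, score, model])

for corpus in outputByModelScore :
  for model in outputByModelScore[corpus] :
    for metric, exps in outputByModelScore[corpus][model].items() :
      values = [float(exp[2]) for exp in exps]
      mean = sum(values) / len(values)
      if len(values) > 1 :
        # Sample standard deviation (divide by n-1), matching the script.
        std = math.sqrt(sum((v - mean)**2 for v in values) / (len(values) - 1))
        print("%s %s %s : %.2f[±%.2f]%%" % (corpus, model, metric, mean, std))
      else :
        print("%s %s %s : %.2f%%" % (corpus, model, metric, mean))

With the example rows above this prints one averaged line for GSD (with a ±0.14 deviation over the two runs) and one plain line for Sequoia, which has a single run.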