Commit 38520c3b authored by Franck Dary

Updated print_result.py script to compute standard deviation

parent f4d28664
@@ -2,29 +2,57 @@
 import glob
 import sys
+import math
 if __name__ == "__main__" :
   metrics = ["LAS","UAS","Tokens","Sentences","UPOS","UFeats","Lemmas"]
   output = []
+  outputByModelScore = dict()
   for pathToFile in glob.iglob("" + '*stdout') :
-    model = pathToFile.split(".")[0].split("_UD_")[0]
-    corpus = pathToFile.split(".")[0].split("_UD_")[1]
+    model = pathToFile.split("_UD_")[0]
+    corpus = pathToFile.split("_UD_")[1].split('.')[0]
     for line in open(pathToFile, "r") :
       for metric in metrics :
         if metric in line and metric[0] == line[0]:
           splited = line.strip().replace("|","").split()
-          output.append([corpus, splited[0], splited[3], model])
+          model = model.split('.')[0]
+          if model not in outputByModelScore :
+            outputByModelScore[model] = dict()
+          if splited[0] not in outputByModelScore[model] :
+            outputByModelScore[model][splited[0]] = []
+          outputByModelScore[model][splited[0]].append([corpus, splited[0], splited[3], model])
+  for model in outputByModelScore :
+    for metric in outputByModelScore[model] :
+      score = 0.0
+      standardDeviation = 0.0
+      for exp in outputByModelScore[model][metric] :
+        score += float(exp[2])
+      score /= len(outputByModelScore[model][metric])
+      for exp in outputByModelScore[model][metric] :
+        standardDeviation += (float(exp[2])-score)**2
+      standardDeviation /= len(outputByModelScore[model][metric])-1
+      standardDeviation = math.sqrt(standardDeviation)
+      score = "%.2f[±%.2f]%%"%(score,standardDeviation)
+      output.append(outputByModelScore[model][metric][0])
+      output[-1][2] = score
   maxColLens = [0 for _ in range(len(output[0]))]
   output = [["Corpus","Metric","F1.score","Model"]] + output
   for line in output :
     for i in range(len(line)) :
       maxColLens[i] = max(maxColLens[i], len(line[i]))
   output = output[1:]
   output.sort()
   output = [["Corpus","Metric","F1.score","Model"]] + output
......
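For reference, the new block aggregates repeated runs of the same model: it averages the F1 scores of each (model, metric) group and reports the sample standard deviation (n-1 denominator), so every group needs at least two result files to avoid a division by zero. Below is a minimal sketch of the same aggregation using Python's statistics module; the file pattern and column indices are taken from the script above, but this helper itself is not part of the commit.

import glob
import statistics

metrics = ["LAS","UAS","Tokens","Sentences","UPOS","UFeats","Lemmas"]
scores = {}
# Collect one F1 score per run, grouped by (model, metric), as the script does.
for pathToFile in glob.iglob('*stdout') :
  model = pathToFile.split("_UD_")[0].split('.')[0]
  for line in open(pathToFile, "r") :
    for metric in metrics :
      if metric in line and metric[0] == line[0] :
        splited = line.strip().replace("|","").split()
        scores.setdefault((model, splited[0]), []).append(float(splited[3]))

for (model, metric), values in sorted(scores.items()) :
  mean = statistics.mean(values)
  # statistics.stdev uses the same n-1 denominator as the loop in the commit;
  # it raises StatisticsError when a group contains fewer than two runs.
  deviation = statistics.stdev(values) if len(values) > 1 else 0.0
  print("%s %s %.2f[±%.2f]%%" % (model, metric, mean, deviation))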
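The collapsed tail of the diff presumably uses maxColLens to print output as an aligned table. A hypothetical sketch of that kind of padding; the row data here is made up for illustration and is not from the commit.

header = ["Corpus","Metric","F1.score","Model"]
rows = [["French-GSD","LAS","85.31[±0.42]%","model1"]]  # made-up example row
# Pad every cell to the widest entry of its column, then print row by row.
maxColLens = [max(len(row[i]) for row in [header] + rows) for i in range(len(header))]
for row in [header] + rows :
  print(" ".join(row[i].ljust(maxColLens[i]) for i in range(len(row))))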