From 5f09dd0025e7e4d120015b19c1bbf9fe947ad080 Mon Sep 17 00:00:00 2001
From: Franck Dary <franck.dary@lis-lab.fr>
Date: Tue, 3 Dec 2019 09:45:40 +0100
Subject: [PATCH] fixed script to print results

---
 UD_any/print_results.py | 54 +++++++++++++++++++++++++++++-------------------------
 1 file changed, 29 insertions(+), 25 deletions(-)

diff --git a/UD_any/print_results.py b/UD_any/print_results.py
index 39ffe17..69cdb4b 100755
--- a/UD_any/print_results.py
+++ b/UD_any/print_results.py
@@ -21,6 +21,9 @@ if __name__ == "__main__" :
     model = pathToFile.split("_UD_")[0]
     corpus = pathToFile.split("_UD_")[1].split('.')[0]
 
+    if corpus not in outputByModelScore :
+      outputByModelScore[corpus] = dict()
+
     for line in open(pathToFile, "r") :
       for metric in metrics :
         if metric in line and metric[0] == line[0]:
@@ -28,31 +31,32 @@ if __name__ == "__main__" :
 
           model = model.split('.')[0]
 
-          if model not in outputByModelScore :
-            outputByModelScore[model] = dict()
-          if splited[0] not in outputByModelScore[model] :
-            outputByModelScore[model][splited[0]] = []
-
-          outputByModelScore[model][splited[0]].append([corpus, splited[0], splited[3], model])
-
-  for model in outputByModelScore :
-    for metric in outputByModelScore[model] :
-      score = 0.0
-      for exp in outputByModelScore[model][metric] :
-        score += float(exp[2])
-      score /= len(outputByModelScore[model][metric])
-      standardDeviation = 0.0
-      if len(outputByModelScore[model][metric]) > 1 :
-        for exp in outputByModelScore[model][metric] :
-          standardDeviation += (float(exp[2])-score)**2
-        standardDeviation /= len(outputByModelScore[model][metric])-1
-        standardDeviation = math.sqrt(standardDeviation)
-      if standardDeviation > 0 :
-        score = "%.2f[±%.2f]%%"%(score,standardDeviation)
-      else :
-        score = "%.2f%%"%score
-      output.append(outputByModelScore[model][metric][0])
-      output[-1][2] = score
+          if model not in outputByModelScore[corpus] :
+            outputByModelScore[corpus][model] = dict()
+          if splited[0] not in outputByModelScore[corpus][model] :
+            outputByModelScore[corpus][model][splited[0]] = []
+
+          outputByModelScore[corpus][model][splited[0]].append([corpus, splited[0], splited[3], model])
+
+  for corpus in outputByModelScore :
+    for model in outputByModelScore[corpus] :
+      for metric in outputByModelScore[corpus][model] :
+        score = 0.0
+        for exp in outputByModelScore[corpus][model][metric] :
+          score += float(exp[2])
+        score /= len(outputByModelScore[corpus][model][metric])
+        standardDeviation = 0.0
+        if len(outputByModelScore[corpus][model][metric]) > 1 :
+          for exp in outputByModelScore[corpus][model][metric] :
+            standardDeviation += (float(exp[2])-score)**2
+          standardDeviation /= len(outputByModelScore[corpus][model][metric])-1
+          standardDeviation = math.sqrt(standardDeviation)
+        if standardDeviation > 0 :
+          score = "%.2f[±%.2f]%%"%(score,standardDeviation)
+        else :
+          score = "%.2f%%"%score
+        output.append(outputByModelScore[corpus][model][metric][0])
+        output[-1][2] = score
 
   maxColLens = [0 for _ in range(len(output[0]))]
 
--
GitLab
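
Reviewer note, not part of the patch: for each outputByModelScore[corpus][model][metric] bucket, the rewritten loops compute the mean score over runs and, when more than one run is present, the sample standard deviation (dividing by n-1), formatted as "%.2f[±%.2f]%%". Below is a minimal standalone sketch of that aggregation; the helper name summarize and the example scores are hypothetical, only the formula and the output format come from the script.

import math

def summarize(scores):
    # Mean over all runs in one bucket, as in print_results.py.
    mean = sum(scores) / len(scores)
    standardDeviation = 0.0
    if len(scores) > 1:
        # Sample standard deviation (divide by n-1), matching the script.
        variance = sum((s - mean) ** 2 for s in scores) / (len(scores) - 1)
        standardDeviation = math.sqrt(variance)
    if standardDeviation > 0:
        return "%.2f[±%.2f]%%" % (mean, standardDeviation)
    return "%.2f%%" % mean

if __name__ == "__main__":
    print(summarize([87.31, 88.02, 87.65]))  # several runs: 87.66[±0.36]%
    print(summarize([90.10]))                # single run: 90.10%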