From 3dda66ce8d7ff1605f74ae41532688bc08fed974 Mon Sep 17 00:00:00 2001
From: Franck Dary <franck.dary@lis-lab.fr>
Date: Tue, 31 Mar 2020 19:54:53 +0200
Subject: [PATCH] Add script to print results

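Run it from a directory containing the *.stdout / *.stderr files produced
by the evaluation runs: it prints a table of F1 scores per corpus, metric
and model, averaged over repeated runs of the same experiment.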
---
 UD_any/print_results.py | 90 ++++++++++++++++++++++++++++++++++++++++++++++
 1 file changed, 90 insertions(+)
 create mode 100755 UD_any/print_results.py

diff --git a/UD_any/print_results.py b/UD_any/print_results.py
new file mode 100755
index 0000000..44ec0af
--- /dev/null
+++ b/UD_any/print_results.py
@@ -0,0 +1,90 @@
+#!/usr/bin/env python3
+
+import glob
+import sys
+import math
+
+if __name__ == "__main__" :
+
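+  # Metrics to extract from the evaluation output; the commented-out list
+  # below contains the full set of available metrics.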
+  #metrics = ["LAS","UAS","Tokens","Words","Sentences","UPOS","UFeats","Lemmas"]
+  metrics = ["Tokens","Words"]
+
+  output = []
+  outputByModelScore = dict()
+
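+  # Scan the stderr files and report any line that looks like an error.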
+  for pathToFile in glob.iglob('*stderr') :
+    with open(pathToFile, "r") as stderrFile :
+      for line in stderrFile :
+        if "error" in line.lower() :
+          print(pathToFile, ":", file=sys.stderr)
+          print("\t"+line, end="", file=sys.stderr)
+
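+  # Result files are expected to be named "<model>_UD_<corpus>.stdout":
+  # the model name comes before "_UD_", the corpus name after it.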
+  for pathToFile in glob.iglob('*stdout') :
+    model = pathToFile.split("_UD_")[0].split('.')[0]
+    corpus = pathToFile.split("_UD_")[1].split('.')[0]
+
+    if corpus not in outputByModelScore :
+      outputByModelScore[corpus] = dict()
+
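+    # Each stdout file holds the evaluation table, one line per metric,
+    # e.g. "Tokens     |     99.73 |     99.73 |     99.73 |".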
+    with open(pathToFile, "r") as stdoutFile :
+      for line in stdoutFile :
+        for metric in metrics :
+          if line.startswith(metric) :
+            fields = line.strip().replace("|","").split()
+
+            if model not in outputByModelScore[corpus] :
+              outputByModelScore[corpus][model] = dict()
+            if fields[0] not in outputByModelScore[corpus][model] :
+              outputByModelScore[corpus][model][fields[0]] = []
+
+            # fields[3] is the F1 column of the evaluation table.
+            outputByModelScore[corpus][model][fields[0]].append([corpus, fields[0], fields[3], model])
+
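+  # Average the F1 scores over the repeated runs of each
+  # (corpus, model, metric) triple.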
+  for corpus in outputByModelScore :
+    for model in outputByModelScore[corpus] :
+      for metric in outputByModelScore[corpus][model] :
+        experiments = outputByModelScore[corpus][model][metric]
+        scores = [float(exp[2]) for exp in experiments]
+        score = sum(scores) / len(scores)
+        # Sample standard deviation (n-1 denominator) across runs.
+        standardDeviation = 0.0
+        if len(scores) > 1 :
+          standardDeviation = math.sqrt(sum((s-score)**2 for s in scores) / (len(scores)-1))
+        if standardDeviation > 0 :
+          score = "%.2f[±%.2f]%%"%(score,standardDeviation)
+        else :
+          score = "%.2f%%"%score
+        output.append(experiments[0])
+        output[-1][2] = score
+
+  # Sort the rows, prepend the header, then compute each column's width
+  # so the table can be aligned.
+  output.sort()
+  output = [["Corpus","Metric","F1.score","Model"]] + output
+
+  maxColLens = [0 for _ in range(len(output[0]))]
+  for line in output :
+    for i in range(len(line)) :
+      maxColLens[i] = max(maxColLens[i], len(line[i]))
+
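+  # Print the table; a dashed line separates corpora and a blank line
+  # separates metrics within the same corpus.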
+  dashLine = '-' * 80
+  for i in range(len(output)) :
+    if i > 0 and output[i][0] != output[i-1][0] :
+      print(dashLine)
+    elif i > 0 and output[i][1] != output[i-1][1] :
+      print("")
+    for j in range(len(output[i])) :
+      padding = (' '*(maxColLens[j]-len(output[i][j])))+" "*3
+      print(output[i][j], end=padding)
+    print("")
-- 
GitLab