From 546d0e4403b331f6b991fcc863815a347bd89980 Mon Sep 17 00:00:00 2001
From: Franck Dary <franck.dary@lis-lab.fr>
Date: Fri, 29 Nov 2019 18:12:19 +0100
Subject: [PATCH] Show errors in the printResults script

---
 UD_any/print_results.py | 22 ++++++++++++++++------
 1 file changed, 16 insertions(+), 6 deletions(-)
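
The first hunk scans every '*stderr' file in the working directory and
echoes any line that mentions an error, prefixed with the file name, so
that failed runs are visible before the score table. A minimal
standalone sketch of the same logic, assuming the script is run from
the directory holding the stderr dumps (the case-insensitive test here
is a simplification; the patch checks the spellings "Error", "ERROR"
and "error" explicitly):

    import glob

    # Report every line of every *stderr dump that mentions an error,
    # prefixed with the name of the file it came from.
    for pathToFile in glob.iglob('*stderr'):
        with open(pathToFile, "r") as f:
            for line in f:
                if "error" in line.lower():
                    print(pathToFile, ":")
                    print("\t" + line, end="")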

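The second hunk guards the standard deviation: the old code divided by
len(outputByModelScore[model][metric])-1 unconditionally, which raises
ZeroDivisionError when a model/metric pair has a single run. With the
guard, a single run (or identical scores) prints the mean alone,
without the [±...] part. A sketch of the guarded computation, where the
helper name mean_and_stddev and the scores list are illustrative only:

    import math

    def mean_and_stddev(scores):
        # Mean over all runs; sample standard deviation only when at
        # least two runs are available, otherwise it stays 0.0.
        score = sum(scores) / len(scores)
        standardDeviation = 0.0
        if len(scores) > 1:
            variance = sum((s - score) ** 2 for s in scores) / (len(scores) - 1)
            standardDeviation = math.sqrt(variance)
        # Omit the ± part when there is no spread to report.
        if standardDeviation > 0:
            return "%.2f[±%.2f]%%" % (score, standardDeviation)
        return "%.2f%%" % score

    print(mean_and_stddev([87.2]))        # 87.20%
    print(mean_and_stddev([87.2, 88.0]))  # 87.60[±0.57]%
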
diff --git a/UD_any/print_results.py b/UD_any/print_results.py
index 7b1abe3..39ffe17 100755
--- a/UD_any/print_results.py
+++ b/UD_any/print_results.py
@@ -11,6 +11,12 @@ if __name__ == "__main__" :
   output = []
   outputByModelScore = dict()
 
+  for pathToFile in glob.iglob("" + '*stderr') :
+    for line in open(pathToFile, "r") :
+      if "Error" in line or "ERROR" in line or "error" in line :
+        print(pathToFile,":")
+        print("\t"+line,end="")
+
   for pathToFile in glob.iglob("" + '*stdout') :
     model = pathToFile.split("_UD_")[0]
     corpus = pathToFile.split("_UD_")[1].split('.')[0]
@@ -32,15 +38,19 @@ if __name__ == "__main__" :
   for model in outputByModelScore :
     for metric in outputByModelScore[model] :
       score = 0.0
-      standardDeviation = 0.0
       for exp in outputByModelScore[model][metric] :
         score += float(exp[2])
       score /= len(outputByModelScore[model][metric])
-      for exp in outputByModelScore[model][metric] :
-        standardDeviation += (float(exp[2])-score)**2
-      standardDeviation /= len(outputByModelScore[model][metric])-1
-      standardDeviation = math.sqrt(standardDeviation)
-      score = "%.2f[±%.2f]%%"%(score,standardDeviation)
+      standardDeviation = 0.0
+      if len(outputByModelScore[model][metric]) > 1 :
+        for exp in outputByModelScore[model][metric] :
+          standardDeviation += (float(exp[2])-score)**2
+        standardDeviation /= len(outputByModelScore[model][metric])-1
+        standardDeviation = math.sqrt(standardDeviation)
+      if standardDeviation > 0 :
+        score = "%.2f[±%.2f]%%"%(score,standardDeviation)
+      else :
+        score = "%.2f%%"%score
       output.append(outputByModelScore[model][metric][0])
       output[-1][2] = score
 
-- 
GitLab