diff --git a/UD_any/print_results.py b/UD_any/print_results.py
index 7b1abe3c5e74a63fd765567edb6d1df63657a0ff..39ffe175bb32a6eb9ea9942cf4bf638adf4a208a 100755
--- a/UD_any/print_results.py
+++ b/UD_any/print_results.py
@@ -11,6 +11,13 @@ if __name__ == "__main__" :
   output = []
   outputByModelScore = dict()
 
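+  # Scan the stderr files first and report any line mentioning an error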
+  for pathToFile in glob.iglob("" + '*stderr') :
+    for line in open(pathToFile, "r") :
+      if "Error" in line or "ERROR" in line or "error" in line :
+        print(pathToFile,":")
+        print("\t"+line,end="")
+
   for pathToFile in glob.iglob("" + '*stdout') :
     model = pathToFile.split("_UD_")[0]
     corpus = pathToFile.split("_UD_")[1].split('.')[0]
@@ -32,15 +39,21 @@ if __name__ == "__main__" :
   for model in outputByModelScore :
     for metric in outputByModelScore[model] :
       score = 0.0
-      standardDeviation = 0.0
       for exp in outputByModelScore[model][metric] :
         score += float(exp[2])
       score /= len(outputByModelScore[model][metric])
-      for exp in outputByModelScore[model][metric] :
-        standardDeviation += (float(exp[2])-score)**2
-      standardDeviation /= len(outputByModelScore[model][metric])-1
-      standardDeviation = math.sqrt(standardDeviation)
-      score = "%.2f[±%.2f]%%"%(score,standardDeviation)
+      standardDeviation = 0.0
+      if len(outputByModelScore[model][metric]) > 1 :
+        for exp in outputByModelScore[model][metric] :
+          standardDeviation += (float(exp[2])-score)**2
+        standardDeviation /= len(outputByModelScore[model][metric])-1
+        standardDeviation = math.sqrt(standardDeviation)
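+      # Only show the ±deviation when it is non-zero (several runs with differing scores)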
+      if standardDeviation > 0 :
+        score = "%.2f[±%.2f]%%"%(score,standardDeviation)
+      else :
+        score = "%.2f%%"%score
       output.append(outputByModelScore[model][metric][0])
       output[-1][2] = score