diff --git a/UD_any/print_results.py b/UD_any/print_results.py
index 39ffe175bb32a6eb9ea9942cf4bf638adf4a208a..69cdb4b0dfeff970c9831055de589053d3d515bc 100755
--- a/UD_any/print_results.py
+++ b/UD_any/print_results.py
@@ -21,6 +21,9 @@ if __name__ == "__main__" :
     model = pathToFile.split("_UD_")[0]
     corpus = pathToFile.split("_UD_")[1].split('.')[0]
 
+    if corpus not in outputByModelScore :
+      outputByModelScore[corpus] = dict()
+
     for line in open(pathToFile, "r") :
       for metric in metrics :
         if metric in line and metric[0] == line[0]:
@@ -28,31 +31,35 @@ if __name__ == "__main__" :
 
           model = model.split('.')[0]
 
-          if model not in outputByModelScore :
-            outputByModelScore[model] = dict()
-          if splited[0] not in outputByModelScore[model] :
-            outputByModelScore[model][splited[0]] = []
-
-          outputByModelScore[model][splited[0]].append([corpus, splited[0], splited[3], model])
-
-  for model in outputByModelScore :
-    for metric in outputByModelScore[model] :
-      score = 0.0
-      for exp in outputByModelScore[model][metric] :
-        score += float(exp[2])
-      score /= len(outputByModelScore[model][metric])
-      standardDeviation = 0.0
-      if len(outputByModelScore[model][metric]) > 1 :
-        for exp in outputByModelScore[model][metric] :
-          standardDeviation += (float(exp[2])-score)**2
-        standardDeviation /= len(outputByModelScore[model][metric])-1
-        standardDeviation = math.sqrt(standardDeviation)
-      if standardDeviation > 0 :
-        score = "%.2f[±%.2f]%%"%(score,standardDeviation)
-      else :
-        score = "%.2f%%"%score
-      output.append(outputByModelScore[model][metric][0])
-      output[-1][2] = score
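+          # scores are now grouped as corpus -> model -> metric -> list of per-run rows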
+          if model not in outputByModelScore[corpus] :
+            outputByModelScore[corpus][model] = dict()
+          if splited[0] not in outputByModelScore[corpus][model] :
+            outputByModelScore[corpus][model][splited[0]] = []
+
+          outputByModelScore[corpus][model][splited[0]].append([corpus, splited[0], splited[3], model])
+
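+  # average each metric over all runs of a given corpus/model pair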
+  for corpus in outputByModelScore :
+    for model in outputByModelScore[corpus] :
+      for metric in outputByModelScore[corpus][model] :
+        score = 0.0
+        for exp in outputByModelScore[corpus][model][metric] :
+          score += float(exp[2])
+        score /= len(outputByModelScore[corpus][model][metric])
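+        # with more than one run, also report the sample standard deviation (n-1 in the denominator)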
+        standardDeviation = 0.0
+        if len(outputByModelScore[corpus][model][metric]) > 1 :
+          for exp in outputByModelScore[corpus][model][metric] :
+            standardDeviation += (float(exp[2])-score)**2
+          standardDeviation /= len(outputByModelScore[corpus][model][metric])-1
+          standardDeviation = math.sqrt(standardDeviation)
+        if standardDeviation > 0 :
+          score = "%.2f[±%.2f]%%"%(score,standardDeviation)
+        else :
+          score = "%.2f%%"%score
+        output.append(outputByModelScore[corpus][model][metric][0])
+        output[-1][2] = score
 
   maxColLens = [0 for _ in range(len(output[0]))]