diff --git a/UD_any/print_results.py b/UD_any/print_results.py
index 8a934f77ff4d9ae04802d5cdf92458d4aca7541e..4b3636b966fdcbe0abdd4cc830acf202e9ac1467 100755
--- a/UD_any/print_results.py
+++ b/UD_any/print_results.py
@@ -4,11 +4,20 @@ import glob
 import sys
 import math
 
+################################################################################
 if __name__ == "__main__" :
 
-  scoreCol = -1
+  scoreTypes = { # each entry: [column header, unit suffix, index of the score field, number format]
+    "F1" : ["F1.score","%",-1,"%.2f"],
+    "R2" : ["R²","",-3,"%.4f"],
+    "L1" : ["L1","",-1,"%.2f"],
+    "L2" : ["L2","",-2,"%.2f"],
+  }
 
-  metrics = ["LAS","UAS","Tokens","Words","Sentences","UPOS","UFeats","Lemmas"]
+  scoreType = scoreTypes["L1"] if len(sys.argv) < 3 else scoreTypes[sys.argv[2].upper()] # optional score type from the command line (F1, R2, L1 or L2), defaults to L1
+
+  #metrics = ["LAS","UAS","Tokens","Words","Sentences","UPOS","UFeats","Lemmas"]
+  metrics = ["TOTAL_FIXATION_DURATION"]
 
   output = []
   outputByModelScore = dict()
@@ -41,7 +50,7 @@ if __name__ == "__main__" :
           if splited[0] not in outputByModelScore[corpus][model] :
             outputByModelScore[corpus][model][splited[0]] = []
 
-          outputByModelScore[corpus][model][splited[0]].append([corpus, splited[0], splited[scoreCol], model])
+          outputByModelScore[corpus][model][splited[0]].append([corpus, splited[0], splited[scoreType[2]], model])
 
   for corpus in outputByModelScore :
     for model in outputByModelScore[corpus] :
@@ -57,12 +66,12 @@ if __name__ == "__main__" :
           standardDeviation /= len(outputByModelScore[corpus][model][metric])
           standardDeviation = math.sqrt(standardDeviation)
         baseScore = score
-        if standardDeviation > 0 :
-          score = "%.2f[±%.2f]%%"%(score,standardDeviation)
+        if float("%.2f"%standardDeviation) > 0 :
+          score = "%s[±%%.2f]%%s"%scoreType[3]%(score,standardDeviation,scoreType[1])
         else :
-          score = "%.2f%%"%score
+          score = "%s%%s"%scoreType[3]%(score, scoreType[1])
         if '-' in score :
-          score = score.replace('-','').replace('%','')
+          score = score.replace('-','')
         output.append(outputByModelScore[corpus][model][metric][0])
         output[-1][2] = score
         output[-1] = [output[-1][0]] + [baseScore] + output[-1][1:]
@@ -78,7 +87,7 @@ if __name__ == "__main__" :
 
   maxColLens = [0 for _ in range(len(output[0]))]
 
-  output = [["Corpus","Metric","F1.score","Model"]] + output
+  output = [["Corpus","Metric",scoreType[0],"Model"]] + output
 
   for line in output :
     for i in range(len(line)) :
@@ -94,3 +103,5 @@ if __name__ == "__main__" :
       padding = (' '*(maxColLens[j]-len(str(output[i][j]))))+" "*3
       print(output[i][j], end=padding)
     print("")
+################################################################################
+
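Note on the chained %-formatting introduced above: the first substitution only injects the per-score-type number format, and the doubled %% escapes survive it as placeholders for the second substitution. A minimal standalone sketch, not part of the patch, reusing the scoreTypes entries and variable names from the diff and assuming the F1 entry is selected:

# Sketch only: mirrors the new formatting logic for scoreType = scoreTypes["F1"].
scoreTypes = {
  "F1" : ["F1.score","%",-1,"%.2f"],   # [column header, unit suffix, score field index, number format]
  "R2" : ["R²","",-3,"%.4f"],
}
scoreType = scoreTypes["F1"]
score, standardDeviation = 87.6543, 0.1234

# Step 1: inject the number format; %%.2f and %%s come out as %.2f and %s.
template = "%s[±%%.2f]%%s" % scoreType[3]                     # -> "%.2f[±%.2f]%s"
# Step 2: fill in the score, the standard deviation and the unit suffix.
print(template % (score, standardDeviation, scoreType[1]))    # -> 87.65[±0.12]%

In the patch the two substitutions are written on one line, "%s[±%%.2f]%%s"%scoreType[3]%(score,standardDeviation,scoreType[1]), which is equivalent because % is left-associative.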