diff --git a/UD_any/print_results.py b/UD_any/print_results.py
index d7477a567a3f6d96642aba63784fa5acb5da9968..2596c56df71b5a81d35e2f01b0de46a8530bf30b 100755
--- a/UD_any/print_results.py
+++ b/UD_any/print_results.py
@@ -11,14 +11,21 @@ if __name__ == "__main__" :
   output = []
   outputByModelScore = dict()
 
-  for pathToFile in glob.iglob("" + '*stderr') :
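+  # Optional first argument: directory containing the *.stdout / *.stderr files (defaults to cwd)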
+  baseDir = sys.argv[1] + "/" if len(sys.argv) > 1 else ""
+  filenamesErr = glob.iglob(baseDir + '*stderr')
+  filenamesOut = glob.iglob(baseDir + '*stdout')
+
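+  # Report any error lines found in the *.stderr files before printing scores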
+  for pathToFile in filenamesErr :
     for line in open(pathToFile, "r") :
       if "Error" in line or "ERROR" in line or "error" in line :
         print(pathToFile,":", file=sys.stderr)
         print("\t"+line,end="", file=sys.stderr)
 
-  for pathToFile in glob.iglob("" + '*stdout') :
-    splited = pathToFile.split('.')
+  for pathToFile in filenamesOut :
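+    # Parse the basename, expected to look like <model>.<corpus>.<index>.stdout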
+    splited = pathToFile.split('/')[-1].split('.')
     model = ".".join(splited[:-3])
     corpus = splited[-3]
     index = splited[-2]
@@ -65,6 +72,7 @@ if __name__ == "__main__" :
   if len(output) == 0 :
     print("ERROR : Output length is 0", file=sys.stderr)
     print("  did you run evaluate.sh ?", file=sys.stderr)
+    print("USAGE : %s [directory of .stdout files]"%sys.argv[0], file=sys.stderr)
     exit(1)
 
   output.sort()