diff --git a/.gitignore b/.gitignore
index 4f336512151a281e093590309bd0e7996ce1145e..8973541e9f6ae20bc8d6c90013d79a26f15d60ae 100644
--- a/.gitignore
+++ b/.gitignore
@@ -3,4 +3,6 @@ TODO
 .idea/**
 .ipynb_checkpoints/**
 Results/**
-Code/MonoMultiviewClassifiers/Results/*
\ No newline at end of file
+Data/**
+Code/MonoMultiViewClassifiers/Results/*
+Code/Tests/temp_tests/**
\ No newline at end of file
diff --git a/Code/MonoMultiViewClassifiers/Multiview/Fusion/Methods/LateFusionPackage/BayesianInference.py b/Code/MonoMultiViewClassifiers/Multiview/Fusion/Methods/LateFusionPackage/BayesianInference.py
index 41f992b0a6ba33b4c6d37b31626664c66d6d7f37..37ee826480419e81dda38a812258730a51403cb3 100644
--- a/Code/MonoMultiViewClassifiers/Multiview/Fusion/Methods/LateFusionPackage/BayesianInference.py
+++ b/Code/MonoMultiViewClassifiers/Multiview/Fusion/Methods/LateFusionPackage/BayesianInference.py
@@ -74,7 +74,6 @@ class BayesianInference(LateFusionClassifier):
         if usedIndices is None:
             usedIndices = range(DATASET.get("Metadata").attrs["datasetLength"])
         if sum(self.weights) != 1.0:
-            print self.weights
             self.weights = self.weights / sum(self.weights)
 
         viewScores = np.zeros((nbView, len(usedIndices), DATASET.get("Metadata").attrs["nbClass"]))
diff --git a/Code/MonoMultiViewClassifiers/Multiview/GetMultiviewDb.py b/Code/MonoMultiViewClassifiers/Multiview/GetMultiviewDb.py
index 581a320f838d4cd7d0a3ec554f3467ab62d281b1..18fa2efda9bcac8310653acb6dfa3ec274dc9938 100644
--- a/Code/MonoMultiViewClassifiers/Multiview/GetMultiviewDb.py
+++ b/Code/MonoMultiViewClassifiers/Multiview/GetMultiviewDb.py
@@ -5,6 +5,7 @@ import os
 import logging
 import h5py
 import operator
+import errno
 
 # Author-Info
 __author__ = "Baptiste Bauvin"
@@ -28,7 +29,13 @@ def getPlausibleDBhdf5(features, pathF, name, NB_CLASS, LABELS_NAME, nbView=3,
                        nbClass=2, datasetLength=347, randomStateInt=42):
     randomState = np.random.RandomState(randomStateInt)
     nbFeatures = 250
-    datasetFile = h5py.File(pathF + "Plausible.hdf5", "w")
+    if not os.path.exists(os.path.dirname(os.path.join(pathF, "Plausible.hdf5"))):
+        try:
+            os.makedirs(os.path.dirname(os.path.join(pathF, "Plausible.hdf5")))
+        except OSError as exc:
+            if exc.errno != errno.EEXIST:
+                raise
+    datasetFile = h5py.File(os.path.join(pathF, "Plausible.hdf5"), "w")
     CLASS_LABELS = np.array([0 for i in range(datasetLength / 2)] + [1 for i in range(datasetLength / 2)])
     for viewIndex in range(nbView):
         viewData = np.array([np.zeros(nbFeatures) for i in range(datasetLength / 2)] + [np.ones(nbFeatures)
diff --git a/Code/MonoMultiViewClassifiers/utils/execution.py b/Code/MonoMultiViewClassifiers/utils/execution.py
index f7b457ba903b242628816aec28595983c1580966..b3761ee71c228d52f294b8de5e2304b434ba4809 100644
--- a/Code/MonoMultiViewClassifiers/utils/execution.py
+++ b/Code/MonoMultiViewClassifiers/utils/execution.py
@@ -25,7 +25,7 @@ def parseTheArgs(arguments):
                                help='Name of the views selected for learning (default: %(default)s)',
                                default=[''])
     groupStandard.add_argument('--pathF', metavar='STRING', action='store', help='Path to the views (default: %(default)s)',
-                               default='/home/bbauvin/Documents/Data/Data_multi_omics/')
+                               default='../Data/')
     groupStandard.add_argument('--nice', metavar='INT', action='store', type=int,
                                help='Niceness for the process', default=0)
     groupStandard.add_argument('--randomState', metavar='STRING', action='store',
diff --git a/Readme.md b/Readme.md
index 680b2c6446134035fb454446bff2ba5a167af2a0..fbe1d3568a72364e256429bf2951f1f67f91dcaa 100644
--- a/Readme.md
+++ b/Readme.md
@@ -1,20 +1,14 @@
 # Mono- and Multi-view classification benchmark
 
-This project aims to be an easy-to use solution to run a prior benchmark on a dataset abd evaluate mono- and multi-view algorithms capacity to classify it correctly.
+This project aims to be an easy-to-use solution for running a prior benchmark on a dataset and evaluating the capacity of mono- and multi-view algorithms to classify it correctly.
 
 ## Getting Started
 
-In order to run it you'll need to try on simulated data with the command
-```
-python multiview-machine-learning-omis/Code/MonoMultiViewClassifiers/ExecClassif.py -log
-```
-Results will be stored in multiview-machine-learning-omis/Code/MonoMultiViewClassifiers/Results/
-
 ### Prerequisites
 
 To be able to use this project, you'll need :
 
-* [Python 2.7](http://www.dropwizard.io/1.0.2/docs/) - The web framework used
+* [Python 2.7](https://www.python.org/)
 
 And the following python modules :
 * [pyscm](https://github.com/aldro61/pyscm) - Set Covering Machine, Marchand, M., & Taylor, J. S. (2003) by A.Drouin, F.Brochu, G.Letarte St-Pierre, M.Osseni, P-L.Plante
@@ -22,24 +16,66 @@ And the following python modules :
 * [matplotlib](http://matplotlib.org/) - Used to plot results
 * [sklearn](http://scikit-learn.org/stable/) - Used for the monoview classifiers
 * [joblib](https://pypi.python.org/pypi/joblib) - Used to compute on multiple threads
-* [h5py](www.h5py.org) - Used to generate HDF5 datasets on hard drive and use them to sapre RAM
+* [h5py](https://www.h5py.org/) - Used to generate HDF5 datasets on the hard drive and use them to spare RAM
 * ([argparse](https://docs.python.org/3/library/argparse.html) - Used to parse the input args)
 * ([logging](https://docs.python.org/2/library/logging.html) - Used to generate log)
 
-They are all tested in  `multiview-machine-mearning-omis/Code/MonoMutliViewClassifiers/Versions.py` which is automatically checked each time you run the `ExecClassif` script
+They are all checked in `multiview-machine-learning-omis/Code/MonoMultiViewClassifiers/Versions.py`, which runs automatically each time you launch the `Exec` script.
 
 ### Installing
 
 No installation is needed, just the prerequisites.
 
+### Running on simulated data
+
+To try the benchmark on **simulated** data, run:
+```
+cd multiview-machine-learning-omis/Code
+python Exec.py -log
+```
+Results will be stored in `multiview-machine-learning-omis/Code/MonoMultiViewClassifiers/Results/`
+
+If no path is specified, HDF5 datasets are stored in `multiview-machine-learning-omis/Data`
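+
+For example, to store them somewhere else, you can point the `--pathF` argument at another directory (the path below is only a placeholder):
+```
+python Exec.py -log --pathF /path/to/your/data/
+```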
+
+
+### Discovering the arguments
+
+To see all the arguments of this script, their descriptions and default values, run:
+```
+cd multiview-machine-learning-omis/Code
+python Exec.py -h
+```
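+
+For instance, a run that fixes the random state and lowers the process priority (both arguments are declared in `Code/MonoMultiViewClassifiers/utils/execution.py`; the values below are only illustrative) could look like:
+```
+python Exec.py -log --randomState 42 --nice 10
+```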
+
+
+### Understanding the `Results/` architecture
+
+Results are stored in `multiview-machine-learning-omis/Code/MonoMultiViewClassifiers/Results/`
+A directory will be created with the name of the database used to run the script.
+Each time the script is run, a new directory named after the current date and time is created inside it.
+In that directory:
+* If the script is run with more than one statistical iteration, one directory is created per iteration and the statistical analysis is stored alongside them
+* If it is run with a single iteration, the iteration results are stored directly in that directory
+
+The results for each iteration are graphs recapping the classifiers' scores; each classifier's configuration and detailed results are stored in a directory of its own.
+To explore the results, run the `Exec` script and browse `multiview-machine-learning-omis/Code/MonoMultiViewClassifiers/Results/Plausible/`
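+
+For a run on the `Plausible` dataset, the layout looks roughly like this (the directory names are only indicative, the real names depend on the date and configuration of the run):
+```
+Code/MonoMultiViewClassifiers/Results/
+└── Plausible/
+    └── <date-and-time-of-the-run>/
+        ├── <one-directory-per-statistical-iteration>/
+        └── <statistical-analysis-graphs>
+```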
+
+
 ## Running the tests
 
+**/!\ Still in development, test success is not meaningful at the moment /!\\**
+
-In order to run it you'll need to try on simulated data with the command
+To run the tests, use:
 ```
-python multiview-machine-learning-omis/Code/MonoMultiViewClassifiers/ExecClassif.py -log
+cd multiview-machine-learning-omis/Code
+python -m unittest discover
 ```
-Results will be stored in multiview-machine-learning-omis/Code/MonoMultiViewClassifiers/Results/
 
 ## Authors
 
 * **Baptiste BAUVIN**
+
+### Contributors
+
+* **Mazid Osseni**
+* **Alexandre Drouin**
+* **Nikolas Huelsmann**
\ No newline at end of file