diff --git a/multiview_platform/MonoMultiViewClassifiers/ExecClassif.py b/multiview_platform/MonoMultiViewClassifiers/ExecClassif.py
index 06e66549a8a04707e7874ee979f967bbdf63daae..2f92d2c5a590491cebfd907f4533a7d736d76461 100644
--- a/multiview_platform/MonoMultiViewClassifiers/ExecClassif.py
+++ b/multiview_platform/MonoMultiViewClassifiers/ExecClassif.py
@@ -573,7 +573,8 @@ def execBenchmark(nbCores, statsIter, nbMulticlass,
                multiclassGroundTruth, metrics, classificationIndices,
                directories, directory, labelsDictionary, nbExamples, nbLabels)
     logging.debug("Done:\t Analyzing predictions")
-
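+    # Release the dataset file handle once all predictions have been analyzed.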
+    DATASET.close()
     return results
 
 
@@ -594,76 +594,82 @@ def execClassif(arguments):
     multiviewAlgos = args.CL_algos_multiview
     dataset_list = execution.find_dataset_names(args.pathF, args.type, args.name)
 
-    for name in dataset_list:
-
-        directory = execution.initLogFile(name, args.views, args.CL_type,
-                                          args.log, args.debug, args.label,
-                                          args.res_dir, args.add_noise, args.noise_std)
-        randomState = execution.initRandomState(args.randomState, directory)
-        statsIterRandomStates = execution.initStatsIterRandomStates(statsIter,
-                                                                    randomState)
-
-        getDatabase = execution.getDatabaseFunction(name, args.type)
-
-        DATASET, LABELS_DICTIONARY, datasetname = getDatabase(args.views,
-                                                              args.pathF, name,
-                                                              args.CL_nbClass,
-                                                              args.CL_classes,
-                                                              randomState,
-                                                              args.full,
-                                                              args.add_noise,
-                                                              args.noise_std)
-        args.name = datasetname
-
-        splits = execution.genSplits(DATASET.get("Labels").value, args.CL_split,
-                                     statsIterRandomStates)
-
-        multiclassLabels, labelsCombinations, indicesMulticlass = Multiclass.genMulticlassLabels(
-            DATASET.get("Labels").value, multiclassMethod, splits)
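+    # When noise injection is disabled, run a single pass with a noise std of 0.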
+    if not args.add_noise:
+        args.noise_std = [0.0]
 
-        kFolds = execution.genKFolds(statsIter, args.CL_nbFolds,
-                                     statsIterRandomStates)
-
-        datasetFiles = Dataset.initMultipleDatasets(args.pathF, args.name, nbCores)
-
-        # if not views:
-        #     raise ValueError("Empty views list, modify selected views to match dataset " + args.views)
-
-        views, viewsIndices, allViews = execution.initViews(DATASET, args.views)
-        viewsDictionary = genViewsDictionnary(DATASET, views)
-        nbViews = len(views)
-        NB_CLASS = DATASET.get("Metadata").attrs["nbClass"]
-
-        metrics = [metric.split(":") for metric in args.CL_metrics]
-        if metrics == [[""]]:
-            metricsNames = [name for _, name, isPackage
-                            in pkgutil.iter_modules(
-                    ['./MonoMultiViewClassifiers/Metrics']) if
-                            not isPackage and name not in ["framework", "log_loss",
-                                                           "matthews_corrcoef",
-                                                           "roc_auc_score"]]
-            metrics = [[metricName] for metricName in metricsNames]
-            metrics = arangeMetrics(metrics, args.CL_metric_princ)
-        for metricIndex, metric in enumerate(metrics):
-            if len(metric) == 1:
-                metrics[metricIndex] = [metric[0], None]
-
-        benchmark = initBenchmark(CL_type, monoviewAlgos, multiviewAlgos, args)
-        initKWARGS = initKWARGSFunc(args, benchmark)
-        dataBaseTime = time.time() - start
-        argumentDictionaries = initMonoviewExps(benchmark, viewsDictionary,
-                                                NB_CLASS, initKWARGS)
-        directories = execution.genDirecortiesNames(directory, statsIter)
-        benchmarkArgumentDictionaries = execution.genArgumentDictionaries(
-            LABELS_DICTIONARY, directories, multiclassLabels,
-            labelsCombinations, indicesMulticlass,
-            hyperParamSearch, args, kFolds,
-            statsIterRandomStates, metrics,
-            argumentDictionaries, benchmark, nbViews,
-            views, viewsIndices)
-        nbMulticlass = len(labelsCombinations)
-
-        execBenchmark(nbCores, statsIter, nbMulticlass,
-                      benchmarkArgumentDictionaries, splits, directories,
-                      directory, multiclassLabels, metrics, LABELS_DICTIONARY,
-                      NB_CLASS, DATASET)
+    for name in dataset_list:
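+        # Run the full benchmark once for each requested noise standard deviation.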
+        for noise_std in args.noise_std:
+
+            directory = execution.initLogFile(name, args.views, args.CL_type,
+                                              args.log, args.debug, args.label,
+                                              args.res_dir, args.add_noise, noise_std)
+            randomState = execution.initRandomState(args.randomState, directory)
+            statsIterRandomStates = execution.initStatsIterRandomStates(statsIter,
+                                                                        randomState)
+
+            getDatabase = execution.getDatabaseFunction(name, args.type)
+
+            DATASET, LABELS_DICTIONARY, datasetname = getDatabase(args.views,
+                                                                  args.pathF, name,
+                                                                  args.CL_nbClass,
+                                                                  args.CL_classes,
+                                                                  randomState,
+                                                                  args.full,
+                                                                  args.add_noise,
+                                                                  noise_std)
+            args.name = datasetname
+
+            splits = execution.genSplits(DATASET.get("Labels").value, args.CL_split,
+                                         statsIterRandomStates)
+
+            multiclassLabels, labelsCombinations, indicesMulticlass = Multiclass.genMulticlassLabels(
+                DATASET.get("Labels").value, multiclassMethod, splits)
+
+            kFolds = execution.genKFolds(statsIter, args.CL_nbFolds,
+                                         statsIterRandomStates)
+
+            datasetFiles = Dataset.initMultipleDatasets(args.pathF, args.name, nbCores)
+
+            # if not views:
+            #     raise ValueError("Empty views list, modify selected views to match dataset " + args.views)
+
+            views, viewsIndices, allViews = execution.initViews(DATASET, args.views)
+            viewsDictionary = genViewsDictionnary(DATASET, views)
+            nbViews = len(views)
+            NB_CLASS = DATASET.get("Metadata").attrs["nbClass"]
+
+            metrics = [metric.split(":") for metric in args.CL_metrics]
+            if metrics == [[""]]:
+                metricsNames = [name for _, name, isPackage
+                                in pkgutil.iter_modules(
+                        ['./MonoMultiViewClassifiers/Metrics']) if
+                                not isPackage and name not in ["framework", "log_loss",
+                                                               "matthews_corrcoef",
+                                                               "roc_auc_score"]]
+                metrics = [[metricName] for metricName in metricsNames]
+                metrics = arangeMetrics(metrics, args.CL_metric_princ)
+            for metricIndex, metric in enumerate(metrics):
+                if len(metric) == 1:
+                    metrics[metricIndex] = [metric[0], None]
+
+            benchmark = initBenchmark(CL_type, monoviewAlgos, multiviewAlgos, args)
+            initKWARGS = initKWARGSFunc(args, benchmark)
+            dataBaseTime = time.time() - start
+            argumentDictionaries = initMonoviewExps(benchmark, viewsDictionary,
+                                                    NB_CLASS, initKWARGS)
+            directories = execution.genDirecortiesNames(directory, statsIter)
+            benchmarkArgumentDictionaries = execution.genArgumentDictionaries(
+                LABELS_DICTIONARY, directories, multiclassLabels,
+                labelsCombinations, indicesMulticlass,
+                hyperParamSearch, args, kFolds,
+                statsIterRandomStates, metrics,
+                argumentDictionaries, benchmark, nbViews,
+                views, viewsIndices)
+            nbMulticlass = len(labelsCombinations)
+
+            execBenchmark(nbCores, statsIter, nbMulticlass,
+                          benchmarkArgumentDictionaries, splits, directories,
+                          directory, multiclassLabels, metrics, LABELS_DICTIONARY,
+                          NB_CLASS, DATASET)
diff --git a/multiview_platform/MonoMultiViewClassifiers/utils/execution.py b/multiview_platform/MonoMultiViewClassifiers/utils/execution.py
index 3c0b8fee52f1b68c0912d9f07db6f12be69f7d0e..dc8507bbc4f305c693d4bd213811aa9058927002 100644
--- a/multiview_platform/MonoMultiViewClassifiers/utils/execution.py
+++ b/multiview_platform/MonoMultiViewClassifiers/utils/execution.py
@@ -64,10 +64,11 @@ def parseTheArgs(arguments):
                                help='Use option to bebug implemented algorithms')
     groupStandard.add_argument('-add_noise', action='store_true',
                                help='Use option to add noise to the data')
-    groupStandard.add_argument('--noise_std', metavar='FLOAT', action='store',
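+    # Accepts one or more std values; the benchmark is run once per value.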
+    groupStandard.add_argument('--noise_std', metavar='FLOAT', nargs="+", action='store',
                                help='The std of the gaussian noise that will '
                                     'be added to the data.',
-                               type=float, default=0.15)
+                               type=float, default=[0.0])
     groupStandard.add_argument('--res_dir', metavar='STRING', action='store',
                                help='The path to the result directory',
                                default="../Results/")
@@ -800,10 +800,8 @@ def initLogFile(name, views, CL_type, log, debug, label, result_directory, add_n
     resultsDirectory : string
         Reference to the main results directory for the benchmark.
     """
-    if add_noise:
-        noise_string = "_n_"+str(int(noise_std*100))
-    else:
-        noise_string = ""
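+    # Encode the noise std (scaled by 100) in a per-noise-level results sub-directory.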
+    noise_string = "/n_" + str(int(noise_std * 100))
     if debug:
         resultDirectory = result_directory + name + noise_string +"/debug_started_" + time.strftime(
             "%Y_%m_%d-%H_%M_%S") + "_" + label + "/"