diff --git a/Code/MonoMultiViewClassifiers/ExecClassif.py b/Code/MonoMultiViewClassifiers/ExecClassif.py
index ae75199fab077ab9a99355e75a7ca4cf75be97d6..d399b6216ec3398622899da0bd1ed6568f8f37bd 100644
--- a/Code/MonoMultiViewClassifiers/ExecClassif.py
+++ b/Code/MonoMultiViewClassifiers/ExecClassif.py
@@ -295,7 +295,7 @@ def execOneBenchmarkMonoCore(DATASET=None, LABELS_DICTIONARY=None, directory=Non
 def execBenchmark(nbCores, statsIter, nbMulticlass, benchmarkArgumentsDictionaries, classificationIndices, directories,
                   directory, multiClassLabels, metrics, labelsDictionary, nbLabels, DATASET,
                   execOneBenchmark=execOneBenchmark, execOneBenchmark_multicore=execOneBenchmark_multicore,
-                  execOneBenchmarkMonoCore=execOneBenchmarkMonoCore, getResults=getResults):
+                  execOneBenchmarkMonoCore=execOneBenchmarkMonoCore, getResults=getResults, delete=DB.deleteHDF5):
     """Used to execute the needed benchmark(s) on multicore or mono-core functions
     The execOneBenchmark and execOneBenchmark_multicore keywords args are only used in the tests"""
     # TODO :  find a way to flag
@@ -320,7 +320,7 @@ def execBenchmark(nbCores, statsIter, nbMulticlass, benchmarkArgumentsDictionari
     if nbCores > 1:
         logging.debug("Start:\t Deleting " + str(nbCores) + " temporary datasets for multiprocessing")
         args = benchmarkArgumentsDictionaries[0]["args"]
-        datasetFiles = DB.deleteHDF5(args.pathF, args.name, nbCores)
+        datasetFiles = delete(args.pathF, args.name, nbCores)
         logging.debug("Start:\t Deleting datasets for multiprocessing")
     # Do everything with flagging
     nbExamples = len(classificationIndices[0][0])+len(classificationIndices[0][1])
diff --git a/Code/MonoMultiViewClassifiers/Metrics/f1_score.py b/Code/MonoMultiViewClassifiers/Metrics/f1_score.py
index 13550b2fad2372076a8e1c56755c25096bd9823b..3a6517a1a0f5872c7d95944c091c1a1896dcae3a 100644
--- a/Code/MonoMultiViewClassifiers/Metrics/f1_score.py
+++ b/Code/MonoMultiViewClassifiers/Metrics/f1_score.py
@@ -31,7 +31,7 @@ def score(y_true, y_pred, **kwargs):
             average = "micro"
         else:
             average = "binary"
-    
+
     score = metric(y_true, y_pred, sample_weight=sample_weight, labels=labels, pos_label=pos_label, average=average)
 
     return score
diff --git a/Code/Tests/test_ExecClassif.py b/Code/Tests/test_ExecClassif.py
index 9a8dabd537f54687d2d76a8752b7a10ec807afe2..0577fb209acd8495dffdc4866544e72de5a439ae 100644
--- a/Code/Tests/test_ExecClassif.py
+++ b/Code/Tests/test_ExecClassif.py
@@ -2,6 +2,7 @@ import unittest
 import argparse
 import os
 import numpy as np
+import h5py
 from sklearn.metrics import accuracy_score
 
 from ..MonoMultiViewClassifiers import ExecClassif
@@ -36,59 +37,69 @@ class Test_initMonoviewArguments(unittest.TestCase):
         arguments = ExecClassif.initMonoviewExps(benchmark, {}, {}, 0, {})
 
 
-def fakeBenchmarkExec(coreIndex=-1, a=7):
+def fakeBenchmarkExec(coreIndex=-1, a=7, args=1):
     return [coreIndex, a]
 
 
-def fakeBenchmarkExec_mutlicore(nbCores=-1, a=6):
+def fakeBenchmarkExec_mutlicore(nbCores=-1, a=6, args=1):
     return [nbCores,a]
 
-def fakeBenchmarkExec_monocore(DATASET=1, a=4):
-    return [DATASET, a]
+def fakeBenchmarkExec_monocore(DATASET=1, a=4, args=1):
+    return [a]
 
 def fakegetResults(results, statsIter, nbMulticlass, benchmarkArgumentsDictionaries, multiClassLabels, metrics,
                    classificationIndices, directories, directory, labelsDictionary, nbExamples, nbLabels):
     return 3
 
+def fakeDelete(a, b, c):
+    return 9
+
 class Test_execBenchmark(unittest.TestCase):
 
     @classmethod
     def setUpClass(cls):
-
-        cls.argumentDictionaries = [{"a": 4}]
+        os.mkdir("Code/Tests/tmp_tests")
+        cls.Dataset = h5py.File("Code/Tests/tmp_tests/test_file.hdf5", "w")
+        cls.labels = cls.Dataset.create_dataset("Labels", data=np.array([0, 1, 2]))
+        cls.argumentDictionaries = [{"a": 4, "args":FakeArg()}]
 
     def test_simple(cls):
-        res = ExecClassif.execBenchmark(1,2,3,cls.argumentDictionaries,[[[1,2], [3,4,5]]], 5, 6, 7, 8, 9, 10, 11, execOneBenchmark=fakeBenchmarkExec,
+        res = ExecClassif.execBenchmark(1,2,3,cls.argumentDictionaries,[[[1,2], [3,4,5]]], 5, 6, 7, 8, 9, 10, cls.Dataset, execOneBenchmark=fakeBenchmarkExec,
                                         execOneBenchmark_multicore=fakeBenchmarkExec_mutlicore,
-                                        execOneBenchmarkMonoCore=fakeBenchmarkExec_monocore, getResults=fakegetResults)
-        cls.assertEqual(res, [[11,4]])
+                                        execOneBenchmarkMonoCore=fakeBenchmarkExec_monocore, getResults=fakegetResults, delete=fakeDelete)
+        cls.assertEqual(res, [[4]])
 
     def test_multiclass_no_iter(cls):
-        cls.argumentDictionaries = [{"a": 10}, {"a": 4}]
-        res = ExecClassif.execBenchmark(2,1,2,cls.argumentDictionaries,[[[1,2], [3,4,5]]], 5, 6, 7, 8, 9, 10, 11,
+        cls.argumentDictionaries = [{"a": 10, "args":FakeArg()}, {"a": 4, "args":FakeArg()}]
+        res = ExecClassif.execBenchmark(2,1,2,cls.argumentDictionaries,[[[1,2], [3,4,5]]], 5, 6, 7, 8, 9, 10, cls.Dataset,
                                         execOneBenchmark=fakeBenchmarkExec,
                                         execOneBenchmark_multicore=fakeBenchmarkExec_mutlicore,
                                         execOneBenchmarkMonoCore=fakeBenchmarkExec_monocore,
-                                        getResults=fakegetResults)
+                                        getResults=fakegetResults, delete=fakeDelete)
         cls.assertEqual(res, [[0,10], [1,4]])
 
     def test_multiclass_and_iter(cls):
-        cls.argumentDictionaries = [{"a": 10}, {"a": 4}, {"a": 55}, {"a": 24}]
-        res = ExecClassif.execBenchmark(2,2,2,cls.argumentDictionaries,[[[1,2], [3,4,5]]], 5, 6, 7, 8, 9, 10, 11,
+        cls.argumentDictionaries = [{"a": 10, "args":FakeArg()}, {"a": 4, "args":FakeArg()}, {"a": 55, "args":FakeArg()}, {"a": 24, "args":FakeArg()}]
+        res = ExecClassif.execBenchmark(2,2,2,cls.argumentDictionaries,[[[1,2], [3,4,5]]], 5, 6, 7, 8, 9, 10, cls.Dataset,
                                         execOneBenchmark=fakeBenchmarkExec,
                                         execOneBenchmark_multicore=fakeBenchmarkExec_mutlicore,
                                         execOneBenchmarkMonoCore=fakeBenchmarkExec_monocore,
-                                        getResults=fakegetResults)
+                                        getResults=fakegetResults, delete=fakeDelete)
         cls.assertEqual(res, [[0,10], [1,4], [0,55], [1,24]])
 
     def test_no_iter_biclass_multicore(cls):
-        res = ExecClassif.execBenchmark(2,1,1,cls.argumentDictionaries,[[[1,2], [3,4,5]]], 5, 6, 7, 8, 9, 10, 11,
+        res = ExecClassif.execBenchmark(2,1,1,cls.argumentDictionaries,[[[1,2], [3,4,5]]], 5, 6, 7, 8, 9, 10, cls.Dataset,
                                         execOneBenchmark=fakeBenchmarkExec,
                                         execOneBenchmark_multicore=fakeBenchmarkExec_mutlicore,
                                         execOneBenchmarkMonoCore=fakeBenchmarkExec_monocore,
-                                        getResults=fakegetResults)
+                                        getResults=fakegetResults, delete=fakeDelete)
         cls.assertEqual(res, [[2,4]])
 
+    @classmethod
+    def tearDownClass(cls):
+        cls.Dataset.close()  # close the HDF5 handle before removing the file
+        os.remove("Code/Tests/tmp_tests/test_file.hdf5")
+        os.rmdir("Code/Tests/tmp_tests")
 
 def fakeExecMono(directory, name, labelsNames, classificationIndices, kFolds, coreIndex, type, pathF, randomState, labels,
                  hyperParamSearch="try", metrics="try", nIter=1, **arguments):