diff --git a/Code/MonoMultiViewClassifiers/ExecClassif.py b/Code/MonoMultiViewClassifiers/ExecClassif.py
index 7e61ecf786007e1b4e77b6f08ad75dc364e64d32..4331e97a625d097a66731ade7f9d6e6582e49552 100644
--- a/Code/MonoMultiViewClassifiers/ExecClassif.py
+++ b/Code/MonoMultiViewClassifiers/ExecClassif.py
@@ -339,7 +339,7 @@ def publishMulticlassResults(multiclassResults, metrics, statsIter, argumentDict
     pass
 
 
-def analyzeMulticlass(results, statsIter, argumentDictionaries, nbExamples, nbLabels, multiclassLabels, metrics):
+def analyzeMulticlass(results, statsIter, benchmarkArgumentDictionaries, nbExamples, nbLabels, multiclassLabels, metrics):
     """Used to tranform one versus one results in multiclass results and to publish it"""
     multiclassResults = [{} for _ in range(statsIter)]
     for iterIndex in range(statsIter):
@@ -357,9 +357,9 @@ def analyzeMulticlass(results, statsIter, argumentDictionaries, nbExamples, nbLa
     for iterIndex, multiclassiterResult in enumerate(multiclassResults):
         for key, value in multiclassiterResult.items():
             multiclassResults[iterIndex][key] = {"labels": np.argmax(value, axis=1)}
-    multiclassResults = genMetricsScores(multiclassResults, multiclassLabels, metrics, argumentDictionaries)
+    multiclassResults = genMetricsScores(multiclassResults, multiclassLabels, metrics, benchmarkArgumentDictionaries)
     multiclassResults = getErrorOnLabels(multiclassResults, multiclassLabels)
-    publishMulticlassResults(multiclassResults, metrics, statsIter, argumentDictionaries)
+    publishMulticlassResults(multiclassResults, metrics, statsIter, benchmarkArgumentDictionaries)
     return multiclassResults
 
 
@@ -373,11 +373,11 @@ def analyzeIter(results):
     pass
 
 
-def getResults(results, statsIter, nbMulticlass, argumentDictionaries, multiclassLabels, metrics):
+def getResults(results, statsIter, nbMulticlass, benchmarkArgumentDictionaries, multiclassLabels, metrics):
     if statsIter > 1:
         if nbMulticlass > 1:
             analyzeBiclass(results)
-            multiclassResults = analyzeMulticlass(results, statsIter, argumentDictionaries, multiclassLabels, metrics)
+            multiclassResults = analyzeMulticlass(results, statsIter, benchmarkArgumentDictionaries, multiclassLabels, metrics)
             analyzeIter(multiclassResults)
         else:
             biclassResults = analyzeBiclass(results)
@@ -391,15 +391,25 @@ def getResults(results, statsIter, nbMulticlass, argumentDictionaries, multiclas
 
 def execOneBenchmark(coreIndex=-1, LABELS_DICTIONARY=None, directory=None, classificationIndices=None, args=None,
                      kFolds=None, randomState=None, hyperParamSearch=None, metrics=None, argumentDictionaries=None,
-                     benchmark=None, views=None, viewsIndices=None, flag=None, ExecMonoview_multicore=ExecMonoview_multicore,
-                     ExecMultiview_multicore=ExecMultiview_multicore, initMultiviewArguments=initMultiviewArguments):
+                     benchmark=None, views=None, viewsIndices=None, flag=None, labels=None,
+                     ExecMonoview_multicore=ExecMonoview_multicore, ExecMultiview_multicore=ExecMultiview_multicore,
+                     initMultiviewArguments=initMultiviewArguments):
     """Used to run a benchmark using one core. ExecMonoview_multicore, initMultiviewArguments and
      ExecMultiview_multicore args are only used for tests"""
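+    # Make sure the benchmark output directory exists (errno is assumed to be imported at
+    # module level), then dump the training labels of this run so they can be inspected later.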
+    if not os.path.exists(os.path.dirname(directory + "train_labels.csv")):
+        try:
+            os.makedirs(os.path.dirname(directory + "train_labels.csv"))
+        except OSError as exc:
+            if exc.errno != errno.EEXIST:
+                raise
+    trainIndices, testIndices = classificationIndices
+    trainLabels = labels[trainIndices]
+    np.savetxt(directory + "train_labels.csv", trainLabels, delimiter=",")
     resultsMonoview = []
     labelsNames = list(LABELS_DICTIONARY.values())
     np.savetxt(directory + "train_indices.csv", classificationIndices[0], delimiter=",")
     resultsMonoview += [ExecMonoview_multicore(directory, args.name, labelsNames, classificationIndices, kFolds,
-                                               coreIndex, args.type, args.pathF, randomState,
+                                               coreIndex, args.type, args.pathF, randomState, labels,
                                                hyperParamSearch=hyperParamSearch, metrics=metrics,
                                                nIter=args.CL_GS_iter, **argument)
                         for argument in argumentDictionaries["Monoview"]]
@@ -410,7 +420,7 @@ def execOneBenchmark(coreIndex=-1, LABELS_DICTIONARY=None, directory=None, class
     resultsMultiview = []
     resultsMultiview += [
         ExecMultiview_multicore(directory, coreIndex, args.name, classificationIndices, kFolds, args.type,
-                                args.pathF, LABELS_DICTIONARY, randomState, hyperParamSearch=hyperParamSearch,
+                                args.pathF, LABELS_DICTIONARY, randomState, labels, hyperParamSearch=hyperParamSearch,
                                 metrics=metrics, nIter=args.CL_GS_iter, **arguments)
         for arguments in argumentDictionaries["Multiview"]]
     return [flag, resultsMonoview, resultsMultiview]
@@ -418,9 +428,21 @@ def execOneBenchmark(coreIndex=-1, LABELS_DICTIONARY=None, directory=None, class
 
 def execOneBenchmark_multicore(nbCores=-1, LABELS_DICTIONARY=None, directory=None, classificationIndices=None, args=None,
                                kFolds=None, randomState=None, hyperParamSearch=None, metrics=None, argumentDictionaries=None,
-                               benchmark=None, views=None, viewsIndices=None, flag=None, ExecMonoview_multicore=ExecMonoview_multicore,
-                               ExecMultiview_multicore=ExecMultiview_multicore, initMultiviewArguments=initMultiviewArguments):
-
+                               benchmark=None, views=None, viewsIndices=None, flag=None, labels=None,
+                               ExecMonoview_multicore=ExecMonoview_multicore,
+                               ExecMultiview_multicore=ExecMultiview_multicore,
+                               initMultiviewArguments=initMultiviewArguments):
+    """Used to run a benchmark using multiple cores. ExecMonoview_multicore, initMultiviewArguments and
+     ExecMultiview_multicore args are only used for tests"""
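+    # Same bookkeeping as execOneBenchmark: create the output directory if needed and save
+    # the training labels of this benchmark run.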
+    if not os.path.exists(os.path.dirname(directory + "train_labels.csv")):
+        try:
+            os.makedirs(os.path.dirname(directory + "train_labels.csv"))
+        except OSError as exc:
+            if exc.errno != errno.EEXIST:
+                raise
+    trainIndices, testIndices = classificationIndices
+    trainLabels = labels[trainIndices]
+    np.savetxt(directory + "train_labels.csv", trainLabels, delimiter=",")
     np.savetxt(directory + "train_indices.csv", classificationIndices[0], delimiter=",")
     resultsMonoview = []
     labelsNames = list(LABELS_DICTIONARY.values())
@@ -430,7 +452,7 @@ def execOneBenchmark_multicore(nbCores=-1, LABELS_DICTIONARY=None, directory=Non
     for stepIndex in range(nbMulticoreToDo):
         resultsMonoview += (Parallel(n_jobs=nbCores)(
             delayed(ExecMonoview_multicore)(directory, args.name, labelsNames, classificationIndices, kFolds,
-                                            coreIndex, args.type, args.pathF, randomState,
+                                            coreIndex, args.type, args.pathF, randomState, labels,
                                             hyperParamSearch=hyperParamSearch,
                                             metrics=metrics, nIter=args.CL_GS_iter,
                                             **argumentDictionaries["Monoview"][coreIndex + stepIndex * nbCores])
@@ -445,7 +467,7 @@ def execOneBenchmark_multicore(nbCores=-1, LABELS_DICTIONARY=None, directory=Non
     for stepIndex in range(nbMulticoreToDo):
         resultsMultiview += Parallel(n_jobs=nbCores)(
             delayed(ExecMultiview_multicore)(directory, coreIndex, args.name, classificationIndices, kFolds,
-                                             args.type, args.pathF, LABELS_DICTIONARY, randomState,
+                                             args.type, args.pathF, LABELS_DICTIONARY, randomState, labels,
                                              hyperParamSearch=hyperParamSearch, metrics=metrics, nIter=args.CL_GS_iter,
                                              **argumentDictionaries["Multiview"][stepIndex * nbCores + coreIndex])
             for coreIndex in range(min(nbCores, nbExperiments - stepIndex * nbCores)))
@@ -453,8 +475,17 @@ def execOneBenchmark_multicore(nbCores=-1, LABELS_DICTIONARY=None, directory=Non
     return [flag, resultsMonoview, resultsMultiview]
 
 
-def execBenchmark(nbCores, statsIter, nbMulticlass, argumentsDictionaries, multiclassLabels,
-                  execOneBenchmark=execOneBenchmark, execOneBenchmark_multicore=execOneBenchmark_multicore):
+def execOneBenchmarkMonoCore(coreIndex=-1, LABELS_DICTIONARY=None, directory=None, classificationIndices=None, args=None,
+                             kFolds=None, randomState=None, hyperParamSearch=None, metrics=None, argumentDictionaries=None,
+                             benchmark=None, views=None, viewsIndices=None, flag=None, labels=None,
+                             ExecMonoview_multicore=ExecMonoview_multicore, ExecMultiview_multicore=ExecMultiview_multicore,
+                             initMultiviewArguments=initMultiviewArguments):
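+    # Mono-core counterpart of execOneBenchmark; the body is still a stub.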
+    pass
+
+
+def execBenchmark(nbCores, statsIter, nbMulticlass, benchmarkArgumentsDictionaries,
+                  execOneBenchmark=execOneBenchmark, execOneBenchmark_multicore=execOneBenchmark_multicore,
+                  execOneBenchmarkMonoCore=execOneBenchmarkMonoCore):
     """Used to execute the needed benchmark(s) on multicore or mono-core functions
     The execOneBenchmark and execOneBenchmark_multicore keywords args are only used in the tests"""
     # TODO :  find a way to flag
@@ -463,24 +494,24 @@ def execBenchmark(nbCores, statsIter, nbMulticlass, argumentsDictionaries, multi
     results = []
     if nbCores > 1:
         if statsIter > 1 or nbMulticlass > 1:
-            nbExpsToDo = nbMulticlass*statsIter
+            nbExpsToDo = len(benchmarkArgumentsDictionaries)
             nbMulticoreToDo = range(int(math.ceil(float(nbExpsToDo) / nbCores)))
             for stepIndex in nbMulticoreToDo:
                 results += (Parallel(n_jobs=nbCores)(delayed(execOneBenchmark)
                                                      (coreIndex=coreIndex,
-                                                      **argumentsDictionaries[coreIndex + stepIndex * nbCores])
-                    for coreIndex in range(min(nbCores, nbExpsToDo - stepIndex * nbCores))))
+                                                      **benchmarkArgumentsDictionaries[coreIndex + stepIndex * nbCores])
+                            for coreIndex in range(min(nbCores, nbExpsToDo - stepIndex * nbCores))))
         else:
-            results += [execOneBenchmark_multicore(nbCores=nbCores, **argumentsDictionaries[0])]
+            results += [execOneBenchmark_multicore(nbCores=nbCores, **benchmarkArgumentsDictionaries[0])]
     else:
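+        # Sequential execution: run each benchmark configuration one after the other
+        # with the mono-core function.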
-        for arguments in argumentsDictionaries:
-            results += [execOneBenchmark(**arguments)]
+        for arguments in benchmarkArgumentsDictionaries:
+            results += [execOneBenchmarkMonoCore(**arguments)]
     logging.debug("Done:\t Executing all the needed biclass benchmarks")
 
     # Do everything with flagging
 
     logging.debug("Start:\t Analyzing preds")
-    # getResults(results, statsIter, nbMulticlass, argumentsDictionaries, multiclassLabels, metrics)
+    # getResults(results, statsIter, nbMulticlass, benchmarkArgumentsDictionaries, multiclassLabels, metrics)
     logging.debug("Done:\t Analyzing preds")
 
     return results
@@ -552,7 +583,33 @@ def execClassif(arguments):
     directories = execution.genDirecortiesNames(directory, statsIter, labelsCombinations,
                                                 multiclassMethod, LABELS_DICTIONARY)
     # TODO : Gen arguments dictionaries
-    benchmarkArgumentDictionaries = execution.genArgumentDictionaries(LABELS_DICTIONARY, directories, multiclassLabels, labelsCombinations, oldIndicesMulticlass, hyperParamSearch, args, kFolds, statsIterRandomStates, metrics, argumentDictionaries, benchmark)
+    benchmarkArgumentDictionaries = execution.genArgumentDictionaries(LABELS_DICTIONARY, directories, multiclassLabels,
+                                                                      labelsCombinations, oldIndicesMulticlass,
+                                                                      hyperParamSearch, args, kFolds,
+                                                                      statsIterRandomStates, metrics,
+                                                                      argumentDictionaries, benchmark)
+
+    nbMulticlass = len(labelsCombinations)
+
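+    # Run every generated benchmark configuration (one per label combination and statistical iteration).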
+    execBenchmark(nbCores, statsIter, nbMulticlass, benchmarkArgumentDictionaries)
+
 
     if statsIter > 1:
         logging.debug("Start:\t Benchmark classification")
diff --git a/Code/MonoMultiViewClassifiers/Monoview/ExecClassifMonoView.py b/Code/MonoMultiViewClassifiers/Monoview/ExecClassifMonoView.py
index 085b3481b1ef450f0f48b2f67cb11693b0b0363c..3672e9d09c58bfa420e8507da89680d1798d3684 100644
--- a/Code/MonoMultiViewClassifiers/Monoview/ExecClassifMonoView.py
+++ b/Code/MonoMultiViewClassifiers/Monoview/ExecClassifMonoView.py
@@ -89,7 +89,7 @@ def saveResults(stringAnalysis, outputFileName, full_labels_pred, y_train_pred,
 
 
 def ExecMonoview_multicore(directory, name, labelsNames, classificationIndices, KFolds, datasetFileIndex, databaseType,
-                           path, randomState, hyperParamSearch="randomizedSearch",
+                           path, randomState, labels, hyperParamSearch="randomizedSearch",
                            metrics=[["accuracy_score", None]], nIter=30, **args):
     DATASET = h5py.File(path + name + str(datasetFileIndex) + ".hdf5", "r")
     kwargs = args["args"]
@@ -97,7 +97,7 @@ def ExecMonoview_multicore(directory, name, labelsNames, classificationIndices,
              range(DATASET.get("Metadata").attrs["nbView"])]
     neededViewIndex = views.index(kwargs["feat"])
     X = DATASET.get("View" + str(neededViewIndex))
-    Y = DATASET.get("Labels").value
+    Y = labels
     return ExecMonoview(directory, X, Y, name, labelsNames, classificationIndices, KFolds, 1, databaseType, path,
                         randomState, hyperParamSearch=hyperParamSearch,
                         metrics=metrics, nIter=nIter, **args)
@@ -143,8 +143,8 @@ def ExecMonoview(directory, X, Y, name, labelsNames, classificationIndices, KFol
 
     logging.debug("Start:\t Predicting")
     full_labels_pred = cl_res.predict(X)
-    y_train_pred = full_labels_pred[classificationIndices[0]]
-    y_test_pred = full_labels_pred[classificationIndices[1]]
+    y_train_pred = cl_res.predict(X[classificationIndices[0]])
+    y_test_pred = cl_res.predict(X[classificationIndices[1]])
     logging.debug("Done:\t Predicting")
 
     t_end = time.time() - t_start
diff --git a/Code/MonoMultiViewClassifiers/Multiview/ExecMultiview.py b/Code/MonoMultiViewClassifiers/Multiview/ExecMultiview.py
index 5dfaf35115d4ab9ffac05d5b9f82a54eddd942bb..7f4e16a1098d86efde375348b05c01640152bef6 100644
--- a/Code/MonoMultiViewClassifiers/Multiview/ExecMultiview.py
+++ b/Code/MonoMultiViewClassifiers/Multiview/ExecMultiview.py
@@ -68,17 +68,17 @@ def saveResults(LABELS_DICTIONARY, stringAnalysis, views, classifierModule, clas
 
 
 def ExecMultiview_multicore(directory, coreIndex, name, learningRate, nbFolds, databaseType, path, LABELS_DICTIONARY,
-                            randomState,
+                            randomState, labels,
                             hyperParamSearch=False, nbCores=1, metrics=None, nIter=30, **arguments):
     """Used to load an HDF5 dataset for each parallel job and execute multiview classification"""
     DATASET = h5py.File(path + name + str(coreIndex) + ".hdf5", "r")
     return ExecMultiview(directory, DATASET, name, learningRate, nbFolds, 1, databaseType, path, LABELS_DICTIONARY,
-                         randomState,
+                         randomState, labels,
                          hyperParamSearch=hyperParamSearch, metrics=metrics, nIter=nIter, **arguments)
 
 
 def ExecMultiview(directory, DATASET, name, classificationIndices, KFolds, nbCores, databaseType, path,
-                  LABELS_DICTIONARY, randomState,
+                  LABELS_DICTIONARY, randomState, labels,
                   hyperParamSearch=False, metrics=None, nIter=30, **kwargs):
     """Used to execute multiview classification and result analysis"""
     logging.debug("Start:\t Initialize constants")
@@ -106,7 +106,7 @@ def ExecMultiview(directory, DATASET, name, classificationIndices, KFolds, nbCor
 
     logging.debug("Start:\t Optimizing hyperparameters")
     if hyperParamSearch != "None":
-        classifier = HyperParameterSearch.searchBestSettings(DATASET, classifierPackage,
+        classifier = HyperParameterSearch.searchBestSettings(DATASET, labels, classifierPackage,
                                                              CL_type, metrics, learningIndices,
                                                              KFolds, randomState,
                                                              viewsIndices=viewsIndices,
@@ -117,7 +117,7 @@ def ExecMultiview(directory, DATASET, name, classificationIndices, KFolds, nbCor
     logging.debug("Done:\t Optimizing hyperparameters")
 
     logging.debug("Start:\t Fitting classifier")
-    classifier.fit_hdf5(DATASET, trainIndices=learningIndices, viewsIndices=viewsIndices, metric=metrics[0])
+    classifier.fit_hdf5(DATASET, labels, trainIndices=learningIndices, viewsIndices=viewsIndices, metric=metrics[0])
     logging.debug("Done:\t Fitting classifier")
 
     logging.debug("Start:\t Predicting")
diff --git a/Code/MonoMultiViewClassifiers/MultiviewClassifiers/Fusion/FusionModule.py b/Code/MonoMultiViewClassifiers/MultiviewClassifiers/Fusion/FusionModule.py
index b4089aba5fbfe4616c230c6c24e6b1987c8d07c2..000301343a072331cda8f31e8baa4b0afecfcdfa 100644
--- a/Code/MonoMultiViewClassifiers/MultiviewClassifiers/Fusion/FusionModule.py
+++ b/Code/MonoMultiViewClassifiers/MultiviewClassifiers/Fusion/FusionModule.py
@@ -167,8 +167,8 @@ class FusionClass:
     def setParams(self, paramsSet):
         self.classifier.setParams(paramsSet)
 
-    def fit_hdf5(self, DATASET, trainIndices=None, viewsIndices=None, metric=["f1_score", None]):
-        self.classifier.fit_hdf5(DATASET, trainIndices=trainIndices, viewsIndices=viewsIndices)
+    def fit_hdf5(self, DATASET, labels, trainIndices=None, viewsIndices=None, metric=["f1_score", None]):
+        self.classifier.fit_hdf5(DATASET, labels, trainIndices=trainIndices, viewsIndices=viewsIndices)
 
     def predict_hdf5(self, DATASET, usedIndices=None, viewsIndices=None):
         if usedIndices is None:
diff --git a/Code/MonoMultiViewClassifiers/MultiviewClassifiers/Fusion/Methods/EarlyFusionPackage/WeightedLinear.py b/Code/MonoMultiViewClassifiers/MultiviewClassifiers/Fusion/Methods/EarlyFusionPackage/WeightedLinear.py
index b32c631985907d2cc29247819137388de1b00678..c5f72caee1118897968f2da9a75fdcaaa40a3ec5 100644
--- a/Code/MonoMultiViewClassifiers/MultiviewClassifiers/Fusion/Methods/EarlyFusionPackage/WeightedLinear.py
+++ b/Code/MonoMultiViewClassifiers/MultiviewClassifiers/Fusion/Methods/EarlyFusionPackage/WeightedLinear.py
@@ -71,7 +71,7 @@ class WeightedLinear(EarlyFusionClassifier):
         else:
             self.weights = np.array(map(float, kwargs['fusionMethodConfig']))
 
-    def fit_hdf5(self, DATASET, trainIndices=None, viewsIndices=None):
+    def fit_hdf5(self, DATASET, labels, trainIndices=None, viewsIndices=None):
         if type(viewsIndices) == type(None):
             viewsIndices = np.arange(DATASET.get("Metadata").attrs["nbView"])
         if trainIndices is None:
@@ -80,7 +80,7 @@ class WeightedLinear(EarlyFusionClassifier):
         self.makeMonoviewData_hdf5(DATASET, weights=self.weights, usedIndices=trainIndices, viewsIndices=viewsIndices)
         monoviewClassifierModule = getattr(MonoviewClassifiers, self.monoviewClassifierName)
         self.monoviewClassifier = monoviewClassifierModule.fit(self.monoviewData,
-                                                               DATASET.get("Labels").value[trainIndices],
+                                                               labels[trainIndices],
                                                                self.randomState,
                                                                NB_CORES=self.nbCores,
                                                                **self.monoviewClassifiersConfig)
diff --git a/Code/MonoMultiViewClassifiers/MultiviewClassifiers/Fusion/Methods/LateFusion.py b/Code/MonoMultiViewClassifiers/MultiviewClassifiers/Fusion/Methods/LateFusion.py
index 34b17cbc7b4b821cd8d39621d8e184e40f54eb36..ff6d3ea378add33a5739dceb75c146be04944ec5 100644
--- a/Code/MonoMultiViewClassifiers/MultiviewClassifiers/Fusion/Methods/LateFusion.py
+++ b/Code/MonoMultiViewClassifiers/MultiviewClassifiers/Fusion/Methods/LateFusion.py
@@ -133,7 +133,7 @@ class LateFusionClassifier(object):
         self.monoviewSelection = monoviewSelection
         self.randomState = randomState
 
-    def fit_hdf5(self, DATASET, trainIndices=None, viewsIndices=None):
+    def fit_hdf5(self, DATASET, labels, trainIndices=None, viewsIndices=None):
         if type(viewsIndices) == type(None):
             viewsIndices = np.arange(DATASET.get("Metadata").attrs["nbView"])
         if trainIndices is None:
@@ -142,6 +142,6 @@ class LateFusionClassifier(object):
         self.monoviewClassifiers = Parallel(n_jobs=self.nbCores)(
                 delayed(fitMonoviewClassifier)(self.monoviewClassifiersNames[index],
                                                getV(DATASET, viewIndex, trainIndices),
-                                               DATASET.get("Labels").value[trainIndices],
+                                               labels[trainIndices],
                                                self.monoviewClassifiersConfigs[index], self.needProbas, self.randomState)
                 for index, viewIndex in enumerate(viewsIndices))
diff --git a/Code/MonoMultiViewClassifiers/MultiviewClassifiers/Fusion/Methods/LateFusionPackage/SCMForLinear.py b/Code/MonoMultiViewClassifiers/MultiviewClassifiers/Fusion/Methods/LateFusionPackage/SCMForLinear.py
index 43a22a2fde4b08c94f434d8a7723e3514e977719..36e764d0d71634ef3a58a66695b8908b038cb12d 100644
--- a/Code/MonoMultiViewClassifiers/MultiviewClassifiers/Fusion/Methods/LateFusionPackage/SCMForLinear.py
+++ b/Code/MonoMultiViewClassifiers/MultiviewClassifiers/Fusion/Methods/LateFusionPackage/SCMForLinear.py
@@ -114,7 +114,7 @@ class SCMForLinear(LateFusionClassifier):
         self.order = paramsSet[3]
         self.modelType = paramsSet[2]
 
-    def fit_hdf5(self, DATASET, trainIndices=None, viewsIndices=None):
+    def fit_hdf5(self, DATASET, labels, trainIndices=None, viewsIndices=None):
         if viewsIndices is None:
             viewsIndices = np.arange(DATASET.get("Metadata").attrs["nbView"])
         if trainIndices is None:
@@ -123,7 +123,7 @@ class SCMForLinear(LateFusionClassifier):
             monoviewClassifier = getattr(MonoviewClassifiers, self.monoviewClassifiersNames[index])
             self.monoviewClassifiers.append(
                 monoviewClassifier.fit(getV(DATASET, viewIndex, trainIndices),
-                                       DATASET.get("Labels").value[trainIndices], self.randomState,
+                                       labels[trainIndices], self.randomState,
                                        NB_CORES=self.nbCores,
                                        **self.monoviewClassifiersConfigs[index]))
         self.SCMForLinearFusionFit(DATASET, usedIndices=trainIndices, viewsIndices=viewsIndices)
diff --git a/Code/MonoMultiViewClassifiers/MultiviewClassifiers/Fusion/Methods/LateFusionPackage/SVMForLinear.py b/Code/MonoMultiViewClassifiers/MultiviewClassifiers/Fusion/Methods/LateFusionPackage/SVMForLinear.py
index b4b8ef04c4305331153d1dac2b75314722ac2b36..2bf076223fcd059176c00bfb868ab7e56a598099 100644
--- a/Code/MonoMultiViewClassifiers/MultiviewClassifiers/Fusion/Methods/LateFusionPackage/SVMForLinear.py
+++ b/Code/MonoMultiViewClassifiers/MultiviewClassifiers/Fusion/Methods/LateFusionPackage/SVMForLinear.py
@@ -57,7 +57,7 @@ class SVMForLinear(LateFusionClassifier):
                                       NB_CORES=NB_CORES)
         self.SVMClassifier = None
 
-    def fit_hdf5(self, DATASET, trainIndices=None, viewsIndices=None):
+    def fit_hdf5(self, DATASET, labels, trainIndices=None, viewsIndices=None):
         if viewsIndices is None:
             viewsIndices = np.arange(DATASET.get("Metadata").attrs["nbView"])
         if trainIndices is None:
@@ -72,7 +72,7 @@ class SVMForLinear(LateFusionClassifier):
                                             for configIndex, config in enumerate(self.monoviewClassifiersConfigs[index]))
                 self.monoviewClassifiers.append(
                     monoviewClassifier.fit(getV(DATASET, viewIndex, trainIndices),
-                                           DATASET.get("Labels").value[trainIndices], self.randomState,
+                                           labels[trainIndices], self.randomState,
                                            NB_CORES=self.nbCores,
                                            **self.monoviewClassifiersConfigs[index]))
         else:
diff --git a/Code/MonoMultiViewClassifiers/MultiviewClassifiers/Mumbo/MumboModule.py b/Code/MonoMultiViewClassifiers/MultiviewClassifiers/Mumbo/MumboModule.py
index ac1361617485c08a25d8b3f1e4246a76b3096a28..4399ba3b4c3aef71010fe65e1a3312826508a690 100644
--- a/Code/MonoMultiViewClassifiers/MultiviewClassifiers/Mumbo/MumboModule.py
+++ b/Code/MonoMultiViewClassifiers/MultiviewClassifiers/Mumbo/MumboModule.py
@@ -199,7 +199,7 @@ class MumboClass:
         self.predictions = np.zeros((self.maxIter, nbView, trainLength))
         self.generalFs = np.zeros((self.maxIter, trainLength, nbClass))
 
-    def fit_hdf5(self, DATASET, trainIndices=None, viewsIndices=None, metric=["f1_score", None]):
+    def fit_hdf5(self, DATASET, labels, trainIndices=None, viewsIndices=None, metric=["f1_score", None]):
 
         # Initialization
         if self.classifiersConfigs is None:
@@ -212,7 +212,7 @@ class MumboClass:
             NB_CLASS = DATASET.get("Metadata").attrs["nbClass"]
             NB_VIEW = len(viewsIndices)
             trainLength = len(trainIndices)
-            LABELS = DATASET.get("Labels").value[trainIndices]
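+            # Train on the labels provided by the caller rather than the labels stored in the dataset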
+            LABELS = labels[trainIndices]
             self.initDataDependant(trainLength, NB_VIEW, NB_CLASS, LABELS)
             # Learning
             isStabilized = False
diff --git a/Code/MonoMultiViewClassifiers/utils/HyperParameterSearch.py b/Code/MonoMultiViewClassifiers/utils/HyperParameterSearch.py
index d534a7e58896a915a75cdb0dcf9697b66cad0a5a..69ea430385e074e50e2e14d62a2c68b5f9b85328 100644
--- a/Code/MonoMultiViewClassifiers/utils/HyperParameterSearch.py
+++ b/Code/MonoMultiViewClassifiers/utils/HyperParameterSearch.py
@@ -6,14 +6,14 @@ import itertools
 from .. import Metrics
 
 
-def searchBestSettings(dataset, classifierPackage, classifierName, metrics, iLearningIndices, iKFolds, randomState, viewsIndices=None,
+def searchBestSettings(dataset, labels, classifierPackage, classifierName, metrics, iLearningIndices, iKFolds, randomState, viewsIndices=None,
                        searchingTool="hyperParamSearch", nIter=1, **kwargs):
     """Used to select the right hyperparam optimization function to optimize hyper parameters"""
     if viewsIndices is None:
         viewsIndices = range(dataset.get("Metadata").attrs["nbView"])
     thismodule = sys.modules[__name__]
     searchingToolMethod = getattr(thismodule, searchingTool)
-    bestSettings = searchingToolMethod(dataset, classifierPackage, classifierName, metrics, iLearningIndices, iKFolds, randomState,
+    bestSettings = searchingToolMethod(dataset, labels, classifierPackage, classifierName, metrics, iLearningIndices, iKFolds, randomState,
                                        viewsIndices=viewsIndices, nIter=nIter, **kwargs)
     return bestSettings  # or well set clasifier ?
 
@@ -23,7 +23,7 @@ def gridSearch(dataset, classifierName, viewsIndices=None, kFolds=None, nIter=1,
     pass
 
 
-def randomizedSearch(dataset, classifierPackage, classifierName, metrics, learningIndices, KFolds, randomState, viewsIndices=None, nIter=1,
+def randomizedSearch(dataset, labels, classifierPackage, classifierName, metrics, learningIndices, KFolds, randomState, viewsIndices=None, nIter=1,
                      nbCores=1, **classificationKWARGS):
     """Used to perform a random search on the classifiers to optimize hyper parameters"""
     if viewsIndices is None:
@@ -45,7 +45,7 @@ def randomizedSearch(dataset, classifierPackage, classifierName, metrics, learni
             baseScore = 1000.0
             isBetter = "lower"
         bestSettings = None
-        kFolds = KFolds.split(learningIndices, dataset.get("Labels").value[learningIndices])
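+        # Build the inner cross-validation folds from the labels passed in, not from the dataset's stored labels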
+        kFolds = KFolds.split(learningIndices, labels[learningIndices])
         for paramsSet in paramsSets:
             scores = []
             for trainIndices, testIndices in kFolds:
@@ -54,7 +54,7 @@ def randomizedSearch(dataset, classifierPackage, classifierName, metrics, learni
-                classifier.fit_hdf5(dataset, trainIndices=learningIndices[trainIndices], viewsIndices=viewsIndices)
+                classifier.fit_hdf5(dataset, labels, trainIndices=learningIndices[trainIndices], viewsIndices=viewsIndices)
                 testLabels = classifier.predict_hdf5(dataset, usedIndices=learningIndices[testIndices],
                                                      viewsIndices=viewsIndices)
-                testScore = metricModule.score(dataset.get("Labels").value[learningIndices[testIndices]], testLabels)
+                testScore = metricModule.score(labels[learningIndices[testIndices]], testLabels)
                 scores.append(testScore)
             crossValScore = np.mean(np.array(scores))
 
diff --git a/Code/MonoMultiViewClassifiers/utils/Multiclass.py b/Code/MonoMultiViewClassifiers/utils/Multiclass.py
index d91474dbbcc9c68bb5877efee108900e00a3b221..c10eb2707a0d8e7d15934d616c4d4818e3a7db39 100644
--- a/Code/MonoMultiViewClassifiers/utils/Multiclass.py
+++ b/Code/MonoMultiViewClassifiers/utils/Multiclass.py
@@ -11,22 +11,28 @@ def genMulticlassLabels(labels, multiclassMethod, classificationIndices):
             combinations = itertools.combinations(np.arange(nbLabels), 2)
             multiclassLabels = []
             labelsIndices = []
-            oldIndicesMulticlass = []
+            indicesMulticlass = []
             for combination in combinations:
                 labelsIndices.append(combination)
                 oldIndices = [exampleIndex
                               for exampleIndex, exampleLabel in enumerate(labels)
                               if exampleLabel in combination]
-                oldTrainIndices = [[oldIndex for oldIndex in oldIndicesMulticlass if oldIndex in trainIndices]
-                                   for trainIndices, testIndices in classificationIndices]
-                oldTestIndices = [[oldIndex for oldIndex in oldIndicesMulticlass if oldIndex in testIndices]
-                                  for trainIndices, testIndices in classificationIndices]
-                oldIndicesMulticlass.append([oldTrainIndices, oldTestIndices])
-                multiclassLabels.append(np.array([1 if exampleLabel == combination[0]
-                                                  else 0
-                                                  for exampleLabel in labels[oldIndices]]))
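+                # For each statistical iteration, keep only the train/test indices whose
+                # examples belong to the current pair of labels.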
+                trainIndices = [np.array([oldIndex for oldIndex in oldIndices if oldIndex in iterIndices[0]])
+                                for iterIndices in classificationIndices]
+                testIndices = [np.array([oldIndex for oldIndex in oldIndices if oldIndex in iterIndices[1]])
+                               for iterIndices in classificationIndices]
+                indicesMulticlass.append([trainIndices, testIndices])
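+                # Build a full-length label vector: 1 for the first label of the pair,
+                # 0 for the second one, and -100 for examples outside the combination.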
+                newLabels = np.zeros(len(labels), dtype=int)-100
+                for labelIndex, label in enumerate(labels):
+                    if label == combination[0]:
+                        newLabels[labelIndex] = 1
+                    elif label == combination[1]:
+                        newLabels[labelIndex] = 0
+                    else:
+                        pass
+                multiclassLabels.append(newLabels)
     elif multiclassMethod == "oneVersusRest":
         # TODO : Implement one versus rest if probas are not a problem anymore
         pass
-    return multiclassLabels, labelsIndices, oldIndicesMulticlass
+    return multiclassLabels, labelsIndices, indicesMulticlass
 #
\ No newline at end of file
diff --git a/Code/MonoMultiViewClassifiers/utils/execution.py b/Code/MonoMultiViewClassifiers/utils/execution.py
index 5830b02a57d9f1ccefc26dd0f8e276ea57fe9a16..f6ee39354eda60b96120ef198897201b390fc904 100644
--- a/Code/MonoMultiViewClassifiers/utils/execution.py
+++ b/Code/MonoMultiViewClassifiers/utils/execution.py
@@ -337,7 +337,7 @@ def genArgumentDictionaries(labelsDictionary, directories, multiclassLabels, lab
                                            "labels": multiclassLabels[combinationIndex],
                                            "kFolds": kFolds[iterIndex],
                                            "randomState": iterRandomState,
-                                           "hyperParamSearch": hyperParamSearch, 
+                                           "hyperParamSearch": hyperParamSearch,
                                            "metrics": metrics,
                                            "argumentDictionaries": argumentDictionaries,
                                            "benchmark": benchmark,
diff --git a/Code/Tests/Test_utils/test_multiclass.py b/Code/Tests/Test_utils/test_multiclass.py
index c9a3bfc9c682431cdd113a0a152db4d5b5287313..d430cf24b9be43af7d1a7bd85d87dc3d48168e0d 100644
--- a/Code/Tests/Test_utils/test_multiclass.py
+++ b/Code/Tests/Test_utils/test_multiclass.py
@@ -10,12 +10,19 @@ class Test_genMulticlassLabels(unittest.TestCase):
     def setUpClass(cls):
         cls.random_state = np.random.RandomState(42)
         cls.labels = cls.random_state.randint(0,5,50)
+        cls.testIndices = [cls.random_state.choice(np.arange(50),size=10, replace=False), cls.random_state.choice(np.arange(50),size=10, replace=False)]
+        cls.classificationIndices = [[np.array([_ for _ in range(50) if _ not in cls.testIndices[0]]), cls.testIndices[0]],
+                                     [np.array([_ for _ in range(50) if _ not in cls.testIndices[1]]), cls.testIndices[1]]]
 
     def test_one_versus_one(cls):
-        multiclassLabels, labelsIndices, oldIndicesMulticlass = Multiclass.genMulticlassLabels(cls.labels, "oneVersusOne")
+        multiclassLabels, labelsIndices, oldIndicesMulticlass = Multiclass.genMulticlassLabels(cls.labels, "oneVersusOne", cls.classificationIndices)
         cls.assertEqual(len(multiclassLabels), 10)
         cls.assertEqual(labelsIndices, [(0,1), (0,2), (0,3), (0,4), (1,2), (1,3), (1,4), (2,3), (2,4), (3,4)])
-        np.testing.assert_array_equal(oldIndicesMulticlass[0],
-                                      np.array([5, 13, 15, 18, 20, 23, 24, 27, 33, 38, 39, 41, 43, 44, 45, 46, 48]))
+        np.testing.assert_array_equal(oldIndicesMulticlass[0][0][0],
+                                      np.array([5, 13, 15, 18, 20, 24, 27, 39, 41, 43, 44, 45, 46, 48]))
         np.testing.assert_array_equal(multiclassLabels[0],
-                                      np.array([0, 0, 0, 1, 0, 1, 1, 0, 1, 1, 0, 1, 0, 0, 1, 0, 0]))
+                                      np.array([-100, -100, -100, -100, -100, 0, -100, -100, -100, -100, -100, -100,
+                                                -100, 0, -100, 0, -100, -100, 1, -100, 0, -100, -100, 1, 1, -100, -100,
+                                                0, -100, -100, -100, -100, -100, 1, -100, -100, -100, -100, 1, 0, -100,
+                                                1, -100, 0, 0, 1, 0, -100, 0, -100 ]))
+
diff --git a/Code/Tests/test_ExecClassif.py b/Code/Tests/test_ExecClassif.py
index 0a62ade3c8431a3c5b8c0479d2db02a7a3c1fc1b..68a3455764235f10160ec06ad0dca6b5a48b1ac3 100644
--- a/Code/Tests/test_ExecClassif.py
+++ b/Code/Tests/test_ExecClassif.py
@@ -43,6 +43,9 @@ def fakeBenchmarkExec(coreIndex=-1, a=7):
 def fakeBenchmarkExec_mutlicore(nbCores=-1, a=6):
     return [nbCores,a]
 
+def fakeBenchmarkExec_monocore(a=4):
+    return a
+
 
 class Test_execBenchmark(unittest.TestCase):
 
@@ -52,35 +55,39 @@ class Test_execBenchmark(unittest.TestCase):
         cls.argumentDictionaries = [{"a": 4}]
 
     def test_simple(cls):
-        res = ExecClassif.execBenchmark(1,1,1,cls.argumentDictionaries, [], execOneBenchmark=fakeBenchmarkExec,
-                                        execOneBenchmark_multicore=fakeBenchmarkExec_mutlicore)
-        cls.assertEqual(res, [[-1,4]])
+        res = ExecClassif.execBenchmark(1,1,1,cls.argumentDictionaries, execOneBenchmark=fakeBenchmarkExec,
+                                        execOneBenchmark_multicore=fakeBenchmarkExec_mutlicore,
+                                        execOneBenchmarkMonoCore=fakeBenchmarkExec_monocore)
+        cls.assertEqual(res, [4])
 
     def test_multiclass_no_iter(cls):
         cls.argumentDictionaries = [{"a": 10}, {"a": 4}]
-        res = ExecClassif.execBenchmark(2,1,2,cls.argumentDictionaries, [], execOneBenchmark=fakeBenchmarkExec,
-                                        execOneBenchmark_multicore=fakeBenchmarkExec_mutlicore)
+        res = ExecClassif.execBenchmark(2,1,2,cls.argumentDictionaries, execOneBenchmark=fakeBenchmarkExec,
+                                        execOneBenchmark_multicore=fakeBenchmarkExec_mutlicore,
+                                        execOneBenchmarkMonoCore=fakeBenchmarkExec_monocore)
         cls.assertEqual(res, [[0,10], [1,4]])
 
     def test_multiclass_and_iter(cls):
         cls.argumentDictionaries = [{"a": 10}, {"a": 4}, {"a": 55}, {"a": 24}]
-        res = ExecClassif.execBenchmark(2,2,2,cls.argumentDictionaries, [], execOneBenchmark=fakeBenchmarkExec,
-                                        execOneBenchmark_multicore=fakeBenchmarkExec_mutlicore)
+        res = ExecClassif.execBenchmark(2,2,2,cls.argumentDictionaries, execOneBenchmark=fakeBenchmarkExec,
+                                        execOneBenchmark_multicore=fakeBenchmarkExec_mutlicore,
+                                        execOneBenchmarkMonoCore=fakeBenchmarkExec_monocore)
         cls.assertEqual(res, [[0,10], [1,4], [0,55], [1,24]])
 
     def test_no_iter_biclass_multicore(cls):
-        res = ExecClassif.execBenchmark(2,1,1,cls.argumentDictionaries, [], execOneBenchmark=fakeBenchmarkExec,
-                                        execOneBenchmark_multicore=fakeBenchmarkExec_mutlicore)
+        res = ExecClassif.execBenchmark(2,1,1,cls.argumentDictionaries, execOneBenchmark=fakeBenchmarkExec,
+                                        execOneBenchmark_multicore=fakeBenchmarkExec_mutlicore,
+                                        execOneBenchmarkMonoCore=fakeBenchmarkExec_monocore)
         cls.assertEqual(res, [[2,4]])
 
 
-def fakeExecMono(directory, name, labelsNames, classificationIndices, kFolds, coreIndex, type, pathF, randomState,
+def fakeExecMono(directory, name, labelsNames, classificationIndices, kFolds, coreIndex, type, pathF, randomState, labels,
                  hyperParamSearch="try", metrics="try", nIter=1, **arguments):
     return ["Mono", arguments]
 
 
 def fakeExecMulti(directory, coreIndex, name, classificationIndices, kFolds, type, pathF, LABELS_DICTIONARY,
-                  randomState, hyperParamSearch="",metrics=None, nIter=1, **arguments):
+                  randomState, labels, hyperParamSearch="", metrics=None, nIter=1, **arguments):
     return ["Multi", arguments]
 
 
@@ -108,7 +115,7 @@ class Test_execOneBenchmark(unittest.TestCase):
         flag, resMono, resMulti = ExecClassif.execOneBenchmark(coreIndex=10,
                                                                LABELS_DICTIONARY={0: "a", 1: "b"},
                                                                directory="Code/Tests/tmp_tests/",
-                                                               classificationIndices=([1,2,3,4], [0,10,20,30,40]),
+                                                               classificationIndices=([1,2,3,4], [0,5,6,7,8]),
                                                                args=FakeArg(),
                                                                kFolds="try",
                                                                randomState="try",
@@ -119,6 +126,7 @@ class Test_execOneBenchmark(unittest.TestCase):
                                                                views="try",
                                                                viewsIndices="try",
                                                                flag=None,
+                                                               labels=np.array([0,1,2,3,4,2,2,12,1,2,1,1,2,1,21]),
                                                                ExecMonoview_multicore=fakeExecMono,
                                                                ExecMultiview_multicore=fakeExecMulti,
                                                                initMultiviewArguments=fakeInitMulti)
@@ -130,6 +138,7 @@ class Test_execOneBenchmark(unittest.TestCase):
     @classmethod
     def tearDownClass(cls):
         os.remove("Code/Tests/tmp_tests/train_indices.csv")
+        os.remove("Code/Tests/tmp_tests/train_labels.csv")
         os.rmdir("Code/Tests/tmp_tests")
 
 
@@ -154,6 +163,7 @@ class Test_execOneBenchmark_multicore(unittest.TestCase):
                                                                views="try",
                                                                viewsIndices="try",
                                                                flag=None,
+                                                               labels=np.array([0,1,2,3,4,2,2,12,1,2,1,1,2,1,21]),
                                                                ExecMonoview_multicore=fakeExecMono,
                                                                ExecMultiview_multicore=fakeExecMulti,
                                                                initMultiviewArguments=fakeInitMulti)
@@ -165,127 +175,129 @@ class Test_execOneBenchmark_multicore(unittest.TestCase):
     @classmethod
     def tearDownClass(cls):
         os.remove("Code/Tests/tmp_tests/train_indices.csv")
+        os.remove("Code/Tests/tmp_tests/train_labels.csv")
         os.rmdir("Code/Tests/tmp_tests")
-
-class Test_analyzeMulticlass(unittest.TestCase):
-
-    @classmethod
-    def setUpClass(cls):
-        cls.flags = [[0, [0,1]], [0, [0,2]], [0, [0,3]], [0, [1,2]], [0, [1,3]], [0, [2,3]],
-                     [1, [0,1]], [1, [0,2]], [1, [0,3]], [1, [1,2]], [1, [1,3]], [1, [2,3]]]
-        cls.preds = [np.array([1, 0, 1, 1, 1]), np.array([1,0,0,1,1]), np.array([1,0,0,0,1]), np.array([1,1,0,1,1]),
-                     np.array([1,1,0,0,1]), np.array([1,1,1,0,1])] + \
-                    [np.array([0 in range(5)]) for i in range(6)]
-        cls.preds2 = [np.array([0 in range(5)]) for i in range(6)] + \
-                    [np.array([1, 0, 1, 1, 1]), np.array([1,0,0,1,1]),
-                     np.array([1,0,0,0,1]), np.array([1,1,0,1,1]), np.array([1,1,0,0,1]), np.array([1,1,1,0,1])]
-        cls.classifiersNames = ["chicken_is_heaven", "chicken_is_heaven", "chicken_is_heaven", "chicken_is_heaven",
-                                "chicken_is_heaven", "chicken_is_heaven", "chicken_is_heaven", "chicken_is_heaven",
-                                "chicken_is_heaven", "chicken_is_heaven", "chicken_is_heaven", "chicken_is_heaven",]
-        cls.classifiersNames2 = ["cheese_is_no_disease", "cheese_is_no_disease", "cheese_is_no_disease",
-                                "cheese_is_no_disease", "cheese_is_no_disease", "cheese_is_no_disease",
-                                "cheese_is_no_disease", "cheese_is_no_disease", "cheese_is_no_disease",
-                                "cheese_is_no_disease", "cheese_is_no_disease", "cheese_is_no_disease"]
-        cls.results = [[flag, [["", [name, "", "", pred]], ["", [name1, "", "", pred1]]], ["", ""]]
-                       for flag, name, pred, name1, pred1 in zip(cls.flags, cls.classifiersNames, cls.preds,
-                                                                 cls.classifiersNames2, cls.preds2)]
-        # cls.results = [[flag, ["", ["", name, "", pred]], ""] for flag, pred, name in
-        #                zip(cls.flags, cls.preds, cls.classifiersNames)]
-        cls.statsIter = 2
-        cls.nbExample = 5
-        cls.nbLabels = 4
-        cls.true_labels = np.array([0,1,2,3,0])
-
-    def test_simple(cls):
-        multiclassResults = ExecClassif.analyzeMulticlass(cls.results, cls.statsIter, cls.nbExample, cls.nbLabels, cls.true_labels, [["accuracy_score"]])
-        np.testing.assert_array_equal(multiclassResults[1]["chicken_is_heaven"]["labels"], cls.true_labels)
-
-class Test_genMetricsScores(unittest.TestCase):
-
-    @classmethod
-    def setUpClass(cls):
-        cls.multiclass_labels = np.array([0,1,2,3,4,5,2,1,3])
-        cls.wrong_labels = np.array([1,3,3,4,5,0,2,4,3])
-        cls.multiclassResults = [{"chicken_is_heaven":
-                                      {"labels": cls.multiclass_labels}}]
-        cls.true_labels = np.array([0,2,2,3,4,5,1,3,2])
-        cls.metrics = [["accuracy_score"]]
-        cls.score_to_get = accuracy_score(cls.true_labels, cls.multiclass_labels)
-
-    def test_simple(cls):
-        multiclassResults = ExecClassif.genMetricsScores(cls.multiclassResults, cls.true_labels, cls.metrics)
-        cls.assertEqual(cls.score_to_get, multiclassResults[0]["chicken_is_heaven"]["metricsScores"]["accuracy_score"])
-
-    def test_multiple_clf(cls):
-        cls.multiclassResults = [{"chicken_is_heaven": {"labels": cls.multiclass_labels},
-                                  "cheese_is_no_disease": {"labels": cls.wrong_labels}},
-                                 ]
-        multiclassResults = ExecClassif.genMetricsScores(cls.multiclassResults, cls.true_labels, cls.metrics)
-        cls.assertEqual(0, multiclassResults[0]["cheese_is_no_disease"]["metricsScores"]["accuracy_score"])
-        cls.assertEqual(cls.score_to_get, multiclassResults[0]["chicken_is_heaven"]["metricsScores"]["accuracy_score"])
-
-    def test_multiple_metrics(cls):
-        from sklearn.metrics import f1_score
-        cls.score_to_get_f1 = f1_score(cls.true_labels, cls.multiclass_labels, average="micro")
-        cls.metrics = [["accuracy_score"], ["f1_score"]]
-        multiclassResults = ExecClassif.genMetricsScores(cls.multiclassResults, cls.true_labels, cls.metrics)
-        cls.assertEqual(cls.score_to_get, multiclassResults[0]["chicken_is_heaven"]["metricsScores"]["accuracy_score"])
-        cls.assertEqual(cls.score_to_get_f1, multiclassResults[0]["chicken_is_heaven"]["metricsScores"]["f1_score"])
-
-    def test_multiple_iterations(cls):
-        cls.multiclassResults = [{"chicken_is_heaven": {"labels": cls.multiclass_labels}},
-                                 {"chicken_is_heaven": {"labels": cls.wrong_labels}},
-                                 ]
-        multiclassResults = ExecClassif.genMetricsScores(cls.multiclassResults, cls.true_labels, cls.metrics)
-        cls.assertEqual(0, multiclassResults[1]["chicken_is_heaven"]["metricsScores"]["accuracy_score"])
-        cls.assertEqual(cls.score_to_get, multiclassResults[0]["chicken_is_heaven"]["metricsScores"]["accuracy_score"])
-
-    def test_all(cls):
-        cls.multiclassResults = [{"chicken_is_heaven": {"labels": cls.multiclass_labels},
-                                                          "cheese_is_no_disease": {"labels": cls.wrong_labels}},
-                                                         {"chicken_is_heaven": {"labels": cls.wrong_labels},
-                                                          "cheese_is_no_disease": {"labels": cls.multiclass_labels}},
-                                                         ]
-        cls.metrics = [["accuracy_score"], ["f1_score"]]
-        from sklearn.metrics import f1_score
-        cls.score_to_get_f1 = f1_score(cls.true_labels, cls.multiclass_labels, average="micro")
-        multiclassResults = ExecClassif.genMetricsScores(cls.multiclassResults, cls.true_labels, cls.metrics)
-        cls.assertEqual(0, multiclassResults[1]["chicken_is_heaven"]["metricsScores"]["accuracy_score"])
-        cls.assertEqual(cls.score_to_get_f1, multiclassResults[1]["cheese_is_no_disease"]["metricsScores"]["f1_score"])
-
-
-class Test_getErrorOnLabels(unittest.TestCase):
-
-    @classmethod
-    def setUpClass(cls):
-        cls.multiclass_labels = np.array([0,1,2,3,4,5,2,1,3])
-        cls.wrong_labels = np.array([1,3,3,4,5,0,2,4,3])
-        cls.multiclassResults = [{"chicken_is_heaven":
-                                      {"labels": cls.multiclass_labels}}]
-        cls.true_labels = np.array([0,2,2,3,4,5,1,3,2])
-
-    def test_simple(cls):
-        multiclassResults = ExecClassif.getErrorOnLabels(cls.multiclassResults, cls.true_labels)
-        np.testing.assert_array_equal(np.array([1, 0, 1, 1, 1, 1, 0, 0, 0]),
-                                      multiclassResults[0]["chicken_is_heaven"]["errorOnExample"])
-
-    def test_full(cls):
-        cls.multiclassResults = [{"chicken_is_heaven": {"labels": cls.multiclass_labels},
-                                  "cheese_is_no_disease": {"labels": cls.wrong_labels}},
-                                 {"chicken_is_heaven": {"labels": cls.wrong_labels},
-                                  "cheese_is_no_disease": {"labels": cls.wrong_labels}},
-                                 ]
-        multiclassResults = ExecClassif.getErrorOnLabels(cls.multiclassResults, cls.true_labels)
-        np.testing.assert_array_equal(np.array([1, 0, 1, 1, 1, 1, 0, 0, 0]),
-                                      multiclassResults[0]["chicken_is_heaven"]["errorOnExample"])
-        np.testing.assert_array_equal(np.array([0, 0, 0, 0, 0, 0, 0, 0, 0]),
-                                      multiclassResults[1]["cheese_is_no_disease"]["errorOnExample"])
-
-    def test_type(cls):
-        multiclassResults = ExecClassif.getErrorOnLabels(cls.multiclassResults, cls.true_labels)
-        cls.assertEqual(type(multiclassResults[0]["chicken_is_heaven"]["errorOnExample"][0]), np.int64)
-        np.testing.assert_array_equal(np.array([1, 0, 1, 1, 1, 1, 0, 0, 0]),
-                                      multiclassResults[0]["chicken_is_heaven"]["errorOnExample"])
+#
+# class Test_analyzeMulticlass(unittest.TestCase):
+#
+#     @classmethod
+#     def setUpClass(cls):
+#         cls.flags = [[0, [0,1]], [0, [0,2]], [0, [0,3]], [0, [1,2]], [0, [1,3]], [0, [2,3]],
+#                      [1, [0,1]], [1, [0,2]], [1, [0,3]], [1, [1,2]], [1, [1,3]], [1, [2,3]]]
+#         cls.preds = [np.array([1, 0, 1, 1, 1]), np.array([1,0,0,1,1]), np.array([1,0,0,0,1]), np.array([1,1,0,1,1]),
+#                      np.array([1,1,0,0,1]), np.array([1,1,1,0,1])] + \
+#                     [np.array([0 in range(5)]) for i in range(6)]
+#         cls.preds2 = [np.array([0 in range(5)]) for i in range(6)] + \
+#                     [np.array([1, 0, 1, 1, 1]), np.array([1,0,0,1,1]),
+#                      np.array([1,0,0,0,1]), np.array([1,1,0,1,1]), np.array([1,1,0,0,1]), np.array([1,1,1,0,1])]
+#         cls.classifiersNames = ["chicken_is_heaven", "chicken_is_heaven", "chicken_is_heaven", "chicken_is_heaven",
+#                                 "chicken_is_heaven", "chicken_is_heaven", "chicken_is_heaven", "chicken_is_heaven",
+#                                 "chicken_is_heaven", "chicken_is_heaven", "chicken_is_heaven", "chicken_is_heaven",]
+#         cls.classifiersNames2 = ["cheese_is_no_disease", "cheese_is_no_disease", "cheese_is_no_disease",
+#                                 "cheese_is_no_disease", "cheese_is_no_disease", "cheese_is_no_disease",
+#                                 "cheese_is_no_disease", "cheese_is_no_disease", "cheese_is_no_disease",
+#                                 "cheese_is_no_disease", "cheese_is_no_disease", "cheese_is_no_disease"]
+#         cls.results = [[flag, [["", [name, "", "", pred]], ["", [name1, "", "", pred1]]], ["", ""]]
+#                        for flag, name, pred, name1, pred1 in zip(cls.flags, cls.classifiersNames, cls.preds,
+#                                                                  cls.classifiersNames2, cls.preds2)]
+#         # cls.results = [[flag, ["", ["", name, "", pred]], ""] for flag, pred, name in
+#         #                zip(cls.flags, cls.preds, cls.classifiersNames)]
+#         cls.statsIter = 2
+#         cls.nbExample = 5
+#         cls.nbLabels = 4
+#         cls.true_labels = np.array([0,1,2,3,0])
+#         cls.metrics = [["accuracy_score"]]
+#
+#     def test_simple(cls):
+#         multiclassResults = ExecClassif.analyzeMulticlass(cls.results, cls.statsIter, cls.nbExample, cls.nbLabels, cls.true_labels, [["accuracy_score"]])
+#         np.testing.assert_array_equal(multiclassResults[1]["chicken_is_heaven"]["labels"], cls.true_labels)
+#
+# class Test_genMetricsScores(unittest.TestCase):
+#
+#     @classmethod
+#     def setUpClass(cls):
+#         cls.multiclass_labels = np.array([0,1,2,3,4,5,2,1,3])
+#         cls.wrong_labels = np.array([1,3,3,4,5,0,2,4,3])
+#         cls.multiclassResults = [{"chicken_is_heaven":
+#                                       {"labels": cls.multiclass_labels}}]
+#         cls.true_labels = np.array([0,2,2,3,4,5,1,3,2])
+#         cls.metrics = [["accuracy_score"]]
+#         cls.score_to_get = accuracy_score(cls.true_labels, cls.multiclass_labels)
+#
+#     def test_simple(cls):
+#         multiclassResults = ExecClassif.genMetricsScores(cls.multiclassResults, cls.true_labels, cls.metrics)
+#         cls.assertEqual(cls.score_to_get, multiclassResults[0]["chicken_is_heaven"]["metricsScores"]["accuracy_score"])
+#
+#     def test_multiple_clf(cls):
+#         cls.multiclassResults = [{"chicken_is_heaven": {"labels": cls.multiclass_labels},
+#                                   "cheese_is_no_disease": {"labels": cls.wrong_labels}},
+#                                  ]
+#         multiclassResults = ExecClassif.genMetricsScores(cls.multiclassResults, cls.true_labels, cls.metrics)
+#         cls.assertEqual(0, multiclassResults[0]["cheese_is_no_disease"]["metricsScores"]["accuracy_score"])
+#         cls.assertEqual(cls.score_to_get, multiclassResults[0]["chicken_is_heaven"]["metricsScores"]["accuracy_score"])
+#
+#     def test_multiple_metrics(cls):
+#         from sklearn.metrics import f1_score
+#         cls.score_to_get_f1 = f1_score(cls.true_labels, cls.multiclass_labels, average="micro")
+#         cls.metrics = [["accuracy_score"], ["f1_score"]]
+#         multiclassResults = ExecClassif.genMetricsScores(cls.multiclassResults, cls.true_labels, cls.metrics)
+#         cls.assertEqual(cls.score_to_get, multiclassResults[0]["chicken_is_heaven"]["metricsScores"]["accuracy_score"])
+#         cls.assertEqual(cls.score_to_get_f1, multiclassResults[0]["chicken_is_heaven"]["metricsScores"]["f1_score"])
+#
+#     def test_multiple_iterations(cls):
+#         cls.multiclassResults = [{"chicken_is_heaven": {"labels": cls.multiclass_labels}},
+#                                  {"chicken_is_heaven": {"labels": cls.wrong_labels}},
+#                                  ]
+#         multiclassResults = ExecClassif.genMetricsScores(cls.multiclassResults, cls.true_labels, cls.metrics)
+#         cls.assertEqual(0, multiclassResults[1]["chicken_is_heaven"]["metricsScores"]["accuracy_score"])
+#         cls.assertEqual(cls.score_to_get, multiclassResults[0]["chicken_is_heaven"]["metricsScores"]["accuracy_score"])
+#
+#     def test_all(cls):
+#         cls.multiclassResults = [{"chicken_is_heaven": {"labels": cls.multiclass_labels},
+#                                                           "cheese_is_no_disease": {"labels": cls.wrong_labels}},
+#                                                          {"chicken_is_heaven": {"labels": cls.wrong_labels},
+#                                                           "cheese_is_no_disease": {"labels": cls.multiclass_labels}},
+#                                                          ]
+#         cls.metrics = [["accuracy_score"], ["f1_score"]]
+#         from sklearn.metrics import f1_score
+#         cls.score_to_get_f1 = f1_score(cls.true_labels, cls.multiclass_labels, average="micro")
+#         multiclassResults = ExecClassif.genMetricsScores(cls.multiclassResults, cls.true_labels, cls.metrics)
+#         cls.assertEqual(0, multiclassResults[1]["chicken_is_heaven"]["metricsScores"]["accuracy_score"])
+#         cls.assertEqual(cls.score_to_get_f1, multiclassResults[1]["cheese_is_no_disease"]["metricsScores"]["f1_score"])
+#
+#
+# class Test_getErrorOnLabels(unittest.TestCase):
+#
+#     @classmethod
+#     def setUpClass(cls):
+#         cls.multiclass_labels = np.array([0,1,2,3,4,5,2,1,3])
+#         cls.wrong_labels = np.array([1,3,3,4,5,0,2,4,3])
+#         cls.multiclassResults = [{"chicken_is_heaven":
+#                                       {"labels": cls.multiclass_labels}}]
+#         cls.true_labels = np.array([0,2,2,3,4,5,1,3,2])
+#
+#     def test_simple(cls):
+#         multiclassResults = ExecClassif.getErrorOnLabels(cls.multiclassResults, cls.true_labels)
+#         np.testing.assert_array_equal(np.array([1, 0, 1, 1, 1, 1, 0, 0, 0]),
+#                                       multiclassResults[0]["chicken_is_heaven"]["errorOnExample"])
+#
+#     def test_full(cls):
+#         cls.multiclassResults = [{"chicken_is_heaven": {"labels": cls.multiclass_labels},
+#                                   "cheese_is_no_disease": {"labels": cls.wrong_labels}},
+#                                  {"chicken_is_heaven": {"labels": cls.wrong_labels},
+#                                   "cheese_is_no_disease": {"labels": cls.wrong_labels}},
+#                                  ]
+#         multiclassResults = ExecClassif.getErrorOnLabels(cls.multiclassResults, cls.true_labels)
+#         np.testing.assert_array_equal(np.array([1, 0, 1, 1, 1, 1, 0, 0, 0]),
+#                                       multiclassResults[0]["chicken_is_heaven"]["errorOnExample"])
+#         np.testing.assert_array_equal(np.array([0, 0, 0, 0, 0, 0, 0, 0, 0]),
+#                                       multiclassResults[1]["cheese_is_no_disease"]["errorOnExample"])
+#
+#     def test_type(cls):
+#         multiclassResults = ExecClassif.getErrorOnLabels(cls.multiclassResults, cls.true_labels)
+#         cls.assertEqual(type(multiclassResults[0]["chicken_is_heaven"]["errorOnExample"][0]), np.int64)
+#         np.testing.assert_array_equal(np.array([1, 0, 1, 1, 1, 1, 0, 0, 0]),
+#                                       multiclassResults[0]["chicken_is_heaven"]["errorOnExample"])
 #
 # class Essai(unittest.TestCase):
 #