diff --git a/Code/MonoMultiViewClassifiers/ExecClassif.py b/Code/MonoMultiViewClassifiers/ExecClassif.py
index 9d5cdd6f78c7f6fa8b40b9be38f60fb4be5369b4..d98f12f7c6bfab1f4f90df1721023ffab9fef6b4 100644
--- a/Code/MonoMultiViewClassifiers/ExecClassif.py
+++ b/Code/MonoMultiViewClassifiers/ExecClassif.py
@@ -186,29 +186,7 @@ def classifyOneIter(LABELS_DICTIONARY, argumentDictionaries, nbCores, directory,
     """Used to execute mono and multiview classification and result analysis for one random state
          classification"""
     #TODO : Clarify this one
-    np.savetxt(directory + "train_indices.csv", classificationIndices[0], delimiter=",")
-    resultsMonoview = []
-    labelsNames = LABELS_DICTIONARY.values()
-
-    if nbCores > 1:
-        nbExperiments = len(argumentDictionaries["Monoview"])
-        for stepIndex in range(int(math.ceil(float(nbExperiments) / nbCores))):
-            resultsMonoview += (Parallel(n_jobs=nbCores)(
-                delayed(ExecMonoview_multicore)(directory, args.name, labelsNames, classificationIndices, kFolds,
-                                                coreIndex, args.type, args.pathF, randomState,
-                                                hyperParamSearch=hyperParamSearch,
-                                                metrics=metrics, nIter=args.CL_GS_iter,
-                                                **argumentDictionaries["Monoview"][coreIndex + stepIndex * nbCores])
-                for coreIndex in range(min(nbCores, nbExperiments - stepIndex * nbCores))))
 
-    else:
-        resultsMonoview += ([ExecMonoview(directory, DATASET.get("View" + str(arguments["viewIndex"])),
-                                          DATASET.get("Labels").value, args.name, labelsNames,
-                                          classificationIndices, kFolds, 1, args.type, args.pathF, randomState,
-                                          hyperParamSearch=hyperParamSearch, metrics=metrics, nIter=args.CL_GS_iter,
-                                          **arguments)
-                             for arguments in argumentDictionaries["Monoview"]])
-    monoviewTime = time.time() - dataBaseTime - start
 
     argumentDictionaries = initMultiviewArguments(args, benchmark, views, viewsIndices, argumentDictionaries,
                                                   randomState, directory, resultsMonoview, classificationIndices)
@@ -256,11 +234,37 @@ def classifyOneIter(LABELS_DICTIONARY, argumentDictionaries, nbCores, directory,
     return results, labelAnalysis
 
 
-def getResults(results, statsIter, nbMulticlass):
+def analyzeMulticlass(results, statsIter, nbExamples, nbLabels):
+    multiclassResults = [{} for _ in range(statsIter)]
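+    # Each entry of results is expected to be [flag, resultsMonoview, resultsMultiview], where
+    # flag[1] holds the pair of original labels of the one-vs-one problem; each biclass
+    # prediction below is turned into a vote for one of those two labels.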
+    for iterIndex in range(statsIter):
+        for flag, resMono, resMulti in results:
+            for classifierResult in resMono:
+                if classifierResult[1][0] not in multiclassResults[iterIndex]:
+                    multiclassResults[iterIndex][classifierResult[1][0]] = np.zeros((nbExamples, nbLabels),
+                                                                                    dtype=int)
+                for exampleIndex, label in enumerate(classifierResult[1][3]):
+                    if label == 1:
+                        multiclassResults[iterIndex][classifierResult[1][0]][exampleIndex, flag[1][0]] += 1
+                    else:
+                        multiclassResults[iterIndex][classifierResult[1][0]][exampleIndex, flag[1][1]] += 1
+
+    for iterIndex, multiclassiterResult in enumerate(multiclassResults):
+        for key, value in multiclassiterResult.items():
+            multiclassResults[iterIndex][key] = {"labels": np.argmax(value, axis=1)}
+    multiclassResults = getMetricsScores(multiclassResults)
+    multiclassResults = getErrorOnLabels(multiclassResults)
+    publishMulticlassResults(multiclassResults)
+    return multiclassResults
+
+
+def getResults(results, statsIter, nbMulticlass, nbExamples, nbLabels):
     if statsIter > 1:
         if nbMulticlass > 1:
-            multiclassResults = analyzeMulticlass(results)
-            analyzerIer(multiclassResults)
+            multiclassResults = analyzeMulticlass(results, statsIter, nbExamples, nbLabels)
+            analyzeIter(multiclassResults)
         else:
             biclassResults = analyzeBiclass(results)
             analyzeIter(biclassResults)
@@ -270,18 +271,21 @@ def getResults(results, statsIter, nbMulticlass):
         else:
             analyzeBiclass(results)
 
-def execOneBenchmark(LABELS_DICTIONARY, directory):
+
+def execOneBenchmark(coreIndex=-1, LABELS_DICTIONARY=None, directory=None, classificationIndices=None, args=None,
+                     kFolds=None, randomState=None, hyperParamSearch=None, metrics=None, argumentDictionaries=None,
+                     benchmark=None, views=None, viewsIndices=None, flag=None, ExecMonoview_multicore=ExecMonoview_multicore,
+                     ExecMultiview_multicore=ExecMultiview_multicore, initMultiviewArguments=initMultiviewArguments):
+    """Used to run a benchmark using one core. ExecMonoview_multicore, initMultiviewArguments and
+     ExecMultiview_multicore args are only used for tests"""
     resultsMonoview = []
-    labelsNames = LABELS_DICTIONARY.values()
+    labelsNames = list(LABELS_DICTIONARY.values())
     np.savetxt(directory + "train_indices.csv", classificationIndices[0], delimiter=",")
-
     resultsMonoview += [ExecMonoview_multicore(directory, args.name, labelsNames, classificationIndices, kFolds,
                                                coreIndex, args.type, args.pathF, randomState,
-                                               hyperParamSearch=hyperParamSearch,
-                                               metrics=metrics, nIter=args.CL_GS_iter,
-                                               **arguments)
-                        for arguments in argumentDictionaries["Monoview"]]
-    monoviewTime = time.time() - dataBaseTime - start
+                                               hyperParamSearch=hyperParamSearch, metrics=metrics,
+                                               nIter=args.CL_GS_iter, **argument)
+                        for argument in argumentDictionaries["Monoview"]]
 
     argumentDictionaries = initMultiviewArguments(args, benchmark, views, viewsIndices, argumentDictionaries,
                                                   randomState, directory, resultsMonoview, classificationIndices)
@@ -292,18 +296,57 @@ def execOneBenchmark(LABELS_DICTIONARY, directory):
                                 args.pathF, LABELS_DICTIONARY, randomState, hyperParamSearch=hyperParamSearch,
                                 metrics=metrics, nIter=args.CL_GS_iter, **arguments)
         for arguments in argumentDictionaries["Multiview"]]
-    multiviewTime = time.time() - monoviewTime - dataBaseTime - start
-    return [-1]
+    return [flag, resultsMonoview, resultsMultiview]
+
+
+def execOneBenchmark_multicore(nbCores=-1, LABELS_DICTIONARY=None, directory=None, classificationIndices=None, args=None,
+                               kFolds=None, randomState=None, hyperParamSearch=None, metrics=None, argumentDictionaries=None,
+                               benchmark=None, views=None, viewsIndices=None, flag=None, ExecMonoview_multicore=ExecMonoview_multicore,
+                               ExecMultiview_multicore=ExecMultiview_multicore, initMultiviewArguments=initMultiviewArguments):
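+    """Multicore counterpart of execOneBenchmark. As above, the ExecMonoview_multicore, ExecMultiview_multicore
+    and initMultiviewArguments arguments are only used by the tests, to inject mocks"""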
+
+    np.savetxt(directory + "train_indices.csv", classificationIndices[0], delimiter=",")
+    resultsMonoview = []
+    labelsNames = list(LABELS_DICTIONARY.values())
+
+    nbExperiments = len(argumentDictionaries["Monoview"])
+    nbMulticoreToDo = int(math.ceil(float(nbExperiments) / nbCores))
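+    # The monoview experiments are dispatched in chunks of at most nbCores jobs, so that no more
+    # than nbCores classifiers are trained at the same time.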
+    for stepIndex in range(nbMulticoreToDo):
+        resultsMonoview += (Parallel(n_jobs=nbCores)(
+            delayed(ExecMonoview_multicore)(directory, args.name, labelsNames, classificationIndices, kFolds,
+                                            coreIndex, args.type, args.pathF, randomState,
+                                            hyperParamSearch=hyperParamSearch,
+                                            metrics=metrics, nIter=args.CL_GS_iter,
+                                            **argumentDictionaries["Monoview"][coreIndex + stepIndex * nbCores])
+            for coreIndex in range(min(nbCores, nbExperiments - stepIndex * nbCores))))
 
+    argumentDictionaries = initMultiviewArguments(args, benchmark, views, viewsIndices, argumentDictionaries,
+                                                  randomState, directory, resultsMonoview, classificationIndices)
+
+    resultsMultiview = []
+    nbExperiments = len(argumentDictionaries["Multiview"])
+    nbMulticoreToDo = int(math.ceil(float(nbExperiments) / nbCores))
+    for stepIndex in range(nbMulticoreToDo):
+        resultsMultiview += Parallel(n_jobs=nbCores)(
+            delayed(ExecMultiview_multicore)(directory, coreIndex, args.name, classificationIndices, kFolds,
+                                             args.type, args.pathF, LABELS_DICTIONARY, randomState,
+                                             hyperParamSearch=hyperParamSearch, metrics=metrics, nIter=args.CL_GS_iter,
+                                             **argumentDictionaries["Multiview"][stepIndex * nbCores + coreIndex])
+            for coreIndex in range(min(nbCores, nbExperiments - stepIndex * nbCores)))
 
-def execOneBenchmark_multicore():
-    return [-1]
+    return [flag, resultsMonoview, resultsMultiview]
 
 
 def execBenchmark(nbCores, statsIter, nbMulticlass, argumentsDictionaries,
                   execOneBenchmark=execOneBenchmark, execOneBenchmark_multicore=execOneBenchmark_multicore):
     """Used to execute the needed benchmark(s) on multicore or mono-core functions
     The execOneBenchmark and execOneBenchmark_multicore keywords args are only used in the tests"""
+    # TODO :  find a way to flag
+
+    logging.debug("Start:\t Executing all the needed biclass benchmarks")
     results = []
     if nbCores > 1:
         if statsIter > 1 or nbMulticlass > 1:
@@ -319,9 +358,14 @@ def execBenchmark(nbCores, statsIter, nbMulticlass, argumentsDictionaries,
     else:
         for arguments in argumentsDictionaries:
             results += [execOneBenchmark(**arguments)]
+    logging.debug("Done:\t Executing all the needed biclass benchmarks")
+
     # Do everything with flagging
 
-    # getResults(results, statsIter, nbMulticlass)
+    logging.debug("Start:\t Analyzing preds")
+    # getResults(preds, statsIter, nbMulticlass)
+    logging.debug("Done:\t Analyzing preds")
+
     return results
 
 
@@ -391,6 +435,7 @@ def execClassif(arguments):
                                             initKWARGS)
     directories = execution.genDirecortiesNames(directory, statsIter, labelsIndices,
                                                 multiclassMethod, LABELS_DICTIONARY)
+    # TODO : Gen arguments dictionaries
 
     if statsIter > 1:
         logging.debug("Start:\t Benchmark classification")
diff --git a/Code/MonoMultiViewClassifiers/Metrics/__init__.py b/Code/MonoMultiViewClassifiers/Metrics/__init__.py
index c5f17c0f8e71715c99885b3355766abc9821e7b0..6182e3c0569c09ee8d0da7681c999977f2d411be 100644
--- a/Code/MonoMultiViewClassifiers/Metrics/__init__.py
+++ b/Code/MonoMultiViewClassifiers/Metrics/__init__.py
@@ -29,5 +29,9 @@ Define a getConfig function
         kwargs : every argument that is specific to the metric
     Output :
         configString : A string that gives the name of the metric and explains how it is configured. Must end by
-                        (lower is better) or (higher is better) to be able to analyze the results
+                        (lower is better) or (higher is better) so that the results can be analyzed
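+
+    For instance, a minimal sketch of such a function (assuming the metric simply wraps
+    sklearn's accuracy_score) could be:
+        def getConfig(**kwargs): return "accuracy_score (higher is better)"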
 """
\ No newline at end of file
diff --git a/Code/MonoMultiViewClassifiers/Monoview/ExecClassifMonoView.py b/Code/MonoMultiViewClassifiers/Monoview/ExecClassifMonoView.py
index 6f38ceac208f6bdf16a30a71b183a740a1bcc819..085b3481b1ef450f0f48b2f67cb11693b0b0363c 100644
--- a/Code/MonoMultiViewClassifiers/Monoview/ExecClassifMonoView.py
+++ b/Code/MonoMultiViewClassifiers/Monoview/ExecClassifMonoView.py
@@ -161,7 +161,7 @@ def ExecMonoview(directory, X, Y, name, labelsNames, classificationIndices, KFol
     cl_desc = [value for key, value in sorted(clKWARGS.items())]
     logging.debug("Done:\t Getting Results")
 
-    logging.debug("Start:\t Saving results")
+    logging.debug("Start:\t Saving preds")
     saveResults(stringAnalysis, outputFileName, full_labels_pred, y_train_pred, y_train, imagesAnalysis)
     logging.info("Done:\t Saving Results")
 
diff --git a/Code/MonoMultiViewClassifiers/Monoview/ExecPlot.py b/Code/MonoMultiViewClassifiers/Monoview/ExecPlot.py
index 637cb4ede9752e07f0153bbf8eea92f2ee699ab6..2ea2f1d75e6579a74a2f0824a283174576081b6d 100644
--- a/Code/MonoMultiViewClassifiers/Monoview/ExecPlot.py
+++ b/Code/MonoMultiViewClassifiers/Monoview/ExecPlot.py
@@ -1,6 +1,6 @@
 #!/usr/bin/env python
 
-""" Script whichs helps to replot results from Feature Parameter Optimisation """
+""" Script whichs helps to replot preds from Feature Parameter Optimisation """
 
 # Import built-in modules
 import argparse  # for acommand line arguments
@@ -22,7 +22,7 @@ __status__ = "Prototype"  # Production, Development, Prototype
 __date__ = 2016 - 03 - 25
 
 parser = argparse.ArgumentParser(
-    description='This method can be used to replot results from Feature Parameter Optimisation',
+    description='This script can be used to replot results from Feature Parameter Optimisation',
     formatter_class=argparse.ArgumentDefaultsHelpFormatter)
 args = parser.parse_args()
 args.name = "Caltech"
diff --git a/Code/MonoMultiViewClassifiers/Monoview/ExportResults.py b/Code/MonoMultiViewClassifiers/Monoview/ExportResults.py
index 7df50125ce4b055ba4bcb37df60be99de45b3395..dedcf5e6735ee00e0dfcfa4bf6b11ed5fad2e0e6 100644
--- a/Code/MonoMultiViewClassifiers/Monoview/ExportResults.py
+++ b/Code/MonoMultiViewClassifiers/Monoview/ExportResults.py
@@ -1,6 +1,6 @@
 #!/usr/bin/env python
 
-""" Library: Functions to export results to CSV or plots """
+""" Library: Functions to export preds to CSV or plots """
 
 # Import built-in modules
 import os  # for iteration throug directories
diff --git a/Code/MonoMultiViewClassifiers/Multiview/ExecMultiview.py b/Code/MonoMultiViewClassifiers/Multiview/ExecMultiview.py
index 519a8bf2b0d6422f221ff1fdd4ea1185b975678c..5dfaf35115d4ab9ffac05d5b9f82a54eddd942bb 100644
--- a/Code/MonoMultiViewClassifiers/Multiview/ExecMultiview.py
+++ b/Code/MonoMultiViewClassifiers/Multiview/ExecMultiview.py
@@ -141,9 +141,9 @@ def ExecMultiview(directory, DATASET, name, classificationIndices, KFolds, nbCor
                                                                            viewsIndices, randomState)
     logging.info("Done:\t Result Analysis for " + CL_type)
 
-    logging.debug("Start:\t Saving results")
+    logging.debug("Start:\t Saving preds")
     saveResults(LABELS_DICTIONARY, stringAnalysis, views, classifierModule, classificationKWARGS, directory,
                 learningRate, name, imagesAnalysis)
-    logging.debug("Start:\t Saving results")
+    logging.debug("Start:\t Saving preds")
 
     return CL_type, classificationKWARGS, metricsScores, fullLabels
diff --git a/Code/MonoMultiViewClassifiers/ResultAnalysis.py b/Code/MonoMultiViewClassifiers/ResultAnalysis.py
index 49736f02cc7fd2840a7e2011b5d79f2821c8177f..6e1ba8f21f422cdc8d36a64ee7cb7f8a15cf16a9 100644
--- a/Code/MonoMultiViewClassifiers/ResultAnalysis.py
+++ b/Code/MonoMultiViewClassifiers/ResultAnalysis.py
@@ -39,7 +39,7 @@ def genFusionName(type_, a, b, c):
 
 
 def genNamesFromRes(mono, multi):
-    """Used to generate classifiers names list (inthe right order) from mono- and multi-view results"""
+    """Used to generate classifiers names list (inthe right order) from mono- and multi-view preds"""
     names = [res[1][0] + "-" + res[1][1][-1] for res in mono]
     names += [type_ if type_ != "Fusion" else genFusionName(type_, a, b, c) for type_, a, b, c in multi]
     return names
diff --git a/Code/MonoMultiViewClassifiers/utils/GetMultiviewDb.py b/Code/MonoMultiViewClassifiers/utils/GetMultiviewDb.py
index 41b272a1d0dffc1ab5f08f781c6849f3cab4881b..52893cf9c8256e5535ce39c1382d96a8f5d2badc 100644
--- a/Code/MonoMultiViewClassifiers/utils/GetMultiviewDb.py
+++ b/Code/MonoMultiViewClassifiers/utils/GetMultiviewDb.py
@@ -557,7 +557,7 @@ def getClassicDBcsv(views, pathF, nameDB, NB_CLASS, askedLabelsNames, randomStat
 #                                 if arrayLen == (nbBins - 1) * (lenBin - overlapping) + lenBin:
 #                                     results.append({"nbBins": nbBins, "overlapping": overlapping, "lenBin": lenBin})
 #                                     if len(results) == maxNbSolutions:
-#                                         params = results[randomState.randrange(len(results))]
+#                                         params = results[randomState.randrange(len(results))]
 #                                         return params
 #
 #
diff --git a/Code/MonoMultiViewClassifiers/utils/execution.py b/Code/MonoMultiViewClassifiers/utils/execution.py
index f4ef979aea9674762552df637a456496dc8ebff6..d901955331311543af407335d064daae1a306794 100644
--- a/Code/MonoMultiViewClassifiers/utils/execution.py
+++ b/Code/MonoMultiViewClassifiers/utils/execution.py
@@ -63,7 +63,7 @@ def parseTheArgs(arguments):
                             help='Determine which multiview classifier to use if empty, considering all',
                             default=[''])
     groupClass.add_argument('--CL_statsiter', metavar='INT', action='store',
-                            help="Number of iteration for each algorithm to mean results on different random states. "
+                            help="Number of iteration for each algorithm to mean preds on different random states. "
                                  "If using multiple cores, it's highly recommended to use statsiter mod nbCores == 0",
                             type=int,
                             default=2)
@@ -211,7 +211,7 @@ def initRandomState(randomStateArg, directory):
 
 
 def initLogFile(args):
-    """Used to init the directory where the results will be stored and the log file"""
+    """Used to init the directory where the preds will be stored and the log file"""
     resultDirectory = "../Results/" + args.name + "/started_" + time.strftime("%Y_%m_%d-%H_%M") + "/"
     logFileName = time.strftime("%Y%m%d-%H%M%S") + "-" + ''.join(args.CL_type) + "-" + "_".join(
         args.views) + "-" + args.name + "-LOG"
diff --git a/Code/Tests/test_ExecClassif.py b/Code/Tests/test_ExecClassif.py
index b227bf17becc1cb7dcd8308f60aceb36af69545c..69f4b1477389ea711b6421162dd7716c7ae18174 100644
--- a/Code/Tests/test_ExecClassif.py
+++ b/Code/Tests/test_ExecClassif.py
@@ -1,6 +1,7 @@
 import unittest
 import argparse
-import time
+import os
+import numpy as np
 
 from ..MonoMultiViewClassifiers import ExecClassif
 
@@ -34,11 +35,11 @@ class Test_initMonoviewArguments(unittest.TestCase):
         arguments = ExecClassif.initMonoviewExps(benchmark, {}, {}, 0, {})
 
 
-def fakeBenchmarkExec(coreIndex, a):
+def fakeBenchmarkExec(coreIndex=-1, a=7):
     return [coreIndex, a]
 
 
-def fakeBenchmarkExec_mutlicore(nbCores, a):
+def fakeBenchmarkExec_mutlicore(nbCores=-1, a=6):
     return [nbCores,a]
 
 
@@ -72,188 +73,315 @@ class Test_execBenchmark(unittest.TestCase):
         cls.assertEqual(res, [[2,4]])
 
 
+def fakeExecMono(directory, name, labelsNames, classificationIndices, kFolds, coreIndex, type, pathF, randomState,
+                 hyperParamSearch="try", metrics="try", nIter=1, **arguments):
+    return ["Mono", arguments]
 
 
+def fakeExecMulti(directory, coreIndex, name, classificationIndices, kFolds, type, pathF, LABELS_DICTIONARY,
+                  randomState, hyperParamSearch="", metrics=None, nIter=1, **arguments):
+    return ["Multi", arguments]
 
 
+def fakeInitMulti(args, benchmark, views, viewsIndices, argumentDictionaries,
+                  randomState, directory, resultsMonoview, classificationIndices):
+    return {"Monoview": [{"try": 0}, {"try2": 100}], "Multiview": [{"try3": 5}, {"try4": 10}]}
 
-class Essai(unittest.TestCase):
-
-    def setUp(self):
-        parser = argparse.ArgumentParser(
-            description='This file is used to benchmark the scores fo multiple classification algorithm on multiview data.',
-            formatter_class=argparse.ArgumentDefaultsHelpFormatter)
-
-        groupStandard = parser.add_argument_group('Standard arguments')
-        groupStandard.add_argument('-log', action='store_true', help='Use option to activate Logging to Console')
-        groupStandard.add_argument('--name', metavar='STRING', action='store', help='Name of Database (default: %(default)s)',
-                                   default='Plausible')
-        groupStandard.add_argument('--type', metavar='STRING', action='store',
-                                   help='Type of database : .hdf5 or .csv (default: %(default)s)',
-                                   default='.hdf5')
-        groupStandard.add_argument('--views', metavar='STRING', action='store', nargs="+",
-                                   help='Name of the views selected for learning (default: %(default)s)',
-                                   default=[''])
-        groupStandard.add_argument('--pathF', metavar='STRING', action='store', help='Path to the views (default: %(default)s)',
-                                   default='/home/bbauvin/Documents/Data/Data_multi_omics/')
-        groupStandard.add_argument('--nice', metavar='INT', action='store', type=int,
-                                   help='Niceness for the process', default=0)
-        groupStandard.add_argument('--randomState', metavar='STRING', action='store',
-                                   help="The random state seed to use or a file where we can find it's get_state", default=None)
-
-        groupClass = parser.add_argument_group('Classification arguments')
-        groupClass.add_argument('--CL_split', metavar='FLOAT', action='store',
-                                help='Determine the split between learning and validation sets', type=float,
-                                default=0.2)
-        groupClass.add_argument('--CL_nbFolds', metavar='INT', action='store', help='Number of folds in cross validation',
-                                type=int, default=2)
-        groupClass.add_argument('--CL_nb_class', metavar='INT', action='store', help='Number of classes, -1 for all', type=int,
-                                default=2)
-        groupClass.add_argument('--CL_classes', metavar='STRING', action='store', nargs="+",
-                                help='Classes used in the dataset (names of the folders) if not filled, random classes will be '
-                                     'selected ex. walrus mole leopard', default=["yes", "no"])
-        groupClass.add_argument('--CL_type', metavar='STRING', action='store', nargs="+",
-                                help='Determine whether to use Multiview and/or Monoview, or Benchmark',
-                                default=['Benchmark'])
-        groupClass.add_argument('--CL_algos_monoview', metavar='STRING', action='store', nargs="+",
-                                help='Determine which monoview classifier to use if empty, considering all',
-                                default=[''])
-        groupClass.add_argument('--CL_algos_multiview', metavar='STRING', action='store', nargs="+",
-                                help='Determine which multiview classifier to use if empty, considering all',
-                                default=[''])
-        groupClass.add_argument('--CL_cores', metavar='INT', action='store', help='Number of cores, -1 for all', type=int,
-                                default=2)
-        groupClass.add_argument('--CL_statsiter', metavar='INT', action='store',
-                                help="Number of iteration for each algorithm to mean results if using multiple cores, it's highly recommended to use statsiter mod(nbCores) = 0",
-                                type=int,
-                                default=2)
-        groupClass.add_argument('--CL_metrics', metavar='STRING', action='store', nargs="+",
-                                help='Determine which metrics to use, separate metric and configuration with ":".'
-                                     ' If multiple, separate with space. If no metric is specified, '
-                                     'considering all with accuracy for classification '
-                                , default=[''])
-        groupClass.add_argument('--CL_metric_princ', metavar='STRING', action='store',
-                                help='Determine which metric to use for randomSearch and optimization', default="f1_score")
-        groupClass.add_argument('--CL_GS_iter', metavar='INT', action='store',
-                                help='Determine how many Randomized grid search tests to do', type=int, default=2)
-        groupClass.add_argument('--CL_HPS_type', metavar='STRING', action='store',
-                                help='Determine which hyperparamter search function use', default="randomizedSearch")
-
-        groupRF = parser.add_argument_group('Random Forest arguments')
-        groupRF.add_argument('--CL_RandomForest_trees', metavar='INT', type=int, action='store', help='Number max trees',
-                             default=25)
-        groupRF.add_argument('--CL_RandomForest_max_depth', metavar='INT', type=int, action='store',
-                             help='Max depth for the trees',
-                             default=5)
-        groupRF.add_argument('--CL_RandomForest_criterion', metavar='STRING', action='store', help='Criterion for the trees',
-                             default="entropy")
-
-        groupSVMLinear = parser.add_argument_group('Linear SVM arguments')
-        groupSVMLinear.add_argument('--CL_SVMLinear_C', metavar='INT', type=int, action='store', help='Penalty parameter used',
-                                    default=1)
-
-        groupSVMRBF = parser.add_argument_group('SVW-RBF arguments')
-        groupSVMRBF.add_argument('--CL_SVMRBF_C', metavar='INT', type=int, action='store', help='Penalty parameter used',
-                                 default=1)
-
-        groupSVMPoly = parser.add_argument_group('Poly SVM arguments')
-        groupSVMPoly.add_argument('--CL_SVMPoly_C', metavar='INT', type=int, action='store', help='Penalty parameter used',
-                                  default=1)
-        groupSVMPoly.add_argument('--CL_SVMPoly_deg', metavar='INT', type=int, action='store', help='Degree parameter used',
-                                  default=2)
-
-        groupAdaboost = parser.add_argument_group('Adaboost arguments')
-        groupAdaboost.add_argument('--CL_Adaboost_n_est', metavar='INT', type=int, action='store', help='Number of estimators',
-                                   default=2)
-        groupAdaboost.add_argument('--CL_Adaboost_b_est', metavar='STRING', action='store', help='Estimators',
-                                   default='DecisionTreeClassifier')
-
-        groupDT = parser.add_argument_group('Decision Trees arguments')
-        groupDT.add_argument('--CL_DecisionTree_depth', metavar='INT', type=int, action='store',
-                             help='Determine max depth for Decision Trees', default=3)
-        groupDT.add_argument('--CL_DecisionTree_criterion', metavar='STRING', action='store',
-                             help='Determine max depth for Decision Trees', default="entropy")
-        groupDT.add_argument('--CL_DecisionTree_splitter', metavar='STRING', action='store',
-                             help='Determine criterion for Decision Trees', default="random")
-
-        groupSGD = parser.add_argument_group('SGD arguments')
-        groupSGD.add_argument('--CL_SGD_alpha', metavar='FLOAT', type=float, action='store',
-                              help='Determine alpha for SGDClassifier', default=0.1)
-        groupSGD.add_argument('--CL_SGD_loss', metavar='STRING', action='store',
-                              help='Determine loss for SGDClassifier', default='log')
-        groupSGD.add_argument('--CL_SGD_penalty', metavar='STRING', action='store',
-                              help='Determine penalty for SGDClassifier', default='l2')
-
-        groupKNN = parser.add_argument_group('KNN arguments')
-        groupKNN.add_argument('--CL_KNN_neigh', metavar='INT', type=int, action='store',
-                              help='Determine number of neighbors for KNN', default=1)
-        groupKNN.add_argument('--CL_KNN_weights', metavar='STRING', action='store',
-                              help='Determine number of neighbors for KNN', default="distance")
-        groupKNN.add_argument('--CL_KNN_algo', metavar='STRING', action='store',
-                              help='Determine number of neighbors for KNN', default="auto")
-        groupKNN.add_argument('--CL_KNN_p', metavar='INT', type=int, action='store',
-                              help='Determine number of neighbors for KNN', default=1)
-
-        groupSCM = parser.add_argument_group('SCM arguments')
-        groupSCM.add_argument('--CL_SCM_max_rules', metavar='INT', type=int, action='store',
-                              help='Max number of rules for SCM', default=1)
-        groupSCM.add_argument('--CL_SCM_p', metavar='FLOAT', type=float, action='store',
-                              help='Max number of rules for SCM', default=1.0)
-        groupSCM.add_argument('--CL_SCM_model_type', metavar='STRING', action='store',
-                              help='Max number of rules for SCM', default="conjunction")
-
-        groupMumbo = parser.add_argument_group('Mumbo arguments')
-        groupMumbo.add_argument('--MU_types', metavar='STRING', action='store', nargs="+",
-                                help='Determine which monoview classifier to use with Mumbo',
-                                default=[''])
-        groupMumbo.add_argument('--MU_config', metavar='STRING', action='store', nargs='+',
-                                help='Configuration for the monoview classifier in Mumbo separate each classifier with sapce and each argument with:',
-                                default=[''])
-        groupMumbo.add_argument('--MU_iter', metavar='INT', action='store', nargs=3,
-                                help='Max number of iteration, min number of iteration, convergence threshold', type=float,
-                                default=[10, 1, 0.01])
-        groupMumbo.add_argument('--MU_combination', action='store_true',
-                                help='Try all the monoview classifiers combinations for each view',
-                                default=False)
-
-
-        groupFusion = parser.add_argument_group('Fusion arguments')
-        groupFusion.add_argument('--FU_types', metavar='STRING', action='store', nargs="+",
-                                 help='Determine which type of fusion to use',
-                                 default=[''])
-        groupEarlyFusion = parser.add_argument_group('Early Fusion arguments')
-        groupEarlyFusion.add_argument('--FU_early_methods', metavar='STRING', action='store', nargs="+",
-                                      help='Determine which early fusion method of fusion to use',
-                                      default=[''])
-        groupEarlyFusion.add_argument('--FU_E_method_configs', metavar='STRING', action='store', nargs='+',
-                                      help='Configuration for the early fusion methods separate '
-                                           'method by space and values by :',
-                                      default=[''])
-        groupEarlyFusion.add_argument('--FU_E_cl_config', metavar='STRING', action='store', nargs='+',
-                                      help='Configuration for the monoview classifiers used separate classifier by space '
-                                           'and configs must be of form argument1_name:value,argument2_name:value',
-                                      default=[''])
-        groupEarlyFusion.add_argument('--FU_E_cl_names', metavar='STRING', action='store', nargs='+',
-                                      help='Name of the classifiers used for each early fusion method', default=[''])
-
-        groupLateFusion = parser.add_argument_group('Late Early Fusion arguments')
-        groupLateFusion.add_argument('--FU_late_methods', metavar='STRING', action='store', nargs="+",
-                                     help='Determine which late fusion method of fusion to use',
-                                     default=[''])
-        groupLateFusion.add_argument('--FU_L_method_config', metavar='STRING', action='store', nargs='+',
-                                     help='Configuration for the fusion method', default=[''])
-        groupLateFusion.add_argument('--FU_L_cl_config', metavar='STRING', action='store', nargs='+',
-                                     help='Configuration for the monoview classifiers used', default=[''])
-        groupLateFusion.add_argument('--FU_L_cl_names', metavar='STRING', action='store', nargs="+",
-                                     help='Names of the classifier used for late fusion', default=[''])
-        groupLateFusion.add_argument('--FU_L_select_monoview', metavar='STRING', action='store',
-                                     help='Determine which method to use to select the monoview classifiers',
-                                     default="intersect")
-        self.args = parser.parse_args([])
-
-
-def suite():
-    suite = unittest.TestSuite()
-    suite.addTest(Test_initBenchmark('test_initKWARGSFunc_no_monoview'))
-    # suite.addTest(WidgetTestCase('test_widget_resize'))
-    return suite
\ No newline at end of file
+
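+# Minimal stand-in for the parsed argparse namespace: it only exposes the attributes that
+# execOneBenchmark and execOneBenchmark_multicore actually read.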
+class FakeArg(object):
+    def __init__(self):
+        self.name = "chicken_is_heaven"
+        self.type = "type"
+        self.pathF = "pathF"
+        self.CL_GS_iter = 1
+
+
+class Test_execOneBenchmark(unittest.TestCase):
+
+    @classmethod
+    def setUpClass(cls):
+
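+        # The benchmark dumps train_indices.csv, so give it a throw-away directory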
+        os.mkdir("Code/Tests/tmp_tests")
+
+    def test_simple(cls):
+        flag, resMono, resMulti = ExecClassif.execOneBenchmark(coreIndex=10,
+                                                               LABELS_DICTIONARY={0: "a", 1: "b"},
+                                                               directory="Code/Tests/tmp_tests/",
+                                                               classificationIndices=([1,2,3,4], [0,10,20,30,40]),
+                                                               args=FakeArg(),
+                                                               kFolds="try",
+                                                               randomState="try",
+                                                               hyperParamSearch="try",
+                                                               metrics="try",
+                                                               argumentDictionaries={"Monoview": [{"try": 0}, {"try2": 100}]},
+                                                               benchmark="try",
+                                                               views="try",
+                                                               viewsIndices="try",
+                                                               flag=None,
+                                                               ExecMonoview_multicore=fakeExecMono,
+                                                               ExecMultiview_multicore=fakeExecMulti,
+                                                               initMultiviewArguments=fakeInitMulti)
+
+        cls.assertEqual(flag, None)
+        cls.assertEqual(resMono, [["Mono", {"try": 0}], ["Mono", {"try2": 100}]])
+        cls.assertEqual(resMulti, [["Multi", {"try3": 5}], ["Multi", {"try4": 10}]])
+
+    @classmethod
+    def tearDownClass(cls):
+        os.remove("Code/Tests/tmp_tests/train_indices.csv")
+        os.rmdir("Code/Tests/tmp_tests")
+
+
+class Test_execOneBenchmark_multicore(unittest.TestCase):
+
+    @classmethod
+    def setUpClass(cls):
+        os.mkdir("Code/Tests/tmp_tests")
+
+    def test_simple(cls):
+        flag, resMono, resMulti = ExecClassif.execOneBenchmark_multicore(nbCores=2,
+                                                               LABELS_DICTIONARY={0: "a", 1: "b"},
+                                                               directory="Code/Tests/tmp_tests/",
+                                                               classificationIndices=([1,2,3,4], [0,10,20,30,40]),
+                                                               args=FakeArg(),
+                                                               kFolds="try",
+                                                               randomState="try",
+                                                               hyperParamSearch="try",
+                                                               metrics="try",
+                                                               argumentDictionaries={"Monoview": [{"try": 0}, {"try2": 100}]},
+                                                               benchmark="try",
+                                                               views="try",
+                                                               viewsIndices="try",
+                                                               flag=None,
+                                                               ExecMonoview_multicore=fakeExecMono,
+                                                               ExecMultiview_multicore=fakeExecMulti,
+                                                               initMultiviewArguments=fakeInitMulti)
+
+        cls.assertEqual(flag, None)
+        cls.assertEqual(resMono, [["Mono", {"try": 0}], ["Mono", {"try2": 100}]])
+        cls.assertEqual(resMulti, [["Multi", {"try3": 5}], ["Multi", {"try4": 10}]])
+
+    @classmethod
+    def tearDownClass(cls):
+        os.remove("Code/Tests/tmp_tests/train_indices.csv")
+        os.rmdir("Code/Tests/tmp_tests")
+
+class Test_analyzeMulticlass(unittest.TestCase):
+
+    @classmethod
+    def setUpClass(cls):
+        cls.flags = [[0, [0,1]], [0, [0,2]], [0, [0,3]], [0, [1,2]], [0, [1,3]], [0, [2,3]],
+                     [1, [0,1]], [1, [0,2]], [1, [0,3]], [1, [1,2]], [1, [1,3]], [1, [2,3]]]
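+        # Each flag is [statsIterIndex, [label1, label2]]: two statistical iterations over the
+        # six one-vs-one problems of a four-label multiclass task.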
+        cls.preds = [np.array([1, 0, 1, 1, 1]), np.array([1,0,0,1,1]), np.array([1,0,0,0,1]), np.array([1,1,0,1,1]),
+                     np.array([1,1,0,0,1]), np.array([1,1,1,0,1])] + \
+                    [np.array([0 for _ in range(5)]) for _ in range(6)]
+        cls.preds2 = [np.array([0 for _ in range(5)]) for _ in range(6)] + \
+                    [np.array([1, 0, 1, 1, 1]), np.array([1,0,0,1,1]),
+                     np.array([1,0,0,0,1]), np.array([1,1,0,1,1]), np.array([1,1,0,0,1]), np.array([1,1,1,0,1])]
+        cls.classifiersNames = ["chicken_is_heaven", "chicken_is_heaven", "chicken_is_heaven", "chicken_is_heaven",
+                                "chicken_is_heaven", "chicken_is_heaven", "chicken_is_heaven", "chicken_is_heaven",
+                                "chicken_is_heaven", "chicken_is_heaven", "chicken_is_heaven", "chicken_is_heaven",]
+        cls.classifiersNames2 = ["cheese_is_no_disease", "cheese_is_no_disease", "cheese_is_no_disease",
+                                "cheese_is_no_disease", "cheese_is_no_disease", "cheese_is_no_disease",
+                                "cheese_is_no_disease", "cheese_is_no_disease", "cheese_is_no_disease",
+                                "cheese_is_no_disease", "cheese_is_no_disease", "cheese_is_no_disease"]
+        cls.results = [[flag, [["", [name, "", "", pred]], ["", [name1, "", "", pred1]]], ["", ""]]
+                       for flag, name, pred, name1, pred1 in zip(cls.flags, cls.classifiersNames, cls.preds,
+                                                                 cls.classifiersNames2, cls.preds2)]
+        # cls.results = [[flag, ["", ["", name, "", pred]], ""] for flag, pred, name in
+        #                zip(cls.flags, cls.preds, cls.classifiersNames)]
+        cls.statsIter = 2
+        cls.nbExample = 5
+        cls.nbLabels = 4
+
+    def test_simple(cls):
+        multiclassResults = ExecClassif.analyzeMulticlass(cls.results, cls.statsIter, cls.nbExample, cls.nbLabels)
+        np.testing.assert_array_equal(multiclassResults[1]["chicken_is_heaven"]["labels"], np.array([0,1,2,3,0]))
+
+
+#
+# class Essai(unittest.TestCase):
+#
+#     def setUp(self):
+#         parser = argparse.ArgumentParser(
+#             description='This file is used to benchmark the scores fo multiple classification algorithm on multiview data.',
+#             formatter_class=argparse.ArgumentDefaultsHelpFormatter)
+#
+#         groupStandard = parser.add_argument_group('Standard arguments')
+#         groupStandard.add_argument('-log', action='store_true', help='Use option to activate Logging to Console')
+#         groupStandard.add_argument('--name', metavar='STRING', action='store', help='Name of Database (default: %(default)s)',
+#                                    default='Plausible')
+#         groupStandard.add_argument('--type', metavar='STRING', action='store',
+#                                    help='Type of database : .hdf5 or .csv (default: %(default)s)',
+#                                    default='.hdf5')
+#         groupStandard.add_argument('--views', metavar='STRING', action='store', nargs="+",
+#                                    help='Name of the views selected for learning (default: %(default)s)',
+#                                    default=[''])
+#         groupStandard.add_argument('--pathF', metavar='STRING', action='store', help='Path to the views (default: %(default)s)',
+#                                    default='/home/bbauvin/Documents/Data/Data_multi_omics/')
+#         groupStandard.add_argument('--nice', metavar='INT', action='store', type=int,
+#                                    help='Niceness for the process', default=0)
+#         groupStandard.add_argument('--randomState', metavar='STRING', action='store',
+#                                    help="The random state seed to use or a file where we can find it's get_state", default=None)
+#
+#         groupClass = parser.add_argument_group('Classification arguments')
+#         groupClass.add_argument('--CL_split', metavar='FLOAT', action='store',
+#                                 help='Determine the split between learning and validation sets', type=float,
+#                                 default=0.2)
+#         groupClass.add_argument('--CL_nbFolds', metavar='INT', action='store', help='Number of folds in cross validation',
+#                                 type=int, default=2)
+#         groupClass.add_argument('--CL_nb_class', metavar='INT', action='store', help='Number of classes, -1 for all', type=int,
+#                                 default=2)
+#         groupClass.add_argument('--CL_classes', metavar='STRING', action='store', nargs="+",
+#                                 help='Classes used in the dataset (names of the folders) if not filled, random classes will be '
+#                                      'selected ex. walrus mole leopard', default=["yes", "no"])
+#         groupClass.add_argument('--CL_type', metavar='STRING', action='store', nargs="+",
+#                                 help='Determine whether to use Multiview and/or Monoview, or Benchmark',
+#                                 default=['Benchmark'])
+#         groupClass.add_argument('--CL_algos_monoview', metavar='STRING', action='store', nargs="+",
+#                                 help='Determine which monoview classifier to use if empty, considering all',
+#                                 default=[''])
+#         groupClass.add_argument('--CL_algos_multiview', metavar='STRING', action='store', nargs="+",
+#                                 help='Determine which multiview classifier to use if empty, considering all',
+#                                 default=[''])
+#         groupClass.add_argument('--CL_cores', metavar='INT', action='store', help='Number of cores, -1 for all', type=int,
+#                                 default=2)
+#         groupClass.add_argument('--CL_statsiter', metavar='INT', action='store',
+#                                 help="Number of iteration for each algorithm to mean preds if using multiple cores, it's highly recommended to use statsiter mod(nbCores) = 0",
+#                                 type=int,
+#                                 default=2)
+#         groupClass.add_argument('--CL_metrics', metavar='STRING', action='store', nargs="+",
+#                                 help='Determine which metrics to use, separate metric and configuration with ":".'
+#                                      ' If multiple, separate with space. If no metric is specified, '
+#                                      'considering all with accuracy for classification '
+#                                 , default=[''])
+#         groupClass.add_argument('--CL_metric_princ', metavar='STRING', action='store',
+#                                 help='Determine which metric to use for randomSearch and optimization', default="f1_score")
+#         groupClass.add_argument('--CL_GS_iter', metavar='INT', action='store',
+#                                 help='Determine how many Randomized grid search tests to do', type=int, default=2)
+#         groupClass.add_argument('--CL_HPS_type', metavar='STRING', action='store',
+#                                 help='Determine which hyperparamter search function use', default="randomizedSearch")
+#
+#         groupRF = parser.add_argument_group('Random Forest arguments')
+#         groupRF.add_argument('--CL_RandomForest_trees', metavar='INT', type=int, action='store', help='Number max trees',
+#                              default=25)
+#         groupRF.add_argument('--CL_RandomForest_max_depth', metavar='INT', type=int, action='store',
+#                              help='Max depth for the trees',
+#                              default=5)
+#         groupRF.add_argument('--CL_RandomForest_criterion', metavar='STRING', action='store', help='Criterion for the trees',
+#                              default="entropy")
+#
+#         groupSVMLinear = parser.add_argument_group('Linear SVM arguments')
+#         groupSVMLinear.add_argument('--CL_SVMLinear_C', metavar='INT', type=int, action='store', help='Penalty parameter used',
+#                                     default=1)
+#
+#         groupSVMRBF = parser.add_argument_group('SVW-RBF arguments')
+#         groupSVMRBF.add_argument('--CL_SVMRBF_C', metavar='INT', type=int, action='store', help='Penalty parameter used',
+#                                  default=1)
+#
+#         groupSVMPoly = parser.add_argument_group('Poly SVM arguments')
+#         groupSVMPoly.add_argument('--CL_SVMPoly_C', metavar='INT', type=int, action='store', help='Penalty parameter used',
+#                                   default=1)
+#         groupSVMPoly.add_argument('--CL_SVMPoly_deg', metavar='INT', type=int, action='store', help='Degree parameter used',
+#                                   default=2)
+#
+#         groupAdaboost = parser.add_argument_group('Adaboost arguments')
+#         groupAdaboost.add_argument('--CL_Adaboost_n_est', metavar='INT', type=int, action='store', help='Number of estimators',
+#                                    default=2)
+#         groupAdaboost.add_argument('--CL_Adaboost_b_est', metavar='STRING', action='store', help='Estimators',
+#                                    default='DecisionTreeClassifier')
+#
+#         groupDT = parser.add_argument_group('Decision Trees arguments')
+#         groupDT.add_argument('--CL_DecisionTree_depth', metavar='INT', type=int, action='store',
+#                              help='Determine max depth for Decision Trees', default=3)
+#         groupDT.add_argument('--CL_DecisionTree_criterion', metavar='STRING', action='store',
+#                              help='Determine max depth for Decision Trees', default="entropy")
+#         groupDT.add_argument('--CL_DecisionTree_splitter', metavar='STRING', action='store',
+#                              help='Determine criterion for Decision Trees', default="random")
+#
+#         groupSGD = parser.add_argument_group('SGD arguments')
+#         groupSGD.add_argument('--CL_SGD_alpha', metavar='FLOAT', type=float, action='store',
+#                               help='Determine alpha for SGDClassifier', default=0.1)
+#         groupSGD.add_argument('--CL_SGD_loss', metavar='STRING', action='store',
+#                               help='Determine loss for SGDClassifier', default='log')
+#         groupSGD.add_argument('--CL_SGD_penalty', metavar='STRING', action='store',
+#                               help='Determine penalty for SGDClassifier', default='l2')
+#
+#         groupKNN = parser.add_argument_group('KNN arguments')
+#         groupKNN.add_argument('--CL_KNN_neigh', metavar='INT', type=int, action='store',
+#                               help='Determine number of neighbors for KNN', default=1)
+#         groupKNN.add_argument('--CL_KNN_weights', metavar='STRING', action='store',
+#                               help='Determine number of neighbors for KNN', default="distance")
+#         groupKNN.add_argument('--CL_KNN_algo', metavar='STRING', action='store',
+#                               help='Determine number of neighbors for KNN', default="auto")
+#         groupKNN.add_argument('--CL_KNN_p', metavar='INT', type=int, action='store',
+#                               help='Determine number of neighbors for KNN', default=1)
+#
+#         groupSCM = parser.add_argument_group('SCM arguments')
+#         groupSCM.add_argument('--CL_SCM_max_rules', metavar='INT', type=int, action='store',
+#                               help='Max number of rules for SCM', default=1)
+#         groupSCM.add_argument('--CL_SCM_p', metavar='FLOAT', type=float, action='store',
+#                               help='Max number of rules for SCM', default=1.0)
+#         groupSCM.add_argument('--CL_SCM_model_type', metavar='STRING', action='store',
+#                               help='Max number of rules for SCM', default="conjunction")
+#
+#         groupMumbo = parser.add_argument_group('Mumbo arguments')
+#         groupMumbo.add_argument('--MU_types', metavar='STRING', action='store', nargs="+",
+#                                 help='Determine which monoview classifier to use with Mumbo',
+#                                 default=[''])
+#         groupMumbo.add_argument('--MU_config', metavar='STRING', action='store', nargs='+',
+#                                 help='Configuration for the monoview classifier in Mumbo separate each classifier with sapce and each argument with:',
+#                                 default=[''])
+#         groupMumbo.add_argument('--MU_iter', metavar='INT', action='store', nargs=3,
+#                                 help='Max number of iteration, min number of iteration, convergence threshold', type=float,
+#                                 default=[10, 1, 0.01])
+#         groupMumbo.add_argument('--MU_combination', action='store_true',
+#                                 help='Try all the monoview classifiers combinations for each view',
+#                                 default=False)
+#
+#
+#         groupFusion = parser.add_argument_group('Fusion arguments')
+#         groupFusion.add_argument('--FU_types', metavar='STRING', action='store', nargs="+",
+#                                  help='Determine which type of fusion to use',
+#                                  default=[''])
+#         groupEarlyFusion = parser.add_argument_group('Early Fusion arguments')
+#         groupEarlyFusion.add_argument('--FU_early_methods', metavar='STRING', action='store', nargs="+",
+#                                       help='Determine which early fusion method of fusion to use',
+#                                       default=[''])
+#         groupEarlyFusion.add_argument('--FU_E_method_configs', metavar='STRING', action='store', nargs='+',
+#                                       help='Configuration for the early fusion methods separate '
+#                                            'method by space and values by :',
+#                                       default=[''])
+#         groupEarlyFusion.add_argument('--FU_E_cl_config', metavar='STRING', action='store', nargs='+',
+#                                       help='Configuration for the monoview classifiers used separate classifier by space '
+#                                            'and configs must be of form argument1_name:value,argument2_name:value',
+#                                       default=[''])
+#         groupEarlyFusion.add_argument('--FU_E_cl_names', metavar='STRING', action='store', nargs='+',
+#                                       help='Name of the classifiers used for each early fusion method', default=[''])
+#
+#         groupLateFusion = parser.add_argument_group('Late Early Fusion arguments')
+#         groupLateFusion.add_argument('--FU_late_methods', metavar='STRING', action='store', nargs="+",
+#                                      help='Determine which late fusion method of fusion to use',
+#                                      default=[''])
+#         groupLateFusion.add_argument('--FU_L_method_config', metavar='STRING', action='store', nargs='+',
+#                                      help='Configuration for the fusion method', default=[''])
+#         groupLateFusion.add_argument('--FU_L_cl_config', metavar='STRING', action='store', nargs='+',
+#                                      help='Configuration for the monoview classifiers used', default=[''])
+#         groupLateFusion.add_argument('--FU_L_cl_names', metavar='STRING', action='store', nargs="+",
+#                                      help='Names of the classifier used for late fusion', default=[''])
+#         groupLateFusion.add_argument('--FU_L_select_monoview', metavar='STRING', action='store',
+#                                      help='Determine which method to use to select the monoview classifiers',
+#                                      default="intersect")
+#         self.args = parser.parse_args([])
+
+
+# def suite():
+#     suite = unittest.TestSuite()
+#     suite.addTest(Test_initBenchmark('test_initKWARGSFunc_no_monoview'))
+#     # suite.addTest(WidgetTestCase('test_widget_resize'))
+#     return suite
\ No newline at end of file