diff --git a/config_files/config.ini b/config_files/config.ini
deleted file mode 100644
index fa2c84d9f058c2b605dbabfb14e444e9584e7bde..0000000000000000000000000000000000000000
--- a/config_files/config.ini
+++ /dev/null
@@ -1,142 +0,0 @@
-# The base configuration of the benchamrk
-[Base]
-log =           bool ; yes
-name =          list_str ; Plausible
-label =         str ; _
-type =          str ; .hdf5
-views =         list_str ; all
-pathF =         str ; ../Data/
-nice =          int ; 0
-randomState =   str ; 42
-nbCores =       int ; 1
-full =          bool ; yes
-debug =         bool ; yes
-add_noise =     bool ; yes
-noise_std =     list_float ; 0.0
-res_dir =       str ; ../Results/
-
-
-# All the classification-realted configuration options
-[Classification]
-multiclassMethod = str ; oneVersusOne
-split = float ; 0.8
-nbFolds = int ; 2
-nbClass = int ; 2
-classes = list_str ; yes no
-type = list_str ; multiview
-algos_monoview = list_str ; all
-algos_multiview = list_str ; all
-statsiter = int ; 2
-metrics = list_str ; accuracy_score f1_score
-metric_princ = str ; f1_score
-HPS_type = str ; randomized_search
-HPS_iter = int ; 2
-
-
-
-#####################################
-# The Monoview Classifier arguments #
-#####################################
-
-[random_forest]
-n_estimators = list_int ; 25
-max_depth = list_int ; 3
-criterion = list_str ; entropy
-
-[svm_linear]
-C = list_float ; 1
-
-[svm_rbf]
-C = list_float ; 1
-
-[svm_poly]
-C = list_float ; 1
-degree = list_int ; 2
-
-[adaboost]
-n_estimators = list_int ; 50
-base_estimator = list_str ; DecisionTreeClassifier
-
-[adaboost_pregen]
-n_estimators = list_int ; 50
-base_estimator = list_str ; DecisionTreeClassifier
-n_stumps = list_int ; 1
-
-[adaboost_graalpy]
-n_iterations = list_int ; 50
-n_stumps = list_int ; 1
-
-[decision_tree]
-max_depth = list_int ; 10
-criterion = list_str ; gini
-splitter = list_str ; best
-
-[decision_tree_pregen]
-max_depth = list_int ; 10
-criterion = list_str ; gini
-splitter = list_str ; best
-n_stumps = list_int ; 1
-
-[sgd]
-loss = list_str ; hinge
-penalty = list_str ; l2
-alpha = list_float ; 0.0001
-
-[knn]
-n_neighbors = list_int ; 5
-weights = list_str ; uniform
-algorithm = list_str ; auto
-
-[scm]
-model_type = list_str ; conjunction
-max_rules = list_int ; 10
-p = list_float ; 0.1
-
-[scm_pregen]
-model_type = list_str ; conjunction
-max_rules = list_int ; 10
-p = list_float ; 0.1
-n_stumps = list_int ; 1
-
-[cq_boost]
-mu = list_float ; 0.01
-epsilon = list_float ; 1e-06
-n_max_iterations = list_int ; 5
-n_stumps = list_int ; 1
-
-[cg_desc]
-n_max_iterations = list_int ; 10
-n_stumps = list_int ; 1
-
-[cb_boost]
-n_max_iterations = list_int ; 10
-n_stumps = list_int ; 1
-
-[min_cq_graalpy]
-mu = list_float ; 0.01
-n_stumps_per_attribute = list_int ; 1
-
-[min_cq_graalpy_tree]
-mu = list_float ; 0.01
-n_stumps_per_attribute = list_int ; 1
-max_depth = list_int ; 2
-
-[lasso]
-alpha = list_float ; 1
-max_iter = list_int ; 2
-
-[gradient_boosting]
-n_estimators = list_int ; 2
-
-[min_cq]
-mu = list_float ; 0.01
-n_stumps_per_attribute = list_int ; 1
-
-
-######################################
-# The Multiview Classifier arguments #
-######################################
-
-[weighted_linear_early_fusion]
-view_weights = list_str ; None
-monoview_classifier = list_str ; decision_tree
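
The `.ini` file above is deleted in favour of a YAML configuration (see the `get_the_args` change in `utils/configuration.py` further down). The repository's `config.yml` is not part of this diff, so the following is only a hedged sketch of the equivalent structure, assuming the `[Base]` and `[Classification]` options carry over with the lower-case key names that `exec_classif.py` reads (`pathf`, `hps_iter`, ...); the per-classifier sections would follow the same pattern.

    # Hedged sketch: the old .ini options re-expressed as the structure that
    # utils/configuration.get_the_args() now loads from YAML. Key casing follows the
    # accesses visible in this diff (args["Base"]["pathf"], args["Classification"]["hps_iter"]);
    # the other key names are assumptions.
    import yaml

    args = {
        "Base": {"log": True, "name": ["Plausible"], "label": "_", "type": ".hdf5",
                 "views": ["all"], "pathf": "../Data/", "nice": 0, "random_state": 42,
                 "nb_cores": 1, "full": True, "debug": True, "add_noise": True,
                 "noise_std": [0.0], "res_dir": "../Results/"},
        "Classification": {"multiclass_method": "oneVersusOne", "split": 0.8,
                           "nb_folds": 2, "nb_class": 2, "classes": ["yes", "no"],
                           "type": ["multiview"], "algos_monoview": ["all"],
                           "algos_multiview": ["all"], "stats_iter": 2,
                           "metrics": ["accuracy_score", "f1_score"],
                           "metric_princ": "f1_score", "hps_type": "randomized_search",
                           "hps_iter": 2},
        # per-classifier sections ([random_forest], [svm_linear], ...) would follow the same pattern
    }

    print(yaml.safe_dump(args, sort_keys=False))  # what an equivalent config.yml could contain
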
diff --git a/multiview_platform/mono_multi_view_classifiers/exec_classif.py b/multiview_platform/mono_multi_view_classifiers/exec_classif.py
index 79a8dc98bf0734ddf0f271fca729926ee3099eb1..c411fc8b1d0d3916b11262eb00ddfd2f4ab702fd 100644
--- a/multiview_platform/mono_multi_view_classifiers/exec_classif.py
+++ b/multiview_platform/mono_multi_view_classifiers/exec_classif.py
@@ -539,14 +539,14 @@ def exec_one_benchmark_multicore(nb_cores=-1, labels_dictionary=None,
     for step_index in range(nb_multicore_to_do):
         results_multiview += Parallel(n_jobs=nb_cores)(
             delayed(exec_multiview_multicore)(directory, core_index, args["Base"]["name"],
-                                             classification_indices, k_folds,
-                                             args["Base"]["type"], args["Base"]["pathf"],
-                                             labels_dictionary, random_state,
-                                             labels,
-                                             hyper_param_search=hyper_param_search,
-                                             metrics=metrics,
-                                             nIter=args["Classification"]["hps_iter"],
-                                             **
+                                              classification_indices, k_folds,
+                                              args["Base"]["type"], args["Base"]["pathf"],
+                                              labels_dictionary, random_state,
+                                              labels,
+                                              hyper_param_search=hyper_param_search,
+                                              metrics=metrics,
+                                              n_iter=args["Classification"]["hps_iter"],
+                                              **
                                              argument_dictionaries["multiview"][
                                                  step_index * nb_cores + core_index])
             for core_index in
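
Besides re-aligning the continuation lines, this hunk renames the `nIter` keyword to `n_iter` so that it matches the parameter name of `exec_multiview_multicore`. A minimal hedged sketch of the joblib pattern involved (the stand-in function below is hypothetical; only the keyword spelling matters):

    # delayed() captures arguments verbatim, so the keyword passed here must match the
    # callee's parameter name (n_iter, not nIter). fake_exec_multiview is a stand-in.
    from joblib import Parallel, delayed

    def fake_exec_multiview(directory, core_index, name, n_iter=1, **kwargs):
        return (core_index, n_iter)

    results = Parallel(n_jobs=2)(
        delayed(fake_exec_multiview)("res/", core_index, "Plausible", n_iter=2)
        for core_index in range(2))
    print(results)  # [(0, 2), (1, 2)]
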
diff --git a/multiview_platform/mono_multi_view_classifiers/monoview/exec_classif_mono_view.py b/multiview_platform/mono_multi_view_classifiers/monoview/exec_classif_mono_view.py
index 6d9ebf6f5551f12a9262eada53191124c4ea3004..4492cdff468546bfa28e1332528f4046ec7636aa 100644
--- a/multiview_platform/mono_multi_view_classifiers/monoview/exec_classif_mono_view.py
+++ b/multiview_platform/mono_multi_view_classifiers/monoview/exec_classif_mono_view.py
@@ -180,7 +180,7 @@ def init_train_test(X, Y, classificationIndices):
 
 def getHPs(classifierModule, hyper_param_search, nIter, classifier_module_name,
            classifier_class_name, X_train, y_train,
-           randomState,
+           random_state,
            outputFileName, KFolds, nbCores, metrics, kwargs):
     if hyper_param_search != "None":
         logging.debug(
@@ -188,7 +188,7 @@ def getHPs(classifierModule, hyper_param_search, nIter, classifier_module_name,
                 nIter) + " iterations for " + classifier_module_name)
         classifierHPSearch = getattr(hyper_parameter_search, hyper_param_search)
         clKWARGS, testFoldsPreds = classifierHPSearch(X_train, y_train, "monoview",
-                                                      randomState,
+                                                      random_state,
                                                       outputFileName,
                                                       classifierModule,
                                                       classifier_class_name,
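
`getHPs` resolves the search strategy with `getattr(hyper_parameter_search, hyper_param_search)`, so the string coming from the configuration (e.g. "randomized_search") must name a function that actually exists in `utils/hyper_parameter_search.py`; the rename of `randomized_search_x` to `randomized_search` later in this diff restores that invariant. A short hedged sketch of the dispatch (the simplified call is an assumption, not the full signature):

    # getattr-based dispatch, as in getHPs() above; hps_name would come from the config.
    from multiview_platform.mono_multi_view_classifiers.utils import hyper_parameter_search

    hps_name = "randomized_search"
    search_fn = getattr(hyper_parameter_search, hps_name)
    # clKWARGS, testFoldsPreds = search_fn(X_train, y_train, "monoview", random_state, ...)
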
diff --git a/multiview_platform/mono_multi_view_classifiers/multiview/additions/diversity_utils.py b/multiview_platform/mono_multi_view_classifiers/multiview/additions/diversity_utils.py
index e4919f20b48c08205b7a5aa2dec1890a1c5a3424..0a691f664a15f27d8c3e3e2f98dff7bd236d2918 100644
--- a/multiview_platform/mono_multi_view_classifiers/multiview/additions/diversity_utils.py
+++ b/multiview_platform/mono_multi_view_classifiers/multiview/additions/diversity_utils.py
@@ -1,317 +1,317 @@
-import itertools
-import math
-import os
-
-import numpy as np
-
-from ...utils.multiclass import isBiclass, genMulticlassMonoviewDecision
-
-
-def getClassifiersDecisions(allClassifersNames, views_indices, resultsMonoview):
-    """
-    This function gets the monoview classifiers decisions from resultsMonoview.
-    If no HP optimization is done, there is just one fold, the training set.
-    The classifiersDecisions variable is ordered as :
-    classifiersDecisions[viewIndex, classifierIndex, foldIndex, exampleIndex]
-    And the classifiers_names variable is ordered as :
-    classifiers_names[viewIndex][classifierIndex]
-    """
-    nbViews = len(views_indices)
-    nbClassifiers = len(allClassifersNames)
-    classifiersNames = [[] for _ in views_indices]
-    more_than_one_fold = len(resultsMonoview[0].test_folds_preds.shape) is not 1
-    if more_than_one_fold:
-        nbFolds = resultsMonoview[0].test_folds_preds.shape[0]
-        foldsLen = resultsMonoview[0].test_folds_preds.shape[1]
-    else:
-        nbFolds = 1
-        foldsLen = resultsMonoview[0].test_folds_preds.shape[0]
-
-    classifiersDecisions = np.zeros((nbViews, nbClassifiers, nbFolds, foldsLen))
-
-    for resultMonoview in resultsMonoview:
-        if resultMonoview.classifier_name in classifiersNames[
-            views_indices.index(resultMonoview.view_index)]:
-            pass
-        else:
-            classifiersNames[
-                views_indices.index(resultMonoview.view_index)].append(
-                resultMonoview.classifier_name)
-        classifierIndex = classifiersNames[
-            views_indices.index(resultMonoview.view_index)].index(
-            resultMonoview.classifier_name)
-        classifiersDecisions[views_indices.index(
-            resultMonoview.view_index), classifierIndex] = resultMonoview.test_folds_preds
-    # else:
-    #     train_len = resultsMonoview[0].test_folds_preds.shape[0]
-    #     classifiersDecisions = np.zeros((nbViews, nbClassifiers, 1, train_len))
-    #     for resultMonoview in resultsMonoview:
-    #         if resultMonoview.classifier_name in classifiersNames[viewsIndices.index(resultMonoview[0])]:
-    #             pass
-    #         else:
-    #             classifiersNames[viewsIndices.index(resultMonoview[0])].append(resultMonoview[1][0])
-    #         classifierIndex = classifiersNames[viewsIndices.index(resultMonoview[0])].index(resultMonoview[1][0])
-    #         classifiersDecisions[viewsIndices.index(resultMonoview[0]), classifierIndex] = resultMonoview[1][6]
-    return classifiersDecisions, classifiersNames
-
-
-def couple_div_measure(classifiersNames, classifiersDecisions, measurement,
-                       foldsGroudTruth):
-    """
-    This function is used to get the max of a couple diversity measurement,passed as an argument
-    It generates all possible combinations and all the couples to estimate the diversity on a combination
-    The best combination is the one that maximize the measurement.
-    """
-
-    nbViews, nbClassifiers, nbFolds, foldsLen = classifiersDecisions.shape
-    combinations = itertools.combinations_with_replacement(range(nbClassifiers),
-                                                           nbViews)
-    nbCombinations = int(
-        math.factorial(nbClassifiers + nbViews - 1) / math.factorial(
-            nbViews) / math.factorial(nbClassifiers - 1))
-    div_measure = np.zeros(nbCombinations)
-    combis = np.zeros((nbCombinations, nbViews), dtype=int)
-
-    for combinationsIndex, combination in enumerate(combinations):
-        combis[combinationsIndex] = combination
-        combiWithView = [(viewIndex, combiIndex) for viewIndex, combiIndex in
-                         enumerate(combination)]
-        binomes = itertools.combinations(combiWithView, 2)
-        nbBinomes = int(
-            math.factorial(nbViews) / 2 / math.factorial(nbViews - 2))
-        couple_diversities = np.zeros(nbBinomes)
-        for binomeIndex, binome in enumerate(binomes):
-            (viewIndex1, classifierIndex1), (
-            viewIndex2, classifierIndex2) = binome
-            folds_couple_diversity = np.mean(
-                measurement(classifiersDecisions[viewIndex1, classifierIndex1],
-                            classifiersDecisions[viewIndex2, classifierIndex2],
-                            foldsGroudTruth)
-                , axis=1)
-            couple_diversities[binomeIndex] = np.mean(folds_couple_diversity)
-        div_measure[combinationsIndex] = np.mean(couple_diversities)
-    bestCombiIndex = np.argmax(div_measure)
-    bestCombination = combis[bestCombiIndex]
-
-    return [classifiersNames[viewIndex][index] for viewIndex, index in
-            enumerate(bestCombination)], div_measure[bestCombiIndex]
-
-
-def global_div_measure(classifiersNames, classifiersDecisions, measurement,
-                       foldsGroudTruth):
-    """
-    This function is used to get the max of a diversity measurement,passed as an argument
-    It generates all possible combinations to estimate the diversity on a combination
-    The best combination is the one that maximize the measurement.
-    """
-
-    nbViews, nbClassifiers, nbFolds, foldsLen = classifiersDecisions.shape
-    combinations = itertools.combinations_with_replacement(range(nbClassifiers),
-                                                           nbViews)
-    nbCombinations = int(
-        math.factorial(nbClassifiers + nbViews - 1) / math.factorial(
-            nbViews) / math.factorial(
-            nbClassifiers - 1))
-    div_measure = np.zeros(nbCombinations)
-    combis = np.zeros((nbCombinations, nbViews), dtype=int)
-    for combinationsIndex, combination in enumerate(combinations):
-        combis[combinationsIndex] = combination
-        div_measure[combinationsIndex] = measurement(classifiersDecisions,
-                                                     combination,
-                                                     foldsGroudTruth, foldsLen)
-    bestCombiIndex = np.argmax(div_measure)
-    bestCombination = combis[bestCombiIndex]
-
-    return [classifiersNames[viewIndex][index] for viewIndex, index in
-            enumerate(bestCombination)], div_measure[
-               bestCombiIndex]
-
-
-def CQ_div_measure(classifiersNames, classifiersDecisions, measurement,
-                   foldsGroudTruth):
-    """
-    This function is used to measure a pseudo-CQ measurement based on the minCq algorithm.
-    It's a mix between couple_div_measure and global_div_measure that uses multiple measurements.
-    """
-    nbViews, nbClassifiers, nbFolds, foldsLen = classifiersDecisions.shape
-    combinations = itertools.combinations_with_replacement(range(nbClassifiers),
-                                                           nbViews)
-    nbCombinations = int(
-        math.factorial(nbClassifiers + nbViews - 1) / math.factorial(
-            nbViews) / math.factorial(nbClassifiers - 1))
-    div_measure = np.zeros(nbCombinations)
-    combis = np.zeros((nbCombinations, nbViews), dtype=int)
-
-    for combinationsIndex, combination in enumerate(combinations):
-        combis[combinationsIndex] = combination
-        combiWithView = [(viewIndex, combiIndex) for viewIndex, combiIndex in
-                         enumerate(combination)]
-        binomes = itertools.combinations(combiWithView, 2)
-        nbBinomes = int(
-            math.factorial(nbViews) / 2 / math.factorial(nbViews - 2))
-        disagreement = np.zeros(nbBinomes)
-        div_measure[combinationsIndex] = measurement[1](classifiersDecisions,
-                                                        combination,
-                                                        foldsGroudTruth,
-                                                        foldsLen)
-        for binomeIndex, binome in enumerate(binomes):
-            (viewIndex1, classifierIndex1), (
-            viewIndex2, classifierIndex2) = binome
-            nbDisagree = np.sum(measurement[0](
-                classifiersDecisions[viewIndex1, classifierIndex1],
-                classifiersDecisions[viewIndex2, classifierIndex2],
-                foldsGroudTruth)
-                                , axis=1) / float(foldsLen)
-            disagreement[binomeIndex] = np.mean(nbDisagree)
-        div_measure[combinationsIndex] /= float(np.mean(disagreement))
-    bestCombiIndex = np.argmin(div_measure)
-    bestCombination = combis[bestCombiIndex]
-
-    return [classifiersNames[viewIndex][index] for viewIndex, index in
-            enumerate(bestCombination)], div_measure[
-               bestCombiIndex]
-
-
-def getFoldsGroundTruth(directory, folds=True):
-    """This function is used to get the labels of each fold example used in the measurements
-    foldsGroundTruth is formatted as
-    foldsGroundTruth[foldIndex, exampleIndex]"""
-    if folds:
-        foldsFilesNames = os.listdir(directory + "folds/")
-        foldLen = len(np.genfromtxt(directory + "folds/" + foldsFilesNames[0],
-                                    delimiter=','))
-        foldsGroudTruth = np.zeros((len(foldsFilesNames), foldLen), dtype=int)
-        for fileName in foldsFilesNames:
-            foldIndex = int(fileName[-5])
-            foldsGroudTruth[foldIndex] = np.genfromtxt(
-                directory + "folds/" + fileName, delimiter=',')[:foldLen]
-        return foldsGroudTruth
-    else:
-        train_labels = np.genfromtxt(directory + "train_labels.csv",
-                                     delimiter=',')
-        foldsGroudTruth = np.zeros((1, train_labels.shape[0]))
-        foldsGroudTruth[0] = train_labels
-        return foldsGroudTruth
-
-
-def getArgs(args, benchmark, views, viewsIndices, randomState,
-            directory, resultsMonoview, classificationIndices, measurement,
-            name):
-    """This function is a general function to get the args for all the measurements used"""
-    if len(resultsMonoview[0].test_folds_preds.shape) is not 1:
-        foldsGroundTruth = getFoldsGroundTruth(directory, folds=True)
-    else:
-        foldsGroundTruth = getFoldsGroundTruth(directory, folds=False)
-    monoviewClassifierModulesNames = benchmark["monoview"]
-    classifiersDecisions, classifiersNames = getClassifiersDecisions(
-        monoviewClassifierModulesNames,
-        viewsIndices,
-        resultsMonoview)
-    if name in ['disagree_fusion', 'double_fault_fusion']:
-        classifiersNames, div_measure = couple_div_measure(classifiersNames,
-                                                           classifiersDecisions,
-                                                           measurement,
-                                                           foldsGroundTruth)
-    elif name == "pseudo_cq_fusion":
-        classifiersNames, div_measure = CQ_div_measure(classifiersNames,
-                                                       classifiersDecisions,
-                                                       measurement,
-                                                       foldsGroundTruth)
-    else:
-        classifiersNames, div_measure = global_div_measure(classifiersNames,
-                                                           classifiersDecisions,
-                                                           measurement,
-                                                           foldsGroundTruth)
-    multiclass_preds = [monoviewResult.y_test_multiclass_pred for monoviewResult
-                        in resultsMonoview]
-    if isBiclass(multiclass_preds):
-        monoviewDecisions = np.array(
-            [monoviewResult.full_labels_pred for monoviewResult in
-             resultsMonoview
-             if
-             classifiersNames[viewsIndices.index(monoviewResult.view_index)] ==
-             monoviewResult.classifier_name])
-    else:
-        monoviewDecisions = np.array(
-            [genMulticlassMonoviewDecision(monoviewResult,
-                                           classificationIndices) for
-             monoviewResult in
-             resultsMonoview if classifiersNames[viewsIndices.index(
-                monoviewResult.view_index)] == monoviewResult.classifier_name])
-    argumentsList = []
-    arguments = {"CL_type": name,
-                 "views": views,
-                 "NB_VIEW": len(views),
-                 "viewsIndices": viewsIndices,
-                 "NB_CLASS": len(args.CL_classes),
-                 "LABELS_NAMES": args.CL_classes,
-                 name + "KWARGS": {
-                     "weights": args.DGF_weights,
-                     "classifiersNames": classifiersNames,
-                     "monoviewDecisions": monoviewDecisions,
-                     "nbCLass": len(args.CL_classes),
-                     "div_measure": div_measure
-                 }
-                 }
-    argumentsList.append(arguments)
-    return argumentsList
-
-
-def genParamsSets(classificationKWARGS, randomState, nIter=1):
-    """Used to generate parameters sets for the random hyper parameters optimization function"""
-    weights = [
-        randomState.random_sample(len(classificationKWARGS["classifiersNames"]))
-        for _ in range(nIter)]
-    nomralizedWeights = [[weightVector / np.sum(weightVector)] for weightVector
-                         in weights]
-    return nomralizedWeights
-
-
-class DiversityFusionClass:
-    """This is a parent class for all the diversity fusion based classifiers."""
-
-    def __init__(self, randomState, NB_CORES=1, **kwargs):
-        """Used to init the instances"""
-        if kwargs["weights"] == []:
-            self.weights = [1.0 / len(kwargs["classifiersNames"]) for _ in
-                            range(len(kwargs["classifiersNames"]))]
-        else:
-            self.weights = np.array(kwargs["weights"]) / np.sum(
-                np.array(kwargs["weights"]))
-        self.monoviewDecisions = kwargs["monoviewDecisions"]
-        self.classifiersNames = kwargs["classifiersNames"]
-        self.nbClass = kwargs["nbCLass"]
-        self.div_measure = kwargs["div_measure"]
-
-    def setParams(self, paramsSet):
-        """ Used to set the weights"""
-        self.weights = paramsSet[0]
-
-    def fit_hdf5(self, DATASET, labels, trainIndices=None, viewsIndices=None,
-                 metric=["f1_score", None]):
-        """No need to fit as the monoview classifiers are already fitted"""
-        pass
-
-    def predict_hdf5(self, DATASET, usedIndices=None, viewsIndices=None):
-        """Just a weighted majority vote"""
-        if usedIndices is None:
-            usedIndices = range(DATASET.get("Metadata").attrs["datasetLength"])
-        votes = np.zeros((len(usedIndices), self.nbClass), dtype=float)
-        for usedIndex, exampleIndex in enumerate(usedIndices):
-            for monoviewDecisionIndex, monoviewDecision in enumerate(
-                    self.monoviewDecisions):
-                votes[usedIndex, monoviewDecision[
-                    exampleIndex]] += 1  # self.weights[monoviewDecisionIndex]
-        predictedLabels = np.argmax(votes, axis=1)
-        return predictedLabels
-
-    def predict_probas_hdf5(self, DATASET, usedIndices=None):
-        pass
-
-    def getConfigString(self, classificationKWARGS):
-        return "weights : " + ", ".join(map(str, list(self.weights)))
-
-    def getSpecificAnalysis(self, classificationKWARGS):
-        stringAnalysis = "Classifiers used for each view : " + ', '.join(
-            self.classifiersNames)
-        return stringAnalysis
+# import itertools
+# import math
+# import os
+#
+# import numpy as np
+#
+# from ...utils.multiclass import isBiclass, genMulticlassMonoviewDecision
+#
+#
+# def getClassifiersDecisions(allClassifersNames, views_indices, resultsMonoview):
+#     """
+#     This function gets the monoview classifiers decisions from resultsMonoview.
+#     If no HP optimization is done, there is just one fold, the training set.
+#     The classifiersDecisions variable is ordered as :
+#     classifiersDecisions[viewIndex, classifierIndex, foldIndex, exampleIndex]
+#     And the classifiers_names variable is ordered as :
+#     classifiers_names[viewIndex][classifierIndex]
+#     """
+#     nbViews = len(views_indices)
+#     nbClassifiers = len(allClassifersNames)
+#     classifiersNames = [[] for _ in views_indices]
+#     more_than_one_fold = len(resultsMonoview[0].test_folds_preds.shape) is not 1
+#     if more_than_one_fold:
+#         nbFolds = resultsMonoview[0].test_folds_preds.shape[0]
+#         foldsLen = resultsMonoview[0].test_folds_preds.shape[1]
+#     else:
+#         nbFolds = 1
+#         foldsLen = resultsMonoview[0].test_folds_preds.shape[0]
+#
+#     classifiersDecisions = np.zeros((nbViews, nbClassifiers, nbFolds, foldsLen))
+#
+#     for resultMonoview in resultsMonoview:
+#         if resultMonoview.classifier_name in classifiersNames[
+#             views_indices.index(resultMonoview.view_index)]:
+#             pass
+#         else:
+#             classifiersNames[
+#                 views_indices.index(resultMonoview.view_index)].append(
+#                 resultMonoview.classifier_name)
+#         classifierIndex = classifiersNames[
+#             views_indices.index(resultMonoview.view_index)].index(
+#             resultMonoview.classifier_name)
+#         classifiersDecisions[views_indices.index(
+#             resultMonoview.view_index), classifierIndex] = resultMonoview.test_folds_preds
+#     # else:
+#     #     train_len = resultsMonoview[0].test_folds_preds.shape[0]
+#     #     classifiersDecisions = np.zeros((nbViews, nbClassifiers, 1, train_len))
+#     #     for resultMonoview in resultsMonoview:
+#     #         if resultMonoview.classifier_name in classifiersNames[viewsIndices.index(resultMonoview[0])]:
+#     #             pass
+#     #         else:
+#     #             classifiersNames[viewsIndices.index(resultMonoview[0])].append(resultMonoview[1][0])
+#     #         classifierIndex = classifiersNames[viewsIndices.index(resultMonoview[0])].index(resultMonoview[1][0])
+#     #         classifiersDecisions[viewsIndices.index(resultMonoview[0]), classifierIndex] = resultMonoview[1][6]
+#     return classifiersDecisions, classifiersNames
+#
+#
+# def couple_div_measure(classifiersNames, classifiersDecisions, measurement,
+#                        foldsGroudTruth):
+#     """
+#     This function is used to get the max of a couple diversity measurement,passed as an argument
+#     It generates all possible combinations and all the couples to estimate the diversity on a combination
+#     The best combination is the one that maximize the measurement.
+#     """
+#
+#     nbViews, nbClassifiers, nbFolds, foldsLen = classifiersDecisions.shape
+#     combinations = itertools.combinations_with_replacement(range(nbClassifiers),
+#                                                            nbViews)
+#     nbCombinations = int(
+#         math.factorial(nbClassifiers + nbViews - 1) / math.factorial(
+#             nbViews) / math.factorial(nbClassifiers - 1))
+#     div_measure = np.zeros(nbCombinations)
+#     combis = np.zeros((nbCombinations, nbViews), dtype=int)
+#
+#     for combinationsIndex, combination in enumerate(combinations):
+#         combis[combinationsIndex] = combination
+#         combiWithView = [(viewIndex, combiIndex) for viewIndex, combiIndex in
+#                          enumerate(combination)]
+#         binomes = itertools.combinations(combiWithView, 2)
+#         nbBinomes = int(
+#             math.factorial(nbViews) / 2 / math.factorial(nbViews - 2))
+#         couple_diversities = np.zeros(nbBinomes)
+#         for binomeIndex, binome in enumerate(binomes):
+#             (viewIndex1, classifierIndex1), (
+#             viewIndex2, classifierIndex2) = binome
+#             folds_couple_diversity = np.mean(
+#                 measurement(classifiersDecisions[viewIndex1, classifierIndex1],
+#                             classifiersDecisions[viewIndex2, classifierIndex2],
+#                             foldsGroudTruth)
+#                 , axis=1)
+#             couple_diversities[binomeIndex] = np.mean(folds_couple_diversity)
+#         div_measure[combinationsIndex] = np.mean(couple_diversities)
+#     bestCombiIndex = np.argmax(div_measure)
+#     bestCombination = combis[bestCombiIndex]
+#
+#     return [classifiersNames[viewIndex][index] for viewIndex, index in
+#             enumerate(bestCombination)], div_measure[bestCombiIndex]
+#
+#
+# def global_div_measure(classifiersNames, classifiersDecisions, measurement,
+#                        foldsGroudTruth):
+#     """
+#     This function is used to get the max of a diversity measurement,passed as an argument
+#     It generates all possible combinations to estimate the diversity on a combination
+#     The best combination is the one that maximize the measurement.
+#     """
+#
+#     nbViews, nbClassifiers, nbFolds, foldsLen = classifiersDecisions.shape
+#     combinations = itertools.combinations_with_replacement(range(nbClassifiers),
+#                                                            nbViews)
+#     nbCombinations = int(
+#         math.factorial(nbClassifiers + nbViews - 1) / math.factorial(
+#             nbViews) / math.factorial(
+#             nbClassifiers - 1))
+#     div_measure = np.zeros(nbCombinations)
+#     combis = np.zeros((nbCombinations, nbViews), dtype=int)
+#     for combinationsIndex, combination in enumerate(combinations):
+#         combis[combinationsIndex] = combination
+#         div_measure[combinationsIndex] = measurement(classifiersDecisions,
+#                                                      combination,
+#                                                      foldsGroudTruth, foldsLen)
+#     bestCombiIndex = np.argmax(div_measure)
+#     bestCombination = combis[bestCombiIndex]
+#
+#     return [classifiersNames[viewIndex][index] for viewIndex, index in
+#             enumerate(bestCombination)], div_measure[
+#                bestCombiIndex]
+#
+#
+# def CQ_div_measure(classifiersNames, classifiersDecisions, measurement,
+#                    foldsGroudTruth):
+#     """
+#     This function is used to measure a pseudo-CQ measurement based on the minCq algorithm.
+#     It's a mix between couple_div_measure and global_div_measure that uses multiple measurements.
+#     """
+#     nbViews, nbClassifiers, nbFolds, foldsLen = classifiersDecisions.shape
+#     combinations = itertools.combinations_with_replacement(range(nbClassifiers),
+#                                                            nbViews)
+#     nbCombinations = int(
+#         math.factorial(nbClassifiers + nbViews - 1) / math.factorial(
+#             nbViews) / math.factorial(nbClassifiers - 1))
+#     div_measure = np.zeros(nbCombinations)
+#     combis = np.zeros((nbCombinations, nbViews), dtype=int)
+#
+#     for combinationsIndex, combination in enumerate(combinations):
+#         combis[combinationsIndex] = combination
+#         combiWithView = [(viewIndex, combiIndex) for viewIndex, combiIndex in
+#                          enumerate(combination)]
+#         binomes = itertools.combinations(combiWithView, 2)
+#         nbBinomes = int(
+#             math.factorial(nbViews) / 2 / math.factorial(nbViews - 2))
+#         disagreement = np.zeros(nbBinomes)
+#         div_measure[combinationsIndex] = measurement[1](classifiersDecisions,
+#                                                         combination,
+#                                                         foldsGroudTruth,
+#                                                         foldsLen)
+#         for binomeIndex, binome in enumerate(binomes):
+#             (viewIndex1, classifierIndex1), (
+#             viewIndex2, classifierIndex2) = binome
+#             nbDisagree = np.sum(measurement[0](
+#                 classifiersDecisions[viewIndex1, classifierIndex1],
+#                 classifiersDecisions[viewIndex2, classifierIndex2],
+#                 foldsGroudTruth)
+#                                 , axis=1) / float(foldsLen)
+#             disagreement[binomeIndex] = np.mean(nbDisagree)
+#         div_measure[combinationsIndex] /= float(np.mean(disagreement))
+#     bestCombiIndex = np.argmin(div_measure)
+#     bestCombination = combis[bestCombiIndex]
+#
+#     return [classifiersNames[viewIndex][index] for viewIndex, index in
+#             enumerate(bestCombination)], div_measure[
+#                bestCombiIndex]
+#
+#
+# def getFoldsGroundTruth(directory, folds=True):
+#     """This function is used to get the labels of each fold example used in the measurements
+#     foldsGroundTruth is formatted as
+#     foldsGroundTruth[foldIndex, exampleIndex]"""
+#     if folds:
+#         foldsFilesNames = os.listdir(directory + "folds/")
+#         foldLen = len(np.genfromtxt(directory + "folds/" + foldsFilesNames[0],
+#                                     delimiter=','))
+#         foldsGroudTruth = np.zeros((len(foldsFilesNames), foldLen), dtype=int)
+#         for fileName in foldsFilesNames:
+#             foldIndex = int(fileName[-5])
+#             foldsGroudTruth[foldIndex] = np.genfromtxt(
+#                 directory + "folds/" + fileName, delimiter=',')[:foldLen]
+#         return foldsGroudTruth
+#     else:
+#         train_labels = np.genfromtxt(directory + "train_labels.csv",
+#                                      delimiter=',')
+#         foldsGroudTruth = np.zeros((1, train_labels.shape[0]))
+#         foldsGroudTruth[0] = train_labels
+#         return foldsGroudTruth
+#
+#
+# def getArgs(args, benchmark, views, viewsIndices, randomState,
+#             directory, resultsMonoview, classificationIndices, measurement,
+#             name):
+#     """This function is a general function to get the args for all the measurements used"""
+#     if len(resultsMonoview[0].test_folds_preds.shape) is not 1:
+#         foldsGroundTruth = getFoldsGroundTruth(directory, folds=True)
+#     else:
+#         foldsGroundTruth = getFoldsGroundTruth(directory, folds=False)
+#     monoviewClassifierModulesNames = benchmark["monoview"]
+#     classifiersDecisions, classifiersNames = getClassifiersDecisions(
+#         monoviewClassifierModulesNames,
+#         viewsIndices,
+#         resultsMonoview)
+#     if name in ['disagree_fusion', 'double_fault_fusion']:
+#         classifiersNames, div_measure = couple_div_measure(classifiersNames,
+#                                                            classifiersDecisions,
+#                                                            measurement,
+#                                                            foldsGroundTruth)
+#     elif name == "pseudo_cq_fusion":
+#         classifiersNames, div_measure = CQ_div_measure(classifiersNames,
+#                                                        classifiersDecisions,
+#                                                        measurement,
+#                                                        foldsGroundTruth)
+#     else:
+#         classifiersNames, div_measure = global_div_measure(classifiersNames,
+#                                                            classifiersDecisions,
+#                                                            measurement,
+#                                                            foldsGroundTruth)
+#     multiclass_preds = [monoviewResult.y_test_multiclass_pred for monoviewResult
+#                         in resultsMonoview]
+#     if isBiclass(multiclass_preds):
+#         monoviewDecisions = np.array(
+#             [monoviewResult.full_labels_pred for monoviewResult in
+#              resultsMonoview
+#              if
+#              classifiersNames[viewsIndices.index(monoviewResult.view_index)] ==
+#              monoviewResult.classifier_name])
+#     else:
+#         monoviewDecisions = np.array(
+#             [genMulticlassMonoviewDecision(monoviewResult,
+#                                            classificationIndices) for
+#              monoviewResult in
+#              resultsMonoview if classifiersNames[viewsIndices.index(
+#                 monoviewResult.view_index)] == monoviewResult.classifier_name])
+#     argumentsList = []
+#     arguments = {"CL_type": name,
+#                  "views": views,
+#                  "NB_VIEW": len(views),
+#                  "viewsIndices": viewsIndices,
+#                  "NB_CLASS": len(args.CL_classes),
+#                  "LABELS_NAMES": args.CL_classes,
+#                  name + "KWARGS": {
+#                      "weights": args.DGF_weights,
+#                      "classifiersNames": classifiersNames,
+#                      "monoviewDecisions": monoviewDecisions,
+#                      "nbCLass": len(args.CL_classes),
+#                      "div_measure": div_measure
+#                  }
+#                  }
+#     argumentsList.append(arguments)
+#     return argumentsList
+#
+#
+# def genParamsSets(classificationKWARGS, randomState, nIter=1):
+#     """Used to generate parameters sets for the random hyper parameters optimization function"""
+#     weights = [
+#         randomState.random_sample(len(classificationKWARGS["classifiersNames"]))
+#         for _ in range(nIter)]
+#     nomralizedWeights = [[weightVector / np.sum(weightVector)] for weightVector
+#                          in weights]
+#     return nomralizedWeights
+#
+#
+# class DiversityFusionClass:
+#     """This is a parent class for all the diversity fusion based classifiers."""
+#
+#     def __init__(self, randomState, NB_CORES=1, **kwargs):
+#         """Used to init the instances"""
+#         if kwargs["weights"] == []:
+#             self.weights = [1.0 / len(kwargs["classifiersNames"]) for _ in
+#                             range(len(kwargs["classifiersNames"]))]
+#         else:
+#             self.weights = np.array(kwargs["weights"]) / np.sum(
+#                 np.array(kwargs["weights"]))
+#         self.monoviewDecisions = kwargs["monoviewDecisions"]
+#         self.classifiersNames = kwargs["classifiersNames"]
+#         self.nbClass = kwargs["nbCLass"]
+#         self.div_measure = kwargs["div_measure"]
+#
+#     def setParams(self, paramsSet):
+#         """ Used to set the weights"""
+#         self.weights = paramsSet[0]
+#
+#     def fit_hdf5(self, DATASET, labels, trainIndices=None, viewsIndices=None,
+#                  metric=["f1_score", None]):
+#         """No need to fit as the monoview classifiers are already fitted"""
+#         pass
+#
+#     def predict_hdf5(self, DATASET, usedIndices=None, viewsIndices=None):
+#         """Just a weighted majority vote"""
+#         if usedIndices is None:
+#             usedIndices = range(DATASET.get("Metadata").attrs["datasetLength"])
+#         votes = np.zeros((len(usedIndices), self.nbClass), dtype=float)
+#         for usedIndex, exampleIndex in enumerate(usedIndices):
+#             for monoviewDecisionIndex, monoviewDecision in enumerate(
+#                     self.monoviewDecisions):
+#                 votes[usedIndex, monoviewDecision[
+#                     exampleIndex]] += 1  # self.weights[monoviewDecisionIndex]
+#         predictedLabels = np.argmax(votes, axis=1)
+#         return predictedLabels
+#
+#     def predict_probas_hdf5(self, DATASET, usedIndices=None):
+#         pass
+#
+#     def getConfigString(self, classificationKWARGS):
+#         return "weights : " + ", ".join(map(str, list(self.weights)))
+#
+#     def getSpecificAnalysis(self, classificationKWARGS):
+#         stringAnalysis = "Classifiers used for each view : " + ', '.join(
+#             self.classifiersNames)
+#         return stringAnalysis
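
The whole module above is commented out rather than deleted; its core idea was to enumerate, for each diversity measure, the combinations (with replacement) of one monoview classifier per view and keep the combination that maximises the mean pairwise diversity over the folds. A self-contained hedged sketch of that search, using an illustrative disagreement measure and random decisions in place of the platform's own data structures:

    # Hedged sketch of the search performed by the commented-out couple_div_measure:
    # enumerate classifier combinations across views and keep the most diverse one.
    import itertools
    import numpy as np

    def disagreement(preds_a, preds_b, ground_truth):
        # two classifiers "disagree" on an example when exactly one of them is correct
        return np.logical_xor(preds_a == ground_truth, preds_b == ground_truth)

    rng = np.random.RandomState(42)
    n_views, n_classifiers, n_folds, fold_len = 3, 4, 2, 20
    decisions = rng.randint(0, 2, (n_views, n_classifiers, n_folds, fold_len))
    truth = rng.randint(0, 2, (n_folds, fold_len))

    best_combi, best_div = None, -np.inf
    for combi in itertools.combinations_with_replacement(range(n_classifiers), n_views):
        pairs = itertools.combinations(enumerate(combi), 2)
        divs = [np.mean(disagreement(decisions[v1, c1], decisions[v2, c2], truth))
                for (v1, c1), (v2, c2) in pairs]
        if np.mean(divs) > best_div:
            best_combi, best_div = combi, np.mean(divs)
    print(best_combi, best_div)  # one classifier index per view, and its mean diversity
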
diff --git a/multiview_platform/mono_multi_view_classifiers/multiview_classifiers/additions/diversity_utils.py b/multiview_platform/mono_multi_view_classifiers/multiview_classifiers/additions/diversity_utils.py
index b602d77f779fa60c40598bdda26f2722bdd63cdd..12adf384a462b064ea9042c6327ebd225f3e5d5a 100644
--- a/multiview_platform/mono_multi_view_classifiers/multiview_classifiers/additions/diversity_utils.py
+++ b/multiview_platform/mono_multi_view_classifiers/multiview_classifiers/additions/diversity_utils.py
@@ -16,16 +16,16 @@ class DiversityFusionClassifier(BaseMultiviewClassifier,
     """This is the base class for all the diversity fusion based classifiers."""
 
     def __init__(self, random_state=None, classifier_names=None,
-                 monoview_estimators=None, classifiers_configs=None):
+                 monoview_estimators=None, classifier_configs=None):
         """Used to init the instances"""
         super(DiversityFusionClassifier, self).__init__(random_state)
         if classifier_names is None:
             classifier_names = get_available_monoview_classifiers()
         self.classifier_names = classifier_names
-        self.param_names = ["classifiers_configs"]
+        self.param_names = ["classifier_configs"]
         self.distribs = [ConfigGenerator(get_available_monoview_classifiers())]
         self.estimator_pool = monoview_estimators
-        self.classifiers_configs = classifiers_configs
+        self.classifier_configs = classifier_configs
 
     def fit(self, X, y, train_indices=None, views_indices=None):
         train_indices, views_indices = get_examples_views_indices(X,
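
The `classifiers_configs` attribute and its entry in `self.param_names` are renamed together to `classifier_configs`. Presumably `param_names` feeds a scikit-learn-style `get_params`/`set_params` mechanism used by the randomized search, in which case the listed name has to match an actual constructor argument. A hedged sketch of that convention, using `sklearn.base.BaseEstimator` as a stand-in for the platform's own base class:

    # Why the attribute name and the param_names entry must agree: parameters are
    # looked up by name in get_params()/set_params(). TinyFusion is illustrative only.
    from sklearn.base import BaseEstimator

    class TinyFusion(BaseEstimator):
        def __init__(self, classifier_configs=None):
            self.classifier_configs = classifier_configs
            self.param_names = ["classifier_configs"]

    clf = TinyFusion()
    clf.set_params(classifier_configs={"decision_tree": {"max_depth": 3}})
    print(clf.get_params())  # {'classifier_configs': {'decision_tree': {'max_depth': 3}}}
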
diff --git a/multiview_platform/mono_multi_view_classifiers/multiview_classifiers/additions/fusion_utils.py b/multiview_platform/mono_multi_view_classifiers/multiview_classifiers/additions/fusion_utils.py
index 0196e70957cd24010aa8c15786dbb00c3d2a96b8..80f77ec9cdbc6e950d01bd241c6deb8bb79bdf4d 100644
--- a/multiview_platform/mono_multi_view_classifiers/multiview_classifiers/additions/fusion_utils.py
+++ b/multiview_platform/mono_multi_view_classifiers/multiview_classifiers/additions/fusion_utils.py
@@ -1,10 +1,10 @@
 import inspect
 
 
-from ...multiview.multiview_utils import BaseMultiviewClassifier, get_monoview_classifier
+from ...multiview.multiview_utils import get_monoview_classifier
 
 
-class BaseLateFusionClassifier(BaseMultiviewClassifier):
+class BaseLateFusionClassifier():
 
     def init_monoview_estimator(self, classifier_name, classifier_index=None):
         if classifier_index is not None:
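
`BaseLateFusionClassifier` stops inheriting from `BaseMultiviewClassifier` and becomes a plain helper/mixin that only provides `init_monoview_estimator`; the multiview behaviour is presumably supplied by the concrete classes that combine both bases. A hedged sketch of that arrangement, with hypothetical class bodies:

    # Hypothetical sketch of the mixin arrangement this change suggests.
    class BaseMultiviewClassifier:
        def __init__(self, random_state=None):
            self.random_state = random_state

    class BaseLateFusionClassifier:            # plain mixin, no multiview base class
        def init_monoview_estimator(self, classifier_name, classifier_index=None):
            return "estimator for " + classifier_name

    class LateFusionClassifier(BaseMultiviewClassifier, BaseLateFusionClassifier):
        pass

    clf = LateFusionClassifier(random_state=42)
    print(clf.init_monoview_estimator("decision_tree"))
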
diff --git a/multiview_platform/mono_multi_view_classifiers/multiview_classifiers/bayesian_inference_fusion.py b/multiview_platform/mono_multi_view_classifiers/multiview_classifiers/bayesian_inference_fusion.py
index 0ebefcf51daeb315b05b61d8c110df7a1cc01f0a..5da30a4a28f37be46ea3a0de05ed5270eebee10d 100644
--- a/multiview_platform/mono_multi_view_classifiers/multiview_classifiers/bayesian_inference_fusion.py
+++ b/multiview_platform/mono_multi_view_classifiers/multiview_classifiers/bayesian_inference_fusion.py
@@ -25,7 +25,7 @@ class BayesianInferenceClassifier(LateFusionClassifier):
         view_scores = []
         for index, view_index in enumerate(views_indices):
             view_scores.append(np.power(
-                self.monoviewClassifiers[index].predict_proba(X.get_v(view_index,
+                self.monoview_estimators[index].predict_proba(X.get_v(view_index,
                                                                       example_indices)),
                 self.weights[index]))
         view_scores = np.array(view_scores)
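
This hunk only repoints the call at `self.monoview_estimators`; it stops before the combining step, but the Bayesian-inference rule it belongs to presumably multiplies the weighted per-view probabilities across views. A hedged numeric sketch of that rule (probabilities, weights and the final product are made up for illustration):

    # Weighted product fusion: each view's class probabilities raised to the view
    # weight (as in the np.power call above), then presumably combined by product.
    import numpy as np

    view_probas = np.array([[[0.7, 0.3], [0.4, 0.6]],   # view 0: 2 examples x 2 classes
                            [[0.6, 0.4], [0.2, 0.8]]])  # view 1
    weights = np.array([0.5, 0.5])

    view_scores = np.power(view_probas, weights[:, None, None])
    predicted = np.argmax(np.prod(view_scores, axis=0), axis=1)
    print(predicted)  # [0 1]
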
diff --git a/multiview_platform/mono_multi_view_classifiers/multiview_classifiers/weighted_linear_late_fusion.py b/multiview_platform/mono_multi_view_classifiers/multiview_classifiers/weighted_linear_late_fusion.py
index efbd3f17cda04e2e5fdc66b2a00c018c10d21d52..fdefbdda1ea479f1e35eb6d0486e0b994a020266 100644
--- a/multiview_platform/mono_multi_view_classifiers/multiview_classifiers/weighted_linear_late_fusion.py
+++ b/multiview_platform/mono_multi_view_classifiers/multiview_classifiers/weighted_linear_late_fusion.py
@@ -19,7 +19,7 @@ class WeightedLinearLateFusion(LateFusionClassifier):
         example_indices, views_indices = get_examples_views_indices(X, example_indices, views_indices)
         view_scores = []
         for index, viewIndex in enumerate(views_indices):
-            view_scores.append(np.array(self.monoviewClassifiers[index].predict_proba(
+            view_scores.append(np.array(self.monoview_estimators[index].predict_proba(
                 X.get_v(viewIndex, example_indices))) * self.weights[index])
         view_scores = np.array(view_scores)
         predicted_labels = np.argmax(np.sum(view_scores, axis=0), axis=1)
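
Same rename as in the Bayesian fusion above, and here the full rule is visible: per-view class probabilities are scaled by the view weight, summed across views, and the highest-scoring class wins. A small self-contained sketch with illustrative numbers:

    # Weighted arithmetic fusion, mirroring WeightedLinearLateFusion.predict above.
    import numpy as np

    view_probas = np.array([[[0.7, 0.3], [0.4, 0.6]],   # view 0: 2 examples x 2 classes
                            [[0.6, 0.4], [0.2, 0.8]]])  # view 1
    weights = np.array([0.8, 0.2])

    view_scores = view_probas * weights[:, None, None]
    predicted_labels = np.argmax(np.sum(view_scores, axis=0), axis=1)
    print(predicted_labels)  # [0 1]
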
diff --git a/multiview_platform/mono_multi_view_classifiers/utils/configuration.py b/multiview_platform/mono_multi_view_classifiers/utils/configuration.py
index 5db045d77f506425adfae0ed0ea03d3a8de841e9..04b38548557a4c5810e13e25a9df12d219022de5 100644
--- a/multiview_platform/mono_multi_view_classifiers/utils/configuration.py
+++ b/multiview_platform/mono_multi_view_classifiers/utils/configuration.py
@@ -5,10 +5,10 @@ import os
 
 
 def get_the_args(path_to_config_file="../config_files/config.yml"):
-    """This is the main function for extracting the args for a '.ini' file"""
-    config_path = os.path.dirname(os.path.abspath(__file__))
-    config_path = os.path.join(config_path, "../..")
-    path_to_config_file = os.path.join(config_path, path_to_config_file)
+    """This is the main function for extracting the args for a '.yml' file"""
+    # config_path = os.path.dirname(os.path.abspath(__file__))
+    # config_path = os.path.join(config_path, "../..")
+    # path_to_config_file = os.path.join(config_path, path_to_config_file)
 
     with open(path_to_config_file, 'r') as stream:
         yaml_config = yaml.safe_load(stream)
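
With the path manipulation commented out, `get_the_args` uses `path_to_config_file` exactly as passed, so callers should hand it a path that resolves from their working directory (or an absolute one). A hedged usage sketch (the example path and the returned-dict access are assumptions based on the rest of this diff):

    # Minimal usage sketch of the updated get_the_args(); assumes it returns the
    # dict produced by yaml.safe_load, as the accesses in exec_classif.py suggest.
    from multiview_platform.mono_multi_view_classifiers.utils.configuration import get_the_args

    args = get_the_args("config_files/config.yml")   # path used as given
    print(args["Base"]["name"], args["Classification"]["hps_iter"])
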
diff --git a/multiview_platform/mono_multi_view_classifiers/utils/get_multiview_db.py b/multiview_platform/mono_multi_view_classifiers/utils/get_multiview_db.py
index 02a084a7c229832338458b2a1070deaf954c1e79..3b7e1f09483af6587e1398d90bdba495e0e3411c 100644
--- a/multiview_platform/mono_multi_view_classifiers/utils/get_multiview_db.py
+++ b/multiview_platform/mono_multi_view_classifiers/utils/get_multiview_db.py
@@ -91,7 +91,7 @@ def makeMeNoisy(viewData, random_state, percentage=5):
 
 
 def get_plausible_db_hdf5(features, pathF, name, NB_CLASS=3, LABELS_NAME="",
-                       randomState=None, full=True, add_noise=False,
+                       random_state=None, full=True, add_noise=False,
                        noise_std=0.15, nbView=3,
                    nbClass=2, datasetLength=100, randomStateInt=42, nbFeatures = 10):
     """Used to generate a plausible dataset to test the algorithms"""
@@ -468,7 +468,7 @@ def add_gaussian_noise(dataset_file, random_state, path_f, dataset_name,
 
 
 def get_classic_db_csv(views, pathF, nameDB, NB_CLASS, askedLabelsNames,
-                       randomState, full=False, add_noise=False, noise_std=0.15,
+                       random_state, full=False, add_noise=False, noise_std=0.15,
                         delimiter=","):
     # TODO : Update this one
     labels_names = np.genfromtxt(pathF + nameDB + "-labels-names.csv",
diff --git a/multiview_platform/mono_multi_view_classifiers/utils/hyper_parameter_search.py b/multiview_platform/mono_multi_view_classifiers/utils/hyper_parameter_search.py
index 114764faf8c6121106f8dcbddebb9866fa5d6b64..9872bea62a103fc716c13a64d6042e29d1dd799d 100644
--- a/multiview_platform/mono_multi_view_classifiers/utils/hyper_parameter_search.py
+++ b/multiview_platform/mono_multi_view_classifiers/utils/hyper_parameter_search.py
@@ -90,7 +90,7 @@ def get_test_folds_preds(X, y, cv, estimator, framework, available_indices=None)
     return test_folds_prediction
 
 
-def randomized_search_x(X, y, framework, random_state, output_file_name, classifier_module,
+def randomized_search(X, y, framework, random_state, output_file_name, classifier_module,
                       classifier_name, folds=4, nb_cores=1, metric=["accuracy_score", None],
                       n_iter=30, classifier_kwargs =None, learning_indices=None, view_indices=None):
     estimator = getattr(classifier_module, classifier_name)(random_state,
@@ -216,74 +216,74 @@ class MultiviewCompatibleRandomizedSearchCV(RandomizedSearchCV):
         return test_folds_prediction
 
 
-def randomized_search(dataset_var, labels, classifier_package, classifier_name,
-                      metrics_list, learning_indices, k_folds, random_state,
-                      views_indices=None, n_iter=1,
-                      nb_cores=1, **classification_kargs):
-    """Used to perform a random search on the classifiers to optimize hyper parameters"""
-    if views_indices is None:
-        views_indices = range(dataset_var.get("Metadata").attrs["nbView"])
-    metric = metrics_list[0]
-    metric_module = getattr(metrics, metric[0])
-    if metric[1] is not None:
-        metric_kargs = dict((index, metricConfig) for index, metricConfig in
-                            enumerate(metric[1]))
-    else:
-        metric_kargs = {}
-    classifier_module = getattr(classifier_package, classifier_name + "Module")
-    classifier_class = getattr(classifier_module, classifier_name + "Class")
-    if classifier_name != "Mumbo":
-        params_sets = classifier_module.gen_params_sets(classification_kargs,
-                                                    random_state, n_iter=n_iter)
-        if metric_module.getConfig()[-14] == "h":
-            base_score = -1000.0
-            is_better = "higher"
-        else:
-            base_score = 1000.0
-            is_better = "lower"
-        best_settings = None
-        kk_folds = k_folds.split(learning_indices, labels[learning_indices])
-        for params_set in params_sets:
-            scores = []
-            for trainIndices, testIndices in kk_folds:
-                classifier = classifier_class(random_state, nb_scores=nb_cores,
-                                             **classification_kargs)
-                classifier.setParams(params_set)
-                classifier.fit_hdf5(dataset_var, labels,
-                                    train_indices=learning_indices[trainIndices],
-                                    views_indices=views_indices)
-                test_labels = classifier.predict_hdf5(dataset_var,
-                                                      used_indices=learning_indices[testIndices],
-                                                      views_indices=views_indices)
-                test_score = metric_module.score(
-                    labels[learning_indices[testIndices]], test_labels)
-                scores.append(test_score)
-            cross_val_score = np.mean(np.array(scores))
-
-            if is_better == "higher" and cross_val_score > base_score:
-                base_score = cross_val_score
-                best_settings = params_set
-            elif is_better == "lower" and cross_val_score < base_score:
-                base_score = cross_val_score
-                best_settings = params_set
-        classifier = classifier_class(random_state, nb_cores=nb_cores,
-                                     **classification_kargs)
-        classifier.setParams(best_settings)
-
-    # TODO : This must be corrected
-    else:
-        best_configs, _ = classifier_module.grid_search_hdf5(dataset_var, labels,
-                                                             views_indices,
-                                                             classification_kargs,
-                                                             learning_indices,
-                                                             random_state,
-                                                             metric=metric,
-                                                             nI_iter=n_iter)
-        classification_kargs["classifiersConfigs"] = best_configs
-        classifier = classifier_class(random_state, nb_cores=nb_cores,
-                                      **classification_kargs)
-
-    return classifier
+# def randomized_search_(dataset_var, labels, classifier_package, classifier_name,
+#                       metrics_list, learning_indices, k_folds, random_state,
+#                       views_indices=None, n_iter=1,
+#                       nb_cores=1, **classification_kargs):
+#     """Used to perform a random search on the classifiers to optimize hyper parameters"""
+#     if views_indices is None:
+#         views_indices = range(dataset_var.get("Metadata").attrs["nbView"])
+#     metric = metrics_list[0]
+#     metric_module = getattr(metrics, metric[0])
+#     if metric[1] is not None:
+#         metric_kargs = dict((index, metricConfig) for index, metricConfig in
+#                             enumerate(metric[1]))
+#     else:
+#         metric_kargs = {}
+#     classifier_module = getattr(classifier_package, classifier_name + "Module")
+#     classifier_class = getattr(classifier_module, classifier_name + "Class")
+#     if classifier_name != "Mumbo":
+#         params_sets = classifier_module.gen_params_sets(classification_kargs,
+#                                                     random_state, n_iter=n_iter)
+#         if metric_module.getConfig()[-14] == "h":
+#             base_score = -1000.0
+#             is_better = "higher"
+#         else:
+#             base_score = 1000.0
+#             is_better = "lower"
+#         best_settings = None
+#         kk_folds = k_folds.split(learning_indices, labels[learning_indices])
+#         for params_set in params_sets:
+#             scores = []
+#             for trainIndices, testIndices in kk_folds:
+#                 classifier = classifier_class(random_state, nb_scores=nb_cores,
+#                                              **classification_kargs)
+#                 classifier.setParams(params_set)
+#                 classifier.fit_hdf5(dataset_var, labels,
+#                                     train_indices=learning_indices[trainIndices],
+#                                     views_indices=views_indices)
+#                 test_labels = classifier.predict_hdf5(dataset_var,
+#                                                       used_indices=learning_indices[testIndices],
+#                                                       views_indices=views_indices)
+#                 test_score = metric_module.score(
+#                     labels[learning_indices[testIndices]], test_labels)
+#                 scores.append(test_score)
+#             cross_val_score = np.mean(np.array(scores))
+#
+#             if is_better == "higher" and cross_val_score > base_score:
+#                 base_score = cross_val_score
+#                 best_settings = params_set
+#             elif is_better == "lower" and cross_val_score < base_score:
+#                 base_score = cross_val_score
+#                 best_settings = params_set
+#         classifier = classifier_class(random_state, nb_cores=nb_cores,
+#                                      **classification_kargs)
+#         classifier.setParams(best_settings)
+#
+#     # TODO : This must be corrected
+#     else:
+#         best_configs, _ = classifier_module.grid_search_hdf5(dataset_var, labels,
+#                                                              views_indices,
+#                                                              classification_kargs,
+#                                                              learning_indices,
+#                                                              random_state,
+#                                                              metric=metric,
+#                                                              nI_iter=n_iter)
+#         classification_kargs["classifiersConfigs"] = best_configs
+#         classifier = classifier_class(random_state, nb_cores=nb_cores,
+#                                       **classification_kargs)
+#
+#     return classifier
 
 
 def spear_mint(dataset, classifier_name, views_indices=None, k_folds=None, n_iter=1,
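
The old multiview `randomized_search` is commented out (its role is taken over by `MultiviewCompatibleRandomizedSearchCV` and the renamed monoview `randomized_search` above). Its loop amounted to: draw parameter sets, cross-validate each, and keep the best one according to the metric's direction. A self-contained hedged sketch of that loop, with scikit-learn pieces standing in for the platform's own classes:

    # Hedged sketch of the loop the commented-out randomized_search_ used to run.
    import numpy as np
    from sklearn.datasets import make_classification
    from sklearn.model_selection import StratifiedKFold, cross_val_score
    from sklearn.tree import DecisionTreeClassifier

    random_state = np.random.RandomState(42)
    X, y = make_classification(n_samples=100, random_state=42)
    k_folds = StratifiedKFold(n_splits=2)

    best_score, best_params = -np.inf, None
    for _ in range(10):                                   # n_iter parameter draws
        params = {"max_depth": random_state.randint(1, 10)}
        clf = DecisionTreeClassifier(random_state=42, **params)
        score = cross_val_score(clf, X, y, cv=k_folds, scoring="accuracy").mean()
        if score > best_score:                            # "higher is better" branch
            best_score, best_params = score, params
    print(best_params, best_score)
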
diff --git a/multiview_platform/tests/test_ExecClassif.py b/multiview_platform/tests/test_ExecClassif.py
index 24d4f0cf6c8eba250c1b0863545cf157ae8d5cc2..3b6f3f570bb573b00dbbea44e1e0b91ad438e4ce 100644
--- a/multiview_platform/tests/test_ExecClassif.py
+++ b/multiview_platform/tests/test_ExecClassif.py
@@ -277,7 +277,7 @@ class Test_execBenchmark(unittest.TestCase):
                                          10, cls.Dataset,
                                          exec_one_benchmark=fakeBenchmarkExec,
                                          exec_one_benchmark_multicore=fakeBenchmarkExec_mutlicore,
-                                         exec_one_benchmark_monoCore=fakeBenchmarkExec_monocore,
+                                         exec_one_benchmark_mono_core=fakeBenchmarkExec_monocore,
                                          get_results=fakegetResults,
                                          delete=fakeDelete)
         cls.assertEqual(res, 3)
@@ -288,7 +288,7 @@ class Test_execBenchmark(unittest.TestCase):
                                          10, cls.Dataset,
                                          exec_one_benchmark=fakeBenchmarkExec,
                                          exec_one_benchmark_multicore=fakeBenchmarkExec_mutlicore,
-                                         exec_oneBenchmark_mono_core=fakeBenchmarkExec_monocore,
+                                         exec_one_benchmark_mono_core=fakeBenchmarkExec_monocore,
                                          get_results=fakegetResults,
                                          delete=fakeDelete)
         cls.assertEqual(res, 3)
@@ -304,14 +304,14 @@ class Test_execBenchmark(unittest.TestCase):
 
 def fakeExecMono(directory, name, labels_names, classification_indices, k_folds,
                  coreIndex, type, pathF, random_state, labels,
-                 hyper_param_search="try", metrics="try", nIter=1, **arguments):
+                 hyper_param_search="try", metrics="try", n_iter=1, **arguments):
     return ["Mono", arguments]
 
 
 def fakeExecMulti(directory, coreIndex, name, classification_indices, k_folds,
                   type, pathF, labels_dictionary,
                   random_state, labels, hyper_param_search="", metrics=None,
-                  nIter=1, **arguments):
+                  n_iter=1, **arguments):
     return ["Multi", arguments]
 
 
@@ -412,8 +412,7 @@ class Test_execOneBenchmark_multicore(unittest.TestCase):
 
     def test_simple(cls):
         flag, results = exec_classif.exec_one_benchmark_multicore(
-            nbCores=2,
-
+            nb_cores=2,
             labels_dictionary={0: "a", 1: "b"},
             directory="multiview_platform/tests/tmp_tests/",
             classification_indices=([1, 2, 3, 4], [0, 10, 20, 30, 40]),
diff --git a/multiview_platform/tests/test_mono_view/test_ExecClassifMonoView.py b/multiview_platform/tests/test_mono_view/test_ExecClassifMonoView.py
index 941cae735b5d742a50ab85dd906e38f093123563..763c480cdd0b85e79aaca9df69cbda589523dde9 100644
--- a/multiview_platform/tests/test_mono_view/test_ExecClassifMonoView.py
+++ b/multiview_platform/tests/test_mono_view/test_ExecClassifMonoView.py
@@ -16,9 +16,9 @@ class Test_initConstants(unittest.TestCase):
     @classmethod
     def setUpClass(cls):
         rm_tmp()
-        os.mkdir("multiview_platform/tests/temp_tests")
+        os.mkdir("multiview_platform/tests/tmp_tests")
         cls.datasetFile = h5py.File(
-            "multiview_platform/tests/temp_tests/test.hdf5", "w")
+            "multiview_platform/tests/tmp_tests/test.hdf5", "w")
         cls.random_state = np.random.RandomState(42)
         cls.args = {"classifier_name": "test_clf"}
         cls.X_value = cls.random_state.randint(0, 500, (10, 20))
@@ -30,7 +30,7 @@ class Test_initConstants(unittest.TestCase):
                                      np.array([1, 3, 5, 7, 9])]
         cls.labels_names = ["test_true", "test_false"]
         cls.name = "test"
-        cls.directory = "multiview_platform/tests/temp_tests/test_dir/"
+        cls.directory = "multiview_platform/tests/tmp_tests/test_dir/"
 
     def test_simple(cls):
         kwargs, \
@@ -56,12 +56,12 @@ class Test_initConstants(unittest.TestCase):
 
     @classmethod
     def tearDownClass(cls):
-        os.remove("multiview_platform/tests/temp_tests/test.hdf5")
+        os.remove("multiview_platform/tests/tmp_tests/test.hdf5")
         os.rmdir(
-            "multiview_platform/tests/temp_tests/test_dir/test_clf/test_dataset")
-        os.rmdir("multiview_platform/tests/temp_tests/test_dir/test_clf")
-        os.rmdir("multiview_platform/tests/temp_tests/test_dir")
-        os.rmdir("multiview_platform/tests/temp_tests")
+            "multiview_platform/tests/tmp_tests/test_dir/test_clf/test_dataset")
+        os.rmdir("multiview_platform/tests/tmp_tests/test_dir/test_clf")
+        os.rmdir("multiview_platform/tests/tmp_tests/test_dir")
+        os.rmdir("multiview_platform/tests/tmp_tests")
 
 
 class Test_initTrainTest(unittest.TestCase):
@@ -77,13 +77,8 @@ class Test_initTrainTest(unittest.TestCase):
                                      np.array([1, 3, 5, 7, 9])]
 
     def test_simple(cls):
-<<<<<<< HEAD
-        X_train, y_train, X_test, y_test, X_test_multiclass = exec_classif_mono_view.initTrainTest(
-            cls.X, cls.Y, cls.classification_indices)
-=======
         X_train, y_train, X_test, y_test, X_test_multiclass = exec_classif_mono_view.init_train_test(
-            cls.X, cls.Y, cls.classificationIndices)
->>>>>>> 7b3e918b4fb2938657cae3093d95b1bd6fc461d4
+            cls.X, cls.Y, cls.classification_indices)
         np.testing.assert_array_equal(X_train, np.array(
             [np.array([102, 435, 348, 270, 106]),
              np.array([466, 214, 330, 458, 87]),
diff --git a/multiview_platform/tests/test_multiview_classifiers/Test_DisagreeFusion/test_DisagreeFusionModule.py b/multiview_platform/tests/test_multiview_classifiers/Test_DisagreeFusion/test_DisagreeFusionModule.py
deleted file mode 100644
index 528cd15df475bbd46285cf8bb0b22eebbca3a2f7..0000000000000000000000000000000000000000
--- a/multiview_platform/tests/test_multiview_classifiers/Test_DisagreeFusion/test_DisagreeFusionModule.py
+++ /dev/null
@@ -1,69 +0,0 @@
-import unittest
-
-import numpy as np
-
-from multiview_platform.mono_multi_view_classifiers.multiview.additions import \
-    diversity_utils
-from ....mono_multi_view_classifiers.multiview_classifiers.disagree_fusion import \
-    disagree_fusion
-from multiview_platform.mono_multi_view_classifiers.multiview.multiview_utils import MultiviewResult
-
-class Test_disagreement(unittest.TestCase):
-
-    @classmethod
-    def setUpClass(cls):
-        cls.random_state = np.random.RandomState(42)
-        cls.allClassifiersNames = [["SCM", "SVM", "DT"], ["SCM", "SVM", "DT"]]
-        cls.views_indices =  np.array([0, 1])
-        cls.classifiersDecisions = np.zeros((cls.views_indices.shape[0], len(cls.allClassifiersNames), 3, 6),
-                                            dtype=int)
-        for classifer_index, classifier in enumerate(cls.allClassifiersNames):
-            for view_index, view in enumerate(cls.views_indices):
-                cls.classifiersDecisions[view_index, classifer_index] = np.array([
-                    cls.random_state.randint(0, 2, 6),
-                    cls.random_state.randint(0, 2, 6),
-                    cls.random_state.randint(0, 2, 6)])
-        cls.folds_ground_truth = np.array([np.array([1,1,1,0,0,0]) for _ in range(3)])
-        cls.classification_indices = np.array([])
-
-    def test_simple(cls):
-        bestCombi, disagreement = diversity_utils.couple_div_measure(
-            cls.allClassifiersNames, cls.classifiersDecisions, disagree_fusion.disagree, cls.folds_ground_truth)
-        cls.assertAlmostEqual(disagreement, 0.666666666667)
-        cls.assertEqual(len(bestCombi), 2)
-
-    def test_multipleViews(cls):
-        cls.views_indices = np.array([0, 6, 18])
-        cls.allClassifiersNames = [["SCM", "SVM", "DT"], ["SCM", "SVM", "DT"], ["SCM", "SVM", "DT"]]
-        cls.classifiersDecisions = np.zeros(
-            (cls.views_indices.shape[0], len(cls.allClassifiersNames), 3, 6),
-            dtype=int)
-        for classifer_index, classifier in enumerate(cls.allClassifiersNames):
-            for view_index, view in enumerate(cls.views_indices):
-                cls.classifiersDecisions[
-                    view_index, classifer_index] = np.array([
-                    cls.random_state.randint(0, 2, 6),
-                    cls.random_state.randint(0, 2, 6),
-                    cls.random_state.randint(0, 2, 6)])
-
-        bestCombi, disagreement = diversity_utils.couple_div_measure(
-            cls.allClassifiersNames, cls.classifiersDecisions,
-            disagree_fusion.disagree, cls.folds_ground_truth)
-        cls.assertAlmostEqual(disagreement, 0.55555555555555)
-        cls.assertEqual(len(bestCombi), 3)
-
-
-class Test_disagree(unittest.TestCase):
-
-    @classmethod
-    def setUpClass(cls):
-        cls.monoviewDecision1 = np.array([0, 0, 1, 1])
-        cls.monoviewDecision2 = np.array([0, 1, 0, 1])
-        cls.ground_truth = None
-
-    def test_simple(cls):
-        disagreement = disagree_fusion.disagree(cls.monoviewDecision1,
-                                                cls.monoviewDecision2,
-                                                cls.ground_truth)
-        np.testing.assert_array_equal(disagreement,
-                                      np.array([False, True, True, False]))
diff --git a/multiview_platform/tests/test_multiview_classifiers/Test_DoubleFaultFusion/test_DoubleFaultFusionModule.py b/multiview_platform/tests/test_multiview_classifiers/Test_DoubleFaultFusion/test_DoubleFaultFusionModule.py
deleted file mode 100644
index 676dac183b6b79eca2800f859cc08b3af9d8a5c1..0000000000000000000000000000000000000000
--- a/multiview_platform/tests/test_multiview_classifiers/Test_DoubleFaultFusion/test_DoubleFaultFusionModule.py
+++ /dev/null
@@ -1,70 +0,0 @@
-import unittest
-
-import numpy as np
-
-from multiview_platform.mono_multi_view_classifiers.multiview.additions import \
-    diversity_utils
-from ....mono_multi_view_classifiers.multiview_classifiers.double_fault_fusion import \
-    double_fault_fusion
-
-
-class Test_doubleFaultRatio(unittest.TestCase):
-
-    @classmethod
-    def setUpClass(cls):
-        cls.random_state = np.random.RandomState(42)
-        cls.allClassifiersNames = [["SCM", "SVM", "DT"], ["SCM", "SVM", "DT"]]
-        cls.directory = ""
-        cls.views_indices = np.array([0, 1])
-        cls.classifiersDecisions = np.zeros(
-            (cls.views_indices.shape[0], len(cls.allClassifiersNames), 3, 6),
-            dtype=int)
-        for classifer_index, classifier in enumerate(cls.allClassifiersNames):
-            for view_index, view in enumerate(cls.views_indices):
-                cls.classifiersDecisions[
-                    view_index, classifer_index] = np.array([
-                    cls.random_state.randint(0, 2, 6),
-                    cls.random_state.randint(0, 2, 6),
-                    cls.random_state.randint(0, 2, 6)])
-        cls.folds_ground_truth = np.array([np.array([1,1,1,0,0,0]) for _ in range(3)])
-
-    def test_simple(cls):
-        bestCombi, disagreement = diversity_utils.couple_div_measure(
-            cls.allClassifiersNames,cls.classifiersDecisions,
-            double_fault_fusion.doubleFault, cls.folds_ground_truth)
-        cls.assertAlmostEqual(disagreement, 0.3888888888888)
-        cls.assertEqual(len(bestCombi), 2)
-
-    def test_multipleViews(cls):
-        cls.views_indices = np.array([0, 6, 18])
-        cls.allClassifiersNames = [["SCM", "SVM", "DT"], ["SCM", "SVM", "DT"], ["SCM", "SVM", "DT"]]
-        cls.classifiersDecisions = np.zeros(
-            (cls.views_indices.shape[0], len(cls.allClassifiersNames), 3, 6),
-            dtype=int)
-        for classifer_index, classifier in enumerate(cls.allClassifiersNames):
-            for view_index, view in enumerate(cls.views_indices):
-                cls.classifiersDecisions[
-                    view_index, classifer_index] = np.array([
-                    cls.random_state.randint(0, 2, 6),
-                    cls.random_state.randint(0, 2, 6),
-                    cls.random_state.randint(0, 2, 6)])
-        bestCombi, disagreement = diversity_utils.couple_div_measure(
-            cls.allClassifiersNames, cls.classifiersDecisions,
-            double_fault_fusion.doubleFault, cls.folds_ground_truth)
-        cls.assertAlmostEqual(disagreement, 0.3333333333)
-        cls.assertEqual(len(bestCombi), 3)
-
-
-class Test_doubleFault(unittest.TestCase):
-
-    @classmethod
-    def setUpClass(cls):
-        cls.monoviewDecision1 = np.array([0, 0, 1, 1, 0, 0, 1, 1])
-        cls.monoviewDecision2 = np.array([0, 1, 0, 1, 0, 1, 0, 1])
-        cls.ground_truth = np.array([0, 0, 0, 0, 1, 1, 1, 1])
-
-    def test_simple(cls):
-        disagreement = double_fault_fusion.doubleFault(
-            cls.monoviewDecision1, cls.monoviewDecision2, cls.ground_truth)
-        np.testing.assert_array_equal(disagreement, np.array(
-            [False, False, False, True, True, False, False, False]))
diff --git a/multiview_platform/tests/test_multiview_classifiers/Test_Fusion/Test_Methods/__init__.py b/multiview_platform/tests/test_multiview_classifiers/Test_Fusion/Test_Methods/__init__.py
deleted file mode 100644
index e69de29bb2d1d6434b8b29ae775ad8c2e48c5391..0000000000000000000000000000000000000000
diff --git a/multiview_platform/tests/test_multiview_classifiers/Test_Fusion/__init__.py b/multiview_platform/tests/test_multiview_classifiers/Test_Fusion/__init__.py
deleted file mode 100644
index e69de29bb2d1d6434b8b29ae775ad8c2e48c5391..0000000000000000000000000000000000000000
diff --git a/multiview_platform/tests/test_multiview_classifiers/Test_Fusion/test_FusionModule.py b/multiview_platform/tests/test_multiview_classifiers/Test_Fusion/test_FusionModule.py
deleted file mode 100644
index 369fb4e8211afff509506420f3204d0bfa3f45e1..0000000000000000000000000000000000000000
--- a/multiview_platform/tests/test_multiview_classifiers/Test_Fusion/test_FusionModule.py
+++ /dev/null
@@ -1,14 +0,0 @@
-import unittest
-
-from ....mono_multi_view_classifiers.multiview_classifiers.fusion import \
-    fusion
-
-
-class Test_genName(unittest.TestCase):
-
-    def test_late(self):
-        self.config = {"fusionType": "LateFusion",
-                       "fusionMethod": "chicken_is_heaven",
-                       "classifiers_names": ["cheese", "is", "no", "disease"]}
-        res = fusion.genName(self.config)
-        self.assertEqual(res, "Late-chic")
diff --git a/multiview_platform/tests/test_multiview_classifiers/test_additions/test_diversity_utils.py b/multiview_platform/tests/test_multiview_classifiers/test_additions/test_diversity_utils.py
index 522022041d942bf908ee19671e238887906a240a..a1b1ad2f85d88f65f6b762cfa9d5c01e92e4b762 100644
--- a/multiview_platform/tests/test_multiview_classifiers/test_additions/test_diversity_utils.py
+++ b/multiview_platform/tests/test_multiview_classifiers/test_additions/test_diversity_utils.py
@@ -24,7 +24,7 @@ class FakeDivCoupleClf(diversity_utils.CoupleDiversityFusionClassifier):
                  classifiers_config=None, monoview_estimators=None):
         super(FakeDivCoupleClf, self).__init__(random_state=rs,
                                                classifier_names=classifier_names,
-                                               classifiers_configs=classifiers_config,
+                                               classifier_configs=classifiers_config,
                                                monoview_estimators=monoview_estimators)
         self.rs = rs
 
@@ -38,7 +38,7 @@ class FakeDivGlobalClf(diversity_utils.GlobalDiversityFusionClassifier):
                  classifiers_config=None, monoview_estimators=None):
         super(FakeDivGlobalClf, self).__init__(random_state=rs,
                                                classifier_names=classifier_names,
-                                               classifiers_configs=classifiers_config,
+                                               classifier_configs=classifiers_config,
                                                monoview_estimators=monoview_estimators)
         self.rs = rs
 
diff --git a/multiview_platform/tests/test_multiview_classifiers/test_diversity_utils.py b/multiview_platform/tests/test_multiview_classifiers/test_diversity_utils.py
index 5a2d5ea0bd006d96579f1ba976f2e4917e74e996..46c9e59652d29787f2a10a3faca09f796c300f72 100644
--- a/multiview_platform/tests/test_multiview_classifiers/test_diversity_utils.py
+++ b/multiview_platform/tests/test_multiview_classifiers/test_diversity_utils.py
@@ -1,44 +1,42 @@
-
-<<<<<<< HEAD
-import numpy as np
-
-from multiview_platform.mono_multi_view_classifiers.multiview.additions import \
-    diversity_utils
-
-
-def fake_measure(a, b, c, d, e):
-    return 42
-
-
-class Test_global_div_measure(unittest.TestCase):
-
-    @classmethod
-    def setUpClass(cls):
-        cls.random_state = np.random.RandomState(42)
-        cls.allClassifiersNames = [["SCM", "SVM", "DT"], ["SCM", "SVM", "DT"]]
-        cls.views_indices = np.array([0, 1])
-        cls.classifiersDecisions = np.zeros(
-            (cls.views_indices.shape[0], len(cls.allClassifiersNames), 3, 6),
-            dtype=int)
-        for classifer_index, classifier in enumerate(cls.allClassifiersNames):
-            for view_index, view in enumerate(cls.views_indices):
-                cls.classifiersDecisions[
-                    view_index, classifer_index] = np.array([
-                    cls.random_state.randint(0, 2, 6),
-                    cls.random_state.randint(0, 2, 6),
-                    cls.random_state.randint(0, 2, 6)])
-        cls.folds_ground_truth = np.array(
-            [np.array([1, 1, 1, 0, 0, 0]) for _ in range(3)])
-        cls.classification_indices = np.array([])
-        cls.measurement = fake_measure
-
-    def test_simple(cls):
-        clf_names, diversity_measure = diversity_utils.global_div_measure(
-            cls.allClassifiersNames,
-            cls.classifiersDecisions,
-            cls.measurement,
-            cls.folds_ground_truth)
-        cls.assertEqual(len(clf_names), 2)
-        cls.assertEqual(diversity_measure, 42)
-=======
->>>>>>> 7b3e918b4fb2938657cae3093d95b1bd6fc461d4
+#
+# import numpy as np
+# import unittest
+#
+# from multiview_platform.mono_multi_view_classifiers.multiview.additions import \
+#     diversity_utils
+#
+#
+# def fake_measure(a, b, c, d, e):
+#     return 42
+#
+#
+# class Test_global_div_measure(unittest.TestCase):
+#
+#     @classmethod
+#     def setUpClass(cls):
+#         cls.random_state = np.random.RandomState(42)
+#         cls.allClassifiersNames = [["SCM", "SVM", "DT"], ["SCM", "SVM", "DT"]]
+#         cls.views_indices = np.array([0, 1])
+#         cls.classifiersDecisions = np.zeros(
+#             (cls.views_indices.shape[0], len(cls.allClassifiersNames), 3, 6),
+#             dtype=int)
+#         for classifer_index, classifier in enumerate(cls.allClassifiersNames):
+#             for view_index, view in enumerate(cls.views_indices):
+#                 cls.classifiersDecisions[
+#                     view_index, classifer_index] = np.array([
+#                     cls.random_state.randint(0, 2, 6),
+#                     cls.random_state.randint(0, 2, 6),
+#                     cls.random_state.randint(0, 2, 6)])
+#         cls.folds_ground_truth = np.array(
+#             [np.array([1, 1, 1, 0, 0, 0]) for _ in range(3)])
+#         cls.classification_indices = np.array([])
+#         cls.measurement = fake_measure
+#
+#     def test_simple(cls):
+#         clf_names, diversity_measure = diversity_utils.global_div_measure(
+#             cls.allClassifiersNames,
+#             cls.classifiersDecisions,
+#             cls.measurement,
+#             cls.folds_ground_truth)
+#         cls.assertEqual(len(clf_names), 2)
+#         cls.assertEqual(diversity_measure, 42)
diff --git a/multiview_platform/tests/test_utils/test_GetMultiviewDB.py b/multiview_platform/tests/test_utils/test_GetMultiviewDB.py
index 0cc3af536993c202f60b7fad6c2cc92309618781..d237daad9cfa2d9eb6b4789b69c9669e47576575 100644
--- a/multiview_platform/tests/test_utils/test_GetMultiviewDB.py
+++ b/multiview_platform/tests/test_utils/test_GetMultiviewDB.py
@@ -5,17 +5,19 @@ import h5py
 import numpy as np
 
 from ...mono_multi_view_classifiers.utils import get_multiview_db
+from ..utils import rm_tmp
 
 
 class Test_copyhdf5Dataset(unittest.TestCase):
 
     @classmethod
     def setUpClass(cls):
+        rm_tmp()
         cls.random_state = np.random.RandomState(42)
-        if not os.path.exists("multiview_platform/tests/temp_tests"):
-            os.mkdir("multiview_platform/tests/temp_tests")
+        if not os.path.exists("multiview_platform/tests/tmp_tests"):
+            os.mkdir("multiview_platform/tests/tmp_tests")
         cls.dataset_file = h5py.File(
-            "multiview_platform/tests/temp_tests/test_copy.hdf5", "w")
+            "multiview_platform/tests/tmp_tests/test_copy.hdf5", "w")
         cls.dataset = cls.dataset_file.create_dataset("test",
                                                       data=cls.random_state.randint(
                                                           0, 100, (10, 20)))
@@ -41,20 +43,21 @@ class Test_copyhdf5Dataset(unittest.TestCase):
 
     @classmethod
     def tearDownClass(cls):
-        os.remove("multiview_platform/tests/temp_tests/test_copy.hdf5")
-        os.rmdir("multiview_platform/tests/temp_tests")
+        os.remove("multiview_platform/tests/tmp_tests/test_copy.hdf5")
+        os.rmdir("multiview_platform/tests/tmp_tests")
 
 
 class Test_filterViews(unittest.TestCase):
 
     @classmethod
     def setUpClass(cls):
+        rm_tmp()
         cls.random_state = np.random.RandomState(42)
         cls.views = ["test_view_1", "test_view_2"]
-        if not os.path.exists("multiview_platform/tests/temp_tests"):
-            os.mkdir("multiview_platform/tests/temp_tests")
+        if not os.path.exists("multiview_platform/tests/tmp_tests"):
+            os.mkdir("multiview_platform/tests/tmp_tests")
         cls.dataset_file = h5py.File(
-            "multiview_platform/tests/temp_tests/test_copy.hdf5", "w")
+            "multiview_platform/tests/tmp_tests/test_copy.hdf5", "w")
         cls.metadata_group = cls.dataset_file.create_group("Metadata")
         cls.metadata_group.attrs["nbView"] = 4
 
@@ -66,7 +69,7 @@ class Test_filterViews(unittest.TestCase):
 
     def test_simple_filter(cls):
         cls.temp_dataset_file = h5py.File(
-            "multiview_platform/tests/temp_tests/test_copy_temp.hdf5", "w")
+            "multiview_platform/tests/tmp_tests/test_copy_temp.hdf5", "w")
         cls.dataset_file.copy("Metadata", cls.temp_dataset_file)
         get_multiview_db.filter_views(cls.dataset_file, cls.temp_dataset_file,
                                      cls.views, np.arange(10))
@@ -79,7 +82,7 @@ class Test_filterViews(unittest.TestCase):
 
     def test_filter_view_and_examples(cls):
         cls.temp_dataset_file = h5py.File(
-            "multiview_platform/tests/temp_tests/test_copy_temp.hdf5", "w")
+            "multiview_platform/tests/tmp_tests/test_copy_temp.hdf5", "w")
         cls.dataset_file.copy("Metadata", cls.temp_dataset_file)
         usedIndices = cls.random_state.choice(10, 6, replace=False)
         get_multiview_db.filter_views(cls.dataset_file, cls.temp_dataset_file,
@@ -91,9 +94,9 @@ class Test_filterViews(unittest.TestCase):
 
     @classmethod
     def tearDownClass(cls):
-        os.remove("multiview_platform/tests/temp_tests/test_copy.hdf5")
-        os.remove("multiview_platform/tests/temp_tests/test_copy_temp.hdf5")
-        os.rmdir("multiview_platform/tests/temp_tests")
+        os.remove("multiview_platform/tests/tmp_tests/test_copy.hdf5")
+        os.remove("multiview_platform/tests/tmp_tests/test_copy_temp.hdf5")
+        os.rmdir("multiview_platform/tests/tmp_tests")
 
 
 #
@@ -336,11 +339,12 @@ class Test_getClassicDBhdf5(unittest.TestCase):
 
     @classmethod
     def setUpClass(cls):
-        if not os.path.exists("multiview_platform/tests/temp_tests"):
-            os.mkdir("multiview_platform/tests/temp_tests")
+        rm_tmp()
+        if not os.path.exists("multiview_platform/tests/tmp_tests"):
+            os.mkdir("multiview_platform/tests/tmp_tests")
         cls.dataset_file = h5py.File(
-            "multiview_platform/tests/temp_tests/test_dataset.hdf5", "w")
-        cls.pathF = "multiview_platform/tests/temp_tests/"
+            "multiview_platform/tests/tmp_tests/test_dataset.hdf5", "w")
+        cls.pathF = "multiview_platform/tests/tmp_tests/"
         cls.nameDB = "test_dataset"
         cls.NB_CLASS = 2
         cls.askedLabelsNames = ["test_label_1", "test_label_3"]
@@ -446,21 +450,22 @@ class Test_getClassicDBhdf5(unittest.TestCase):
     @classmethod
     def tearDownClass(cls):
         os.remove(
-            "multiview_platform/tests/temp_tests/test_dataset_temp_view_label_select.hdf5")
-        os.remove("multiview_platform/tests/temp_tests/test_dataset.hdf5")
-        dirs = os.listdir("multiview_platform/tests/temp_tests")
+            "multiview_platform/tests/tmp_tests/test_dataset_temp_view_label_select.hdf5")
+        os.remove("multiview_platform/tests/tmp_tests/test_dataset.hdf5")
+        dirs = os.listdir("multiview_platform/tests/tmp_tests")
         for dir in dirs:
             print(dir)
-        os.rmdir("multiview_platform/tests/temp_tests")
+        os.rmdir("multiview_platform/tests/tmp_tests")
 
 
 class Test_getClassicDBcsv(unittest.TestCase):
 
     @classmethod
     def setUpClass(cls):
-        if not os.path.exists("multiview_platform/tests/temp_tests"):
-            os.mkdir("multiview_platform/tests/temp_tests")
-        cls.pathF = "multiview_platform/tests/temp_tests/"
+        rm_tmp()
+        if not os.path.exists("multiview_platform/tests/tmp_tests"):
+            os.mkdir("multiview_platform/tests/tmp_tests")
+        cls.pathF = "multiview_platform/tests/tmp_tests/"
         cls.NB_CLASS = 2
         cls.nameDB = "test_dataset"
         cls.askedLabelsNames = ["test_label_1", "test_label_3"]
@@ -566,15 +571,15 @@ class Test_getClassicDBcsv(unittest.TestCase):
     def tearDownClass(cls):
         for i in range(4):
             os.remove(
-                "multiview_platform/tests/temp_tests/Views/test_view_" + str(
+                "multiview_platform/tests/tmp_tests/Views/test_view_" + str(
                     i) + ".csv")
-        os.rmdir("multiview_platform/tests/temp_tests/Views")
+        os.rmdir("multiview_platform/tests/tmp_tests/Views")
         os.remove(
-            "multiview_platform/tests/temp_tests/test_dataset-labels-names.csv")
-        os.remove("multiview_platform/tests/temp_tests/test_dataset-labels.csv")
-        os.remove("multiview_platform/tests/temp_tests/test_dataset.hdf5")
+            "multiview_platform/tests/tmp_tests/test_dataset-labels-names.csv")
+        os.remove("multiview_platform/tests/tmp_tests/test_dataset-labels.csv")
+        os.remove("multiview_platform/tests/tmp_tests/test_dataset.hdf5")
         os.remove(
-            "multiview_platform/tests/temp_tests/test_dataset_temp_view_label_select.hdf5")
-        for file in os.listdir("multiview_platform/tests/temp_tests"): print(
+            "multiview_platform/tests/tmp_tests/test_dataset_temp_view_label_select.hdf5")
+        for file in os.listdir("multiview_platform/tests/tmp_tests"): print(
             file)
-        os.rmdir("multiview_platform/tests/temp_tests")
+        os.rmdir("multiview_platform/tests/tmp_tests")
diff --git a/multiview_platform/tests/test_utils/test_configuration.py b/multiview_platform/tests/test_utils/test_configuration.py
index 289af382ed4695e55d0c1528459cc905d1af7909..324deb20c34c7cdb8ed11f88ca5c4e30321fa8c9 100644
--- a/multiview_platform/tests/test_utils/test_configuration.py
+++ b/multiview_platform/tests/test_utils/test_configuration.py
@@ -9,15 +9,17 @@ from multiview_platform.mono_multi_view_classifiers.utils import configuration
 
 class Test_get_the_args(unittest.TestCase):
 
-    def setUp(self):
+    @classmethod
+    def setUpClass(cls):
         rm_tmp()
-        self.path_to_config_file = "multiview_platform/tests/tmp_tests/config_temp.yml"
+        cls.path_to_config_file = "multiview_platform/tests/tmp_tests/config_temp.yml"
         os.mkdir("multiview_platform/tests/tmp_tests")
         data = {"Base":{"first_arg": 10, "second_arg":[12.5, 1e-06]}, "Classification":{"third_arg":True}}
-        with open(self.path_to_config_file, "w") as config_file:
+        with open(cls.path_to_config_file, "w") as config_file:
             yaml.dump(data, config_file)
 
-    def tearDown(self):
+    @classmethod
+    def tearDownClass(cls):
         os.remove("multiview_platform/tests/tmp_tests/config_temp.yml")
         os.rmdir("multiview_platform/tests/tmp_tests")
 
diff --git a/multiview_platform/tests/test_utils/test_execution.py b/multiview_platform/tests/test_utils/test_execution.py
index 2c937a23d7d88b6903a5d84c1539692f15f6c8ac..7079200f75988903d414e529f0f6bfb61fe6053d 100644
--- a/multiview_platform/tests/test_utils/test_execution.py
+++ b/multiview_platform/tests/test_utils/test_execution.py
@@ -26,27 +26,17 @@ class Test_initStatsIterRandomStates(unittest.TestCase):
         cls.statsIter = 1
 
     def test_one_statiter(cls):
-<<<<<<< HEAD
+
         cls.state = cls.random_state.get_state()[1]
-        statsIterRandomStates = execution.initStatsIterRandomStates(
-            cls.statsIter, cls.random_state)
-=======
-        cls.state = cls.randomState.get_state()[1]
         statsIterRandomStates = execution.init_stats_iter_random_states(
-            cls.statsIter, cls.randomState)
->>>>>>> 7b3e918b4fb2938657cae3093d95b1bd6fc461d4
+            cls.statsIter, cls.random_state)
         np.testing.assert_array_equal(statsIterRandomStates[0].get_state()[1],
                                       cls.state)
 
     def test_multiple_iter(cls):
         cls.statsIter = 3
-<<<<<<< HEAD
-        statsIterRandomStates = execution.initStatsIterRandomStates(
-            cls.statsIter, cls.random_state)
-=======
         statsIterRandomStates = execution.init_stats_iter_random_states(
-            cls.statsIter, cls.randomState)
->>>>>>> 7b3e918b4fb2938657cae3093d95b1bd6fc461d4
+            cls.statsIter, cls.random_state)
         cls.assertAlmostEqual(len(statsIterRandomStates), 3)
         cls.assertNotEqual(statsIterRandomStates[0].randint(5000),
                            statsIterRandomStates[1].randint(5000))
@@ -96,24 +86,9 @@ class Test_initRandomState(unittest.TestCase):
 
     def test_random_state_42(self):
         randomState_42 = np.random.RandomState(42)
-<<<<<<< HEAD
-        random_state = execution.initRandomState("42",
-                                                "multiview_platform/tests/temp_tests/")
-        os.remove("multiview_platform/tests/temp_tests/random_state.pickle")
-        np.testing.assert_array_equal(random_state.beta(1, 100, 100),
-                                      randomState_42.beta(1, 100, 100))
-
-    def test_random_state_pickle(self):
-        randomState_to_pickle = execution.initRandomState(None,
-                                                          "multiview_platform/tests/temp_tests/")
-        pickled_randomState = execution.initRandomState(
-            "multiview_platform/tests/temp_tests/random_state.pickle",
-            "multiview_platform/tests/temp_tests/")
-        os.remove("multiview_platform/tests/temp_tests/random_state.pickle")
-=======
         randomState = execution.init_random_state("42",
                                                 "multiview_platform/tests/tmp_tests/")
-        os.remove("multiview_platform/tests/tmp_tests/randomState.pickle")
+        os.remove("multiview_platform/tests/tmp_tests/random_state.pickle")
         np.testing.assert_array_equal(randomState.beta(1, 100, 100),
                                       randomState_42.beta(1, 100, 100))
 
@@ -121,10 +96,9 @@ class Test_initRandomState(unittest.TestCase):
         randomState_to_pickle = execution.init_random_state(None,
                                                           "multiview_platform/tests/tmp_tests/")
         pickled_randomState = execution.init_random_state(
-            "multiview_platform/tests/tmp_tests/randomState.pickle",
+            "multiview_platform/tests/tmp_tests/random_state.pickle",
             "multiview_platform/tests/tmp_tests/")
-        os.remove("multiview_platform/tests/tmp_tests/randomState.pickle")
->>>>>>> 7b3e918b4fb2938657cae3093d95b1bd6fc461d4
+        os.remove("multiview_platform/tests/tmp_tests/random_state.pickle")
         np.testing.assert_array_equal(randomState_to_pickle.beta(1, 100, 100),
                                       pickled_randomState.beta(1, 100, 100))
 
diff --git a/multiview_platform/tests/test_utils/test_multiclass.py b/multiview_platform/tests/test_utils/test_multiclass.py
index 3a9578867ec438e1748a59a8a70cd93224af7b40..dcb764267646f224c20ac14468a046d101d07219 100644
--- a/multiview_platform/tests/test_utils/test_multiclass.py
+++ b/multiview_platform/tests/test_utils/test_multiclass.py
@@ -21,13 +21,8 @@ class Test_genMulticlassLabels(unittest.TestCase):
              cls.testIndices[1]]]
 
     def test_one_versus_one(cls):
-<<<<<<< HEAD
-        multiclassLabels, labelsIndices, oldIndicesMulticlass = multiclass.genMulticlassLabels(
-            cls.labels, "oneVersusOne", cls.classification_indices)
-=======
         multiclassLabels, labelsIndices, oldIndicesMulticlass = multiclass.gen_multiclass_labels(
-            cls.labels, "oneVersusOne", cls.classificationIndices)
->>>>>>> 7b3e918b4fb2938657cae3093d95b1bd6fc461d4
+            cls.labels, "oneVersusOne", cls.classification_indices)
         cls.assertEqual(len(multiclassLabels), 10)
         cls.assertEqual(labelsIndices,
                         [(0, 1), (0, 2), (0, 3), (0, 4), (1, 2), (1, 3), (1, 4),