diff --git a/config_files/config.yml b/config_files/config.yml
index b22cd1855bdc29cf024e1b6d565b464ce07abff2..318ef51d36e0a6d7fe96dc0201c48c638fb12bbd 100644
--- a/config_files/config.yml
+++ b/config_files/config.yml
@@ -143,3 +143,36 @@ weighted_linear_early_fusion:
       max_depth: [1]
       criterion: ["gini"]
       splitter: ["best"]
+
+entropy_fusion:
+  classifier_names: ["decison_tree"]
+  classifier_configs:
+    decision_tree:
+      max_depth: [1]
+      criterion: ["gini"]
+      splitter: ["best"]
+
+disagree_fusion:
+  classifier_names: ["decison_tree"]
+  classifier_configs:
+    decision_tree:
+      max_depth: [1]
+      criterion: ["gini"]
+      splitter: ["best"]
+
+
+double_fault_fusion:
+  classifier_names: ["decison_tree"]
+  classifier_configs:
+    decision_tree:
+      max_depth: [1]
+      criterion: ["gini"]
+      splitter: ["best"]
+
+difficulty_fusion:
+  classifier_names: ["decison_tree"]
+  classifier_configs:
+    decision_tree:
+      max_depth: [1]
+      criterion: ["gini"]
+      splitter: ["best"]
\ No newline at end of file
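Each of the four new sections pairs a list of monoview classifier names with, per classifier, lists of candidate hyper-parameter values. A minimal sketch of how one such section could be consumed; the file path and the choice of taking the first candidate value are assumptions for illustration:

```python
import yaml

# Hypothetical consumption of one of the sections above.
with open("config_files/config.yml") as config_file:
    config = yaml.safe_load(config_file)

section = config["entropy_fusion"]
name = section["classifier_names"][0]        # "decision_tree"
grid = section["classifiers_configs"][name]  # candidate values per parameter
# Each value is a list of candidates; pick the first for a concrete config.
kwargs = {param: values[0] for param, values in grid.items()}
print(name, kwargs)  # decision_tree {'max_depth': 1, 'criterion': 'gini', 'splitter': 'best'}
```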
diff --git a/multiview_platform/mono_multi_view_classifiers/multiview/additions/__init__.py b/multiview_platform/mono_multi_view_classifiers/multiview/additions/__init__.py
index ded01232c360476be91c1eeba56bcb76af045be6..8b137891791fe96927ad78e64b0aad7bded08bdc 100644
--- a/multiview_platform/mono_multi_view_classifiers/multiview/additions/__init__.py
+++ b/multiview_platform/mono_multi_view_classifiers/multiview/additions/__init__.py
@@ -1 +1 @@
-from . import diversity_utils
+
diff --git a/multiview_platform/mono_multi_view_classifiers/multiview/additions/diversity_utils.py b/multiview_platform/mono_multi_view_classifiers/multiview/additions/diversity_utils.py
deleted file mode 100644
index 9cf1ba574b9acfe3e0374a67313704f9bf1fe3e5..0000000000000000000000000000000000000000
--- a/multiview_platform/mono_multi_view_classifiers/multiview/additions/diversity_utils.py
+++ /dev/null
@@ -1,317 +0,0 @@
-import itertools
-import math
-import os
-
-import numpy as np
-
-from ...utils.multiclass import is_biclass, gen_multiclass_monoview_decision
-
-
-def getClassifiersDecisions(allClassifersNames, viewsIndices, resultsMonoview):
-    """
-    This function gets the monoview classifiers decisions from resultsMonoview.
-    If no HP optimization is done, there is just one fold, the training set.
-    The classifiersDecisions variable is ordered as :
-    classifiersDecisions[viewIndex, classifierIndex, foldIndex, exampleIndex]
-    And the classifiersNames variable is ordered as :
-    classifiersNames[viewIndex][classifierIndex]
-    """
-    nbViews = len(viewsIndices)
-    nbClassifiers = len(allClassifersNames)
-    classifiersNames = [[] for _ in viewsIndices]
-    more_than_one_fold = len(resultsMonoview[0].test_folds_preds.shape) is not 1
-    if more_than_one_fold:
-        nbFolds = resultsMonoview[0].test_folds_preds.shape[0]
-        foldsLen = resultsMonoview[0].test_folds_preds.shape[1]
-    else:
-        nbFolds = 1
-        foldsLen = resultsMonoview[0].test_folds_preds.shape[0]
-
-    classifiersDecisions = np.zeros((nbViews, nbClassifiers, nbFolds, foldsLen))
-
-    for resultMonoview in resultsMonoview:
-        if resultMonoview.classifier_name in classifiersNames[
-            viewsIndices.index(resultMonoview.view_index)]:
-            pass
-        else:
-            classifiersNames[
-                viewsIndices.index(resultMonoview.view_index)].append(
-                resultMonoview.classifier_name)
-        classifierIndex = classifiersNames[
-            viewsIndices.index(resultMonoview.view_index)].index(
-            resultMonoview.classifier_name)
-        classifiersDecisions[viewsIndices.index(
-            resultMonoview.view_index), classifierIndex] = resultMonoview.test_folds_preds
-    # else:
-    #     train_len = resultsMonoview[0].test_folds_preds.shape[0]
-    #     classifiersDecisions = np.zeros((nbViews, nbClassifiers, 1, train_len))
-    #     for resultMonoview in resultsMonoview:
-    #         if resultMonoview.classifier_name in classifiersNames[viewsIndices.index(resultMonoview[0])]:
-    #             pass
-    #         else:
-    #             classifiersNames[viewsIndices.index(resultMonoview[0])].append(resultMonoview[1][0])
-    #         classifierIndex = classifiersNames[viewsIndices.index(resultMonoview[0])].index(resultMonoview[1][0])
-    #         classifiersDecisions[viewsIndices.index(resultMonoview[0]), classifierIndex] = resultMonoview[1][6]
-    return classifiersDecisions, classifiersNames
-
-
-def couple_div_measure(classifiersNames, classifiersDecisions, measurement,
-                       foldsGroudTruth):
-    """
-    This function is used to get the max of a couple diversity measurement,passed as an argument
-    It generates all possible combinations and all the couples to estimate the diversity on a combination
-    The best combination is the one that maximize the measurement.
-    """
-
-    nbViews, nbClassifiers, nbFolds, foldsLen = classifiersDecisions.shape
-    combinations = itertools.combinations_with_replacement(range(nbClassifiers),
-                                                           nbViews)
-    nbCombinations = int(
-        math.factorial(nbClassifiers + nbViews - 1) / math.factorial(
-            nbViews) / math.factorial(nbClassifiers - 1))
-    div_measure = np.zeros(nbCombinations)
-    combis = np.zeros((nbCombinations, nbViews), dtype=int)
-
-    for combinationsIndex, combination in enumerate(combinations):
-        combis[combinationsIndex] = combination
-        combiWithView = [(viewIndex, combiIndex) for viewIndex, combiIndex in
-                         enumerate(combination)]
-        binomes = itertools.combinations(combiWithView, 2)
-        nbBinomes = int(
-            math.factorial(nbViews) / 2 / math.factorial(nbViews - 2))
-        couple_diversities = np.zeros(nbBinomes)
-        for binomeIndex, binome in enumerate(binomes):
-            (viewIndex1, classifierIndex1), (
-            viewIndex2, classifierIndex2) = binome
-            folds_couple_diversity = np.mean(
-                measurement(classifiersDecisions[viewIndex1, classifierIndex1],
-                            classifiersDecisions[viewIndex2, classifierIndex2],
-                            foldsGroudTruth)
-                , axis=1)
-            couple_diversities[binomeIndex] = np.mean(folds_couple_diversity)
-        div_measure[combinationsIndex] = np.mean(couple_diversities)
-    bestCombiIndex = np.argmax(div_measure)
-    bestCombination = combis[bestCombiIndex]
-
-    return [classifiersNames[viewIndex][index] for viewIndex, index in
-            enumerate(bestCombination)], div_measure[bestCombiIndex]
-
-
-def global_div_measure(classifiersNames, classifiersDecisions, measurement,
-                       foldsGroudTruth):
-    """
-    This function is used to get the max of a diversity measurement,passed as an argument
-    It generates all possible combinations to estimate the diversity on a combination
-    The best combination is the one that maximize the measurement.
-    """
-
-    nbViews, nbClassifiers, nbFolds, foldsLen = classifiersDecisions.shape
-    combinations = itertools.combinations_with_replacement(range(nbClassifiers),
-                                                           nbViews)
-    nbCombinations = int(
-        math.factorial(nbClassifiers + nbViews - 1) / math.factorial(
-            nbViews) / math.factorial(
-            nbClassifiers - 1))
-    div_measure = np.zeros(nbCombinations)
-    combis = np.zeros((nbCombinations, nbViews), dtype=int)
-    for combinationsIndex, combination in enumerate(combinations):
-        combis[combinationsIndex] = combination
-        div_measure[combinationsIndex] = measurement(classifiersDecisions,
-                                                     combination,
-                                                     foldsGroudTruth, foldsLen)
-    bestCombiIndex = np.argmax(div_measure)
-    bestCombination = combis[bestCombiIndex]
-
-    return [classifiersNames[viewIndex][index] for viewIndex, index in
-            enumerate(bestCombination)], div_measure[
-               bestCombiIndex]
-
-
-def CQ_div_measure(classifiersNames, classifiersDecisions, measurement,
-                   foldsGroudTruth):
-    """
-    This function is used to measure a pseudo-CQ measurement based on the minCq algorithm.
-    It's a mix between couple_div_measure and global_div_measure that uses multiple measurements.
-    """
-    nbViews, nbClassifiers, nbFolds, foldsLen = classifiersDecisions.shape
-    combinations = itertools.combinations_with_replacement(range(nbClassifiers),
-                                                           nbViews)
-    nbCombinations = int(
-        math.factorial(nbClassifiers + nbViews - 1) / math.factorial(
-            nbViews) / math.factorial(nbClassifiers - 1))
-    div_measure = np.zeros(nbCombinations)
-    combis = np.zeros((nbCombinations, nbViews), dtype=int)
-
-    for combinationsIndex, combination in enumerate(combinations):
-        combis[combinationsIndex] = combination
-        combiWithView = [(viewIndex, combiIndex) for viewIndex, combiIndex in
-                         enumerate(combination)]
-        binomes = itertools.combinations(combiWithView, 2)
-        nbBinomes = int(
-            math.factorial(nbViews) / 2 / math.factorial(nbViews - 2))
-        disagreement = np.zeros(nbBinomes)
-        div_measure[combinationsIndex] = measurement[1](classifiersDecisions,
-                                                        combination,
-                                                        foldsGroudTruth,
-                                                        foldsLen)
-        for binomeIndex, binome in enumerate(binomes):
-            (viewIndex1, classifierIndex1), (
-            viewIndex2, classifierIndex2) = binome
-            nbDisagree = np.sum(measurement[0](
-                classifiersDecisions[viewIndex1, classifierIndex1],
-                classifiersDecisions[viewIndex2, classifierIndex2],
-                foldsGroudTruth)
-                                , axis=1) / float(foldsLen)
-            disagreement[binomeIndex] = np.mean(nbDisagree)
-        div_measure[combinationsIndex] /= float(np.mean(disagreement))
-    bestCombiIndex = np.argmin(div_measure)
-    bestCombination = combis[bestCombiIndex]
-
-    return [classifiersNames[viewIndex][index] for viewIndex, index in
-            enumerate(bestCombination)], div_measure[
-               bestCombiIndex]
-
-
-def getFoldsGroundTruth(directory, folds=True):
-    """This function is used to get the labels of each fold example used in the measurements
-    foldsGroundTruth is formatted as
-    foldsGroundTruth[foldIndex, exampleIndex]"""
-    if folds:
-        foldsFilesNames = os.listdir(directory + "folds/")
-        foldLen = len(np.genfromtxt(directory + "folds/" + foldsFilesNames[0],
-                                    delimiter=','))
-        foldsGroudTruth = np.zeros((len(foldsFilesNames), foldLen), dtype=int)
-        for fileName in foldsFilesNames:
-            foldIndex = int(fileName[-5])
-            foldsGroudTruth[foldIndex] = np.genfromtxt(
-                directory + "folds/" + fileName, delimiter=',')[:foldLen]
-        return foldsGroudTruth
-    else:
-        train_labels = np.genfromtxt(directory + "train_labels.csv",
-                                     delimiter=',')
-        foldsGroudTruth = np.zeros((1, train_labels.shape[0]))
-        foldsGroudTruth[0] = train_labels
-        return foldsGroudTruth
-
-
-def getArgs(args, benchmark, views, viewsIndices, randomState,
-            directory, resultsMonoview, classificationIndices, measurement,
-            name):
-    """This function is a general function to get the args for all the measurements used"""
-    if len(resultsMonoview[0].test_folds_preds.shape) is not 1:
-        foldsGroundTruth = getFoldsGroundTruth(directory, folds=True)
-    else:
-        foldsGroundTruth = getFoldsGroundTruth(directory, folds=False)
-    monoviewClassifierModulesNames = benchmark["monoview"]
-    classifiersDecisions, classifiersNames = getClassifiersDecisions(
-        monoviewClassifierModulesNames,
-        viewsIndices,
-        resultsMonoview)
-    if name in ['disagree_fusion', 'double_fault_fusion']:
-        classifiersNames, div_measure = couple_div_measure(classifiersNames,
-                                                           classifiersDecisions,
-                                                           measurement,
-                                                           foldsGroundTruth)
-    elif name == "pseudo_cq_fusion":
-        classifiersNames, div_measure = CQ_div_measure(classifiersNames,
-                                                       classifiersDecisions,
-                                                       measurement,
-                                                       foldsGroundTruth)
-    else:
-        classifiersNames, div_measure = global_div_measure(classifiersNames,
-                                                           classifiersDecisions,
-                                                           measurement,
-                                                           foldsGroundTruth)
-    multiclass_preds = [monoviewResult.y_test_multiclass_pred for monoviewResult
-                        in resultsMonoview]
-    if isBiclass(multiclass_preds):
-        monoviewDecisions = np.array(
-            [monoviewResult.full_labels_pred for monoviewResult in
-             resultsMonoview
-             if
-             classifiersNames[viewsIndices.index(monoviewResult.view_index)] ==
-             monoviewResult.classifier_name])
-    else:
-        monoviewDecisions = np.array(
-            [genMulticlassMonoviewDecision(monoviewResult,
-                                           classificationIndices) for
-             monoviewResult in
-             resultsMonoview if classifiersNames[viewsIndices.index(
-                monoviewResult.view_index)] == monoviewResult.classifier_name])
-    argumentsList = []
-    arguments = {"CL_type": name,
-                 "views": views,
-                 "NB_VIEW": len(views),
-                 "viewsIndices": viewsIndices,
-                 "NB_CLASS": len(args.CL_classes),
-                 "LABELS_NAMES": args.CL_classes,
-                 name + "KWARGS": {
-                     "weights": args.DGF_weights,
-                     "classifiersNames": classifiersNames,
-                     "monoviewDecisions": monoviewDecisions,
-                     "nbCLass": len(args.CL_classes),
-                     "div_measure": div_measure
-                 }
-                 }
-    argumentsList.append(arguments)
-    return argumentsList
-
-
-def genParamsSets(classificationKWARGS, randomState, nIter=1):
-    """Used to generate parameters sets for the random hyper parameters optimization function"""
-    weights = [
-        randomState.random_sample(len(classificationKWARGS["classifiersNames"]))
-        for _ in range(nIter)]
-    nomralizedWeights = [[weightVector / np.sum(weightVector)] for weightVector
-                         in weights]
-    return nomralizedWeights
-
-
-class DiversityFusionClass:
-    """This is a parent class for all the diversity fusion based classifiers."""
-
-    def __init__(self, randomState, NB_CORES=1, **kwargs):
-        """Used to init the instances"""
-        if kwargs["weights"] == []:
-            self.weights = [1.0 / len(kwargs["classifiersNames"]) for _ in
-                            range(len(kwargs["classifiersNames"]))]
-        else:
-            self.weights = np.array(kwargs["weights"]) / np.sum(
-                np.array(kwargs["weights"]))
-        self.monoviewDecisions = kwargs["monoviewDecisions"]
-        self.classifiersNames = kwargs["classifiersNames"]
-        self.nbClass = kwargs["nbCLass"]
-        self.div_measure = kwargs["div_measure"]
-
-    def setParams(self, paramsSet):
-        """ Used to set the weights"""
-        self.weights = paramsSet[0]
-
-    def fit_hdf5(self, DATASET, labels, trainIndices=None, viewsIndices=None,
-                 metric=["f1_score", None]):
-        """No need to fit as the monoview classifiers are already fitted"""
-        pass
-
-    def predict_hdf5(self, DATASET, usedIndices=None, viewsIndices=None):
-        """Just a weighted majority vote"""
-        if usedIndices is None:
-            usedIndices = range(DATASET.get("Metadata").attrs["datasetLength"])
-        votes = np.zeros((len(usedIndices), self.nbClass), dtype=float)
-        for usedIndex, exampleIndex in enumerate(usedIndices):
-            for monoviewDecisionIndex, monoviewDecision in enumerate(
-                    self.monoviewDecisions):
-                votes[usedIndex, monoviewDecision[
-                    exampleIndex]] += 1  # self.weights[monoviewDecisionIndex]
-        predictedLabels = np.argmax(votes, axis=1)
-        return predictedLabels
-
-    def predict_probas_hdf5(self, DATASET, usedIndices=None):
-        pass
-
-    def getConfigString(self, classificationKWARGS):
-        return "weights : " + ", ".join(map(str, list(self.weights)))
-
-    def getSpecificAnalysis(self, classificationKWARGS):
-        stringAnalysis = "Classifiers used for each view : " + ', '.join(
-            self.classifiersNames)
-        return stringAnalysis
diff --git a/multiview_platform/mono_multi_view_classifiers/multiview/multiview_utils.py b/multiview_platform/mono_multi_view_classifiers/multiview/multiview_utils.py
index dd4767d557d8b05d4d1d8a3676d9d162e1e12364..3370cd9938477baac14c645812a26d56045c9ae1 100644
--- a/multiview_platform/mono_multi_view_classifiers/multiview/multiview_utils.py
+++ b/multiview_platform/mono_multi_view_classifiers/multiview/multiview_utils.py
@@ -84,15 +84,13 @@ class BaseMultiviewClassifier(BaseEstimator, ClassifierMixin):
         return "No detailed interpretation function"
 
 
-
-
-def get_train_views_indices(dataset, train_indices, view_indices,):
+def get_examples_views_indices(dataset, examples_indices, view_indices):
     """This function  is used to get all the examples indices and view indices if needed"""
     if view_indices is None:
         view_indices = np.arange(dataset["Metadata"].attrs["nbView"])
-    if train_indices is None:
-        train_indices = range(dataset["Metadata"].attrs["datasetLength"])
-    return train_indices, view_indices
+    if examples_indices is None:
+        examples_indices = range(dataset["Metadata"].attrs["datasetLength"])
+    return examples_indices, view_indices
 
 
 class ConfigGenerator():
@@ -100,8 +98,7 @@ class ConfigGenerator():
     def __init__(self, classifier_names):
         self.distribs = {}
         for classifier_name in classifier_names:
-            classifier_module = getattr(monoview_classifiers, classifier_name)
-            classifier_class = getattr(classifier_module, classifier_module.classifier_class_name)
+            classifier_class = get_monoview_classifier(classifier_name)
             self.distribs[classifier_name] = dict((param_name, param_distrib)
                                   for param_name, param_distrib in
                                   zip(classifier_class().param_names,
@@ -118,3 +115,15 @@ class ConfigGenerator():
                     config_sample[classifier_name][
                         param_name] = param_distrib[random_state.randint(len(param_distrib))]
         return config_sample
+
+
+def get_available_monoview_classifiers():
+    classifiers_names = [module_name
+                         for module_name in dir(monoview_classifiers)
+                         if not module_name.startswith("__")]
+    return classifiers_names
+
+def get_monoview_classifier(classifier_name):
+    classifier_module = getattr(monoview_classifiers, classifier_name)
+    classifier_class = getattr(classifier_module, classifier_module.classifier_class_name)
+    return classifier_class
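The two new helpers centralize a lookup that weighted_linear_early_fusion previously inlined (see its diff below). A minimal usage sketch, assuming the multiview_platform package is importable:

```python
from multiview_platform.mono_multi_view_classifiers.multiview.multiview_utils import (
    get_available_monoview_classifiers, get_monoview_classifier)

names = get_available_monoview_classifiers()          # module names, e.g. ['decision_tree', ...]
classifier_class = get_monoview_classifier(names[0])  # resolved via classifier_class_name
classifier = classifier_class()                       # instantiate with default parameters
```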
diff --git a/multiview_platform/mono_multi_view_classifiers/multiview_classifiers/additions/diversity_utils.py b/multiview_platform/mono_multi_view_classifiers/multiview_classifiers/additions/diversity_utils.py
new file mode 100644
index 0000000000000000000000000000000000000000..a72d82aa1370d82c72234e5f7c694872b283f882
--- /dev/null
+++ b/multiview_platform/mono_multi_view_classifiers/multiview_classifiers/additions/diversity_utils.py
@@ -0,0 +1,150 @@
+import itertools
+import math
+import inspect
+from sklearn.base import clone
+
+import numpy as np
+
+from ...multiview.multiview_utils import ConfigGenerator, \
+    get_monoview_classifier, get_examples_views_indices, \
+    get_available_monoview_classifiers, BaseMultiviewClassifier
+
+
+class DiversityFusion(BaseMultiviewClassifier):
+    """This is the base class for all the diversity fusion based classifiers."""
+
+    def __init__(self, random_state=None, classifier_names=None,
+                 monoview_estimators=None, classifiers_configs=None):
+        """Used to init the instances"""
+        super(DiversityFusion, self).__init__(random_state)
+        if classifier_names is None:
+            classifier_names = get_available_monoview_classifiers()
+        self.classifier_names = classifier_names
+        self.param_names = ["classifiers_configs"]
+        self.distribs = [ConfigGenerator(get_available_monoview_classifiers())]
+        self.estimator_pool = monoview_estimators
+        self.classifiers_configs = classifiers_configs
+
+    def fit(self, X, y, train_indices=None, views_indices=None):
+        train_indices, views_indices = get_examples_views_indices(X,
+                                                                  train_indices,
+                                                                  views_indices)
+        if self.estimator_pool is None:
+            self.estimator_pool = []
+            for classifier_idx, classifier_name in enumerate(self.classifier_names):
+                self.estimator_pool.append([])
+                if self.classifiers_configs is not None and classifier_name in self.classifiers_configs:
+                    if 'random_state' in inspect.getfullargspec(get_monoview_classifier(classifier_name).__init__).args:
+                        estimator = get_monoview_classifier(classifier_name)(random_state=self.random_state,
+                                                                             **self.classifiers_configs[classifier_name])
+                    else:
+                        estimator = get_monoview_classifier(classifier_name)(
+                            **self.classifiers_configs[classifier_name])
+                else:
+                    if 'random_state' in inspect.getfullargspec(get_monoview_classifier(classifier_name).__init__).args:
+                        estimator = get_monoview_classifier(classifier_name)(random_state=self.random_state)
+                    else:
+                        estimator = get_monoview_classifier(classifier_name)()
+                for view_idx in views_indices:
+                    # Clone so each view gets its own fitted estimator.
+                    view_estimator = clone(estimator)
+                    view_estimator.fit(X.get_v(view_idx, train_indices),
+                                       y[train_indices])
+                    self.estimator_pool[classifier_idx].append(view_estimator)
+        else:
+            pass  # TODO: handle a user-provided, pre-fitted estimator pool
+        self.choose_combination(X, y, train_indices, views_indices)
+        return self
+
+    def predict(self, X, example_indices=None, views_indices=None):
+        """Just a weighted majority vote"""
+        example_indices, views_indices = get_examples_views_indices(X,
+                                                                    example_indices,
+                                                                    views_indices)
+        nb_class = X.get_nb_class(example_indices)
+        votes = np.zeros((len(example_indices), nb_class), dtype=float)
+        monoview_predictions = [monoview_estimator.predict(X.get_v(view_idx, example_indices))
+                                for view_idx, monoview_estimator
+                                in zip(views_indices, self.monoview_estimators)]
+        for idx in range(len(example_indices)):
+            for monoview_prediction in monoview_predictions:
+                # Predictions are aligned with example_indices, so index them
+                # by local position rather than by the global example index.
+                votes[idx, monoview_prediction[idx]] += 1
+        predicted_labels = np.argmax(votes, axis=1)
+        return predicted_labels
+
+    def get_classifiers_decisions(self, X, view_indices, examples_indices):
+        classifiers_decisions = np.zeros((len(self.estimator_pool),
+                                          len(view_indices),
+                                          len(examples_indices)))
+        for estimator_idx, view_estimators in enumerate(self.estimator_pool):
+            for idx, view_index in enumerate(view_indices):
+                classifiers_decisions[estimator_idx, idx, :] = view_estimators[
+                    idx].predict(X.get_v(view_index, examples_indices))
+        return classifiers_decisions
+
+    def init_combinations(self, X, example_indices, view_indices):
+        classifiers_decisions = self.get_classifiers_decisions(X, view_indices,
+                                                               example_indices)
+        nb_classifiers, nb_views, n_examples = classifiers_decisions.shape
+        combinations = itertools.combinations_with_replacement(
+            range(nb_classifiers),
+            nb_views)
+        nb_combinations = int(
+            math.factorial(nb_classifiers + nb_views - 1) / math.factorial(
+                nb_views) / math.factorial(
+                nb_classifiers - 1))
+        div_measure = np.zeros(nb_combinations)
+        combis = np.zeros((nb_combinations, nb_views), dtype=int)
+        return combinations, combis, div_measure, classifiers_decisions, nb_views
+
+
+class GlobalDiversityFusion(DiversityFusion):
+
+    def choose_combination(self, X, y, examples_indices, view_indices):
+        combinations, combis, div_measure, classifiers_decisions, nb_views = self.init_combinations(
+            X, examples_indices, view_indices)
+        for combination_index, combination in enumerate(combinations):
+            combis[combination_index] = combination
+            div_measure[combination_index] = self.diversity_measure(
+                classifiers_decisions,
+                combination,
+                y[examples_indices])
+        best_combi_index = np.argmax(div_measure)
+        best_combination = combis[best_combi_index]
+        self.monoview_estimators = [self.estimator_pool[classifier_index][view_index]
+                                    for view_index, classifier_index
+                                    in enumerate(best_combination)]
+
+
+class CoupleDiversityFusion(DiversityFusion):
+
+    def choose_combination(self, X, y, examples_indices, view_indices):
+        combinations, combis, div_measure, classifiers_decisions, nb_views = self.init_combinations(
+            X, examples_indices, view_indices)
+        for combination_index, combination in enumerate(combinations):
+            combis[combination_index] = combination
+            combi_with_view = [(view_index, combi_index)
+                               for view_index, combi_index
+                               in enumerate(combination)]
+            binomes = itertools.combinations(combi_with_view, 2)
+            nb_binomes = int(
+                math.factorial(nb_views) / 2 / math.factorial(nb_views - 2))
+            couple_diversities = np.zeros(nb_binomes)
+            for binome_index, binome in enumerate(binomes):
+                (view_index_1, classifier_index_1), (
+                    view_index_2, classifier_index_2) = binome
+                # classifiers_decisions is indexed [classifier, view, example]
+                couple_diversity = np.mean(
+                    self.diversity_measure(
+                        classifiers_decisions[classifier_index_1, view_index_1],
+                        classifiers_decisions[classifier_index_2, view_index_2],
+                        y[examples_indices]))
+                couple_diversities[binome_index] = couple_diversity
+            div_measure[combination_index] = np.mean(couple_diversities)
+        best_combi_index = np.argmax(div_measure)
+        best_combination = combis[best_combi_index]
+        self.monoview_estimators = [self.estimator_pool[classifier_index][view_index]
+                                    for view_index, classifier_index
+                                    in enumerate(best_combination)]
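A note on init_combinations: the closed-form nb_combinations is the multiset coefficient C(n + k - 1, k) with n classifiers and k views, which is exactly the number of tuples yielded by itertools.combinations_with_replacement. A standalone check:

```python
import itertools
import math

nb_classifiers, nb_views = 4, 3
# Closed form used in init_combinations: (n + k - 1)! / (k! * (n - 1)!)
closed_form = int(math.factorial(nb_classifiers + nb_views - 1)
                  / math.factorial(nb_views)
                  / math.factorial(nb_classifiers - 1))
enumerated = sum(1 for _ in itertools.combinations_with_replacement(
    range(nb_classifiers), nb_views))
assert closed_form == enumerated == 20  # C(6, 3)
```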
diff --git a/multiview_platform/mono_multi_view_classifiers/multiview_classifiers/additions/utils.py b/multiview_platform/mono_multi_view_classifiers/multiview_classifiers/additions/utils.py
index 8dbec755f0c66bf5549a4b94c81ff93ead587628..6d3c8a7b493dfdf2943e92e594e6f196c51449df 100644
--- a/multiview_platform/mono_multi_view_classifiers/multiview_classifiers/additions/utils.py
+++ b/multiview_platform/mono_multi_view_classifiers/multiview_classifiers/additions/utils.py
@@ -10,6 +10,7 @@ from sklearn.base import BaseEstimator, ClassifierMixin
 from sklearn.utils.validation import check_is_fitted
 
 from ... import metrics
+from ... import monoview_classifiers
 
 
 def get_names(classed_list):
diff --git a/multiview_platform/mono_multi_view_classifiers/multiview_classifiers/difficulty_fusion.py b/multiview_platform/mono_multi_view_classifiers/multiview_classifiers/difficulty_fusion.py
new file mode 100644
index 0000000000000000000000000000000000000000..4e45437746be19489f1552c67f173a73f6d45b53
--- /dev/null
+++ b/multiview_platform/mono_multi_view_classifiers/multiview_classifiers/difficulty_fusion.py
@@ -0,0 +1,31 @@
+import numpy as np
+
+from multiview_platform.mono_multi_view_classifiers.multiview_classifiers.additions.diversity_utils import GlobalDiversityFusion
+
+
+classifier_class_name = "DifficultyFusion"
+
+
+class DifficultyFusion(GlobalDiversityFusion):
+
+    def diversity_measure(self, classifiers_decisions, combination, y):
+
+        _, nb_view, nb_examples = classifiers_decisions.shape
+        scores = np.zeros((nb_view, nb_examples), dtype=int)
+        for view_index, classifier_index in enumerate(combination):
+            scores[view_index] = np.logical_not(
+                    np.logical_xor(classifiers_decisions[classifier_index,
+                                                         view_index],
+                                   y)
+                )
+        # Number of views that classify each example correctly.
+        difficulty_scores = np.sum(scores, axis=0)
+        # TODO : Check computing method
+        difficulty_score = np.mean(
+            np.var(
+                np.array([
+                    np.sum(difficulty_scores == possible_score) / float(nb_view)
+                    for possible_score in range(len(combination) + 1)])
+                , axis=0)
+        )
+        return difficulty_score
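A standalone toy run of the difficulty measure above, with three views and four examples; the correctness matrix is made up, and the computation keeps the original normalization by nb_view:

```python
import numpy as np

scores = np.array([[1, 1, 0, 1],
                   [1, 0, 0, 1],
                   [1, 1, 0, 0]])           # (nb_view, nb_examples) correctness
nb_view = scores.shape[0]
difficulty_scores = np.sum(scores, axis=0)  # correct views per example: [3 2 0 2]
histogram = np.array([np.sum(difficulty_scores == k) / float(nb_view)
                      for k in range(nb_view + 1)])
print(np.var(histogram))                    # ~0.0556
```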
diff --git a/multiview_platform/mono_multi_view_classifiers/multiview_classifiers/difficulty_fusion/__init__.py b/multiview_platform/mono_multi_view_classifiers/multiview_classifiers/difficulty_fusion/__init__.py
deleted file mode 100644
index 5c0d0a9eb5152a3f5c88ac99fd0fc83b1982873f..0000000000000000000000000000000000000000
--- a/multiview_platform/mono_multi_view_classifiers/multiview_classifiers/difficulty_fusion/__init__.py
+++ /dev/null
@@ -1 +0,0 @@
-from . import difficulty_fusion, analyze_results
\ No newline at end of file
diff --git a/multiview_platform/mono_multi_view_classifiers/multiview_classifiers/difficulty_fusion/analyze_results.py b/multiview_platform/mono_multi_view_classifiers/multiview_classifiers/difficulty_fusion/analyze_results.py
deleted file mode 100644
index 07cf0a7b2790efb3847b4895a5f1abb7e9c58a9c..0000000000000000000000000000000000000000
--- a/multiview_platform/mono_multi_view_classifiers/multiview_classifiers/difficulty_fusion/analyze_results.py
+++ /dev/null
@@ -1,21 +0,0 @@
-from ...multiview import analyze_results
-
-# Author-Info
-__author__ = "Baptiste Bauvin"
-__status__ = "Prototype"  # Production, Development, Prototype
-
-
-def execute(classifier, trainLabels,
-            testLabels, DATASET,
-            classificationKWARGS, classificationIndices,
-            LABELS_DICTIONARY, views, nbCores, times,
-            name, KFolds,
-            hyperParamSearch, nIter, metrics,
-            viewsIndices, randomState, labels, classifierModule):
-    return analyze_results.execute(classifier, trainLabels,
-                                   testLabels, DATASET,
-                                   classificationKWARGS, classificationIndices,
-                                   LABELS_DICTIONARY, views, nbCores, times,
-                                   name, KFolds,
-                                   hyperParamSearch, nIter, metrics,
-                                   viewsIndices, randomState, labels, classifierModule)
\ No newline at end of file
diff --git a/multiview_platform/mono_multi_view_classifiers/multiview_classifiers/difficulty_fusion/difficulty_fusion.py b/multiview_platform/mono_multi_view_classifiers/multiview_classifiers/difficulty_fusion/difficulty_fusion.py
deleted file mode 100644
index 007f9f009fce9beaf2877243ade81dfb86374d14..0000000000000000000000000000000000000000
--- a/multiview_platform/mono_multi_view_classifiers/multiview_classifiers/difficulty_fusion/difficulty_fusion.py
+++ /dev/null
@@ -1,54 +0,0 @@
-import numpy as np
-
-from ...multiview.additions import diversity_utils
-
-
-def genName(config):
-    return "difficulty_fusion"
-
-
-def getBenchmark(benchmark, args=None):
-    benchmark["multiview"]["difficulty_fusion"] = ["take_everything"]
-    return benchmark
-
-
-def difficulty(classifiersDecisions, combination, foldsGroudTruth, foldsLen):
-    nbView, _, nbFolds, nbExamples = classifiersDecisions.shape
-    scores = np.zeros((nbView, nbFolds, nbExamples), dtype=int)
-    for viewIndex, classifierIndex in enumerate(combination):
-        scores[viewIndex] = np.logical_not(
-            np.logical_xor(classifiersDecisions[viewIndex, classifierIndex],
-                           foldsGroudTruth)
-        )
-    difficulty_scores = np.sum(scores, axis=0)
-    difficulty_score = np.mean(
-        np.var(
-            np.array([
-                         np.sum((difficulty_scores==viewIndex), axis=1)/float(nbView)
-                         for viewIndex in range(len(combination)+1)])
-            , axis=0)
-    )
-    return difficulty_score
-
-
-def getArgs(args, benchmark, views, viewsIndices, randomState, directory, resultsMonoview, classificationIndices):
-    return diversity_utils.getArgs(args, benchmark, views,
-                                   viewsIndices, randomState, directory,
-                                   resultsMonoview, classificationIndices,
-                                   difficulty, "difficulty_fusion")
-
-
-def genParamsSets(classificationKWARGS, randomState, nIter=1):
-    return diversity_utils.genParamsSets(classificationKWARGS, randomState, nIter=nIter)
-
-
-
-class DifficultyFusionClass(diversity_utils.DiversityFusionClass):
-
-    def __init__(self, randomState, NB_CORES=1, **kwargs):
-        diversity_utils.DiversityFusionClass.__init__(self, randomState, NB_CORES=1, **kwargs)
-
-    def getSpecificAnalysis(self, classificationKWARGS):
-        stringAnalysis = "Classifiers used for each view : "+ ', '.join(self.classifiersNames)+\
-                         ', with a difficulty of '+str(self.div_measure)
-        return stringAnalysis
\ No newline at end of file
diff --git a/multiview_platform/mono_multi_view_classifiers/multiview_classifiers/disagree_fusion.py b/multiview_platform/mono_multi_view_classifiers/multiview_classifiers/disagree_fusion.py
new file mode 100644
index 0000000000000000000000000000000000000000..445e4f8532e1557cd4aa9f16efc49f923c9b5342
--- /dev/null
+++ b/multiview_platform/mono_multi_view_classifiers/multiview_classifiers/disagree_fusion.py
@@ -0,0 +1,12 @@
+import numpy as np
+
+from multiview_platform.mono_multi_view_classifiers.multiview_classifiers.additions.diversity_utils import CoupleDiversityFusion
+
+
+classifier_class_name = "DisagreeFusion"
+
+
+class DisagreeFusion(CoupleDiversityFusion):
+
+    def diversity_measure(self, first_classifier_decision, second_classifier_decision, _):
+        return np.logical_xor(first_classifier_decision, second_classifier_decision)
diff --git a/multiview_platform/mono_multi_view_classifiers/multiview_classifiers/disagree_fusion/__init__.py b/multiview_platform/mono_multi_view_classifiers/multiview_classifiers/disagree_fusion/__init__.py
deleted file mode 100644
index b8459c144e15702c25f6e01a4418aabed408b6a3..0000000000000000000000000000000000000000
--- a/multiview_platform/mono_multi_view_classifiers/multiview_classifiers/disagree_fusion/__init__.py
+++ /dev/null
@@ -1 +0,0 @@
-from . import disagree_fusion,analyze_results
\ No newline at end of file
diff --git a/multiview_platform/mono_multi_view_classifiers/multiview_classifiers/disagree_fusion/analyze_results.py b/multiview_platform/mono_multi_view_classifiers/multiview_classifiers/disagree_fusion/analyze_results.py
deleted file mode 100644
index 07cf0a7b2790efb3847b4895a5f1abb7e9c58a9c..0000000000000000000000000000000000000000
--- a/multiview_platform/mono_multi_view_classifiers/multiview_classifiers/disagree_fusion/analyze_results.py
+++ /dev/null
@@ -1,21 +0,0 @@
-from ...multiview import analyze_results
-
-# Author-Info
-__author__ = "Baptiste Bauvin"
-__status__ = "Prototype"  # Production, Development, Prototype
-
-
-def execute(classifier, trainLabels,
-            testLabels, DATASET,
-            classificationKWARGS, classificationIndices,
-            LABELS_DICTIONARY, views, nbCores, times,
-            name, KFolds,
-            hyperParamSearch, nIter, metrics,
-            viewsIndices, randomState, labels, classifierModule):
-    return analyze_results.execute(classifier, trainLabels,
-                                   testLabels, DATASET,
-                                   classificationKWARGS, classificationIndices,
-                                   LABELS_DICTIONARY, views, nbCores, times,
-                                   name, KFolds,
-                                   hyperParamSearch, nIter, metrics,
-                                   viewsIndices, randomState, labels, classifierModule)
\ No newline at end of file
diff --git a/multiview_platform/mono_multi_view_classifiers/multiview_classifiers/disagree_fusion/disagree_fusion.py b/multiview_platform/mono_multi_view_classifiers/multiview_classifiers/disagree_fusion/disagree_fusion.py
deleted file mode 100644
index 035bd22f350071864b7ee03ee2e122106b6c0059..0000000000000000000000000000000000000000
--- a/multiview_platform/mono_multi_view_classifiers/multiview_classifiers/disagree_fusion/disagree_fusion.py
+++ /dev/null
@@ -1,41 +0,0 @@
-import numpy as np
-
-from ...multiview.additions import diversity_utils
-
-
-def genName(config):
-    return "disagree_fusion"
-
-
-def getBenchmark(benchmark, args=None):
-    benchmark["multiview"]["disagree_fusion"] = ["take_everything"]
-    return benchmark
-
-
-def disagree(classifierDecision1, classifierDecision2, ground_truth):
-    return np.logical_xor(classifierDecision1, classifierDecision2)
-
-
-def getArgs(args, benchmark, views, viewsIndices, randomState, directory, resultsMonoview, classificationIndices):
-    return diversity_utils.getArgs(args, benchmark, views, viewsIndices,
-                                   randomState, directory, resultsMonoview,
-                                   classificationIndices, disagree, "disagree_fusion")
-
-
-def genParamsSets(classificationKWARGS, randomState, nIter=1):
-    return diversity_utils.genParamsSets(classificationKWARGS, randomState, nIter=nIter)
-    # """Used to generate parameters sets for the random hyper parameters optimization function"""
-    # weights = [randomState.random_sample(len(classificationKWARGS["classifiersNames"])) for _ in range(nIter)]
-    # nomralizedWeights = [[weightVector/np.sum(weightVector)] for weightVector in weights]
-    # return nomralizedWeights
-
-
-class DisagreeFusionClass(diversity_utils.DiversityFusionClass):
-
-    def __init__(self, randomState, NB_CORES=1, **kwargs):
-        diversity_utils.DiversityFusionClass.__init__(self, randomState, NB_CORES=1, **kwargs)
-
-    def getSpecificAnalysis(self, classificationKWARGS):
-        stringAnalysis = "Classifiers used for each view : "+ ', '.join(self.classifiersNames)+\
-                         ', with a disagreement of '+str(self.div_measure)
-        return stringAnalysis
diff --git a/multiview_platform/mono_multi_view_classifiers/multiview_classifiers/double_fault_fusion.py b/multiview_platform/mono_multi_view_classifiers/multiview_classifiers/double_fault_fusion.py
new file mode 100644
index 0000000000000000000000000000000000000000..15ad8b6ae073f999642d5e987ea6aff57c9327c6
--- /dev/null
+++ b/multiview_platform/mono_multi_view_classifiers/multiview_classifiers/double_fault_fusion.py
@@ -0,0 +1,14 @@
+import numpy as np
+
+from multiview_platform.mono_multi_view_classifiers.multiview_classifiers.additions.diversity_utils import \
+    CoupleDiversityFusion
+
+classifier_class_name = "DoubleFaultFusion"
+
+
+class DoubleFaultFusion(CoupleDiversityFusion):
+
+    def diversity_measure(self, first_classifier_decision,
+                          second_classifier_decision, y):
+        return np.logical_and(np.logical_xor(first_classifier_decision, y),
+                              np.logical_xor(second_classifier_decision, y))
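Together with disagree_fusion above, these are the two pairwise measures fed to CoupleDiversityFusion: disagreement is the XOR of the two decision vectors, double fault is the AND of the two error vectors. A tiny standalone check on made-up decisions:

```python
import numpy as np

y = np.array([1, 0, 1, 0])
decisions_1 = np.array([1, 1, 0, 0])
decisions_2 = np.array([0, 1, 1, 0])
disagreement = np.logical_xor(decisions_1, decisions_2)
double_fault = np.logical_and(np.logical_xor(decisions_1, y),
                              np.logical_xor(decisions_2, y))
print(disagreement.mean(), double_fault.mean())  # 0.5 0.25
```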
diff --git a/multiview_platform/mono_multi_view_classifiers/multiview_classifiers/double_fault_fusion/__init__.py b/multiview_platform/mono_multi_view_classifiers/multiview_classifiers/double_fault_fusion/__init__.py
deleted file mode 100644
index bb9fd7705bad32bf53c4e5588f49d0e3846065b9..0000000000000000000000000000000000000000
--- a/multiview_platform/mono_multi_view_classifiers/multiview_classifiers/double_fault_fusion/__init__.py
+++ /dev/null
@@ -1 +0,0 @@
-from . import analyze_results, double_fault_fusion
\ No newline at end of file
diff --git a/multiview_platform/mono_multi_view_classifiers/multiview_classifiers/double_fault_fusion/analyze_results.py b/multiview_platform/mono_multi_view_classifiers/multiview_classifiers/double_fault_fusion/analyze_results.py
deleted file mode 100644
index 07cf0a7b2790efb3847b4895a5f1abb7e9c58a9c..0000000000000000000000000000000000000000
--- a/multiview_platform/mono_multi_view_classifiers/multiview_classifiers/double_fault_fusion/analyze_results.py
+++ /dev/null
@@ -1,21 +0,0 @@
-from ...multiview import analyze_results
-
-# Author-Info
-__author__ = "Baptiste Bauvin"
-__status__ = "Prototype"  # Production, Development, Prototype
-
-
-def execute(classifier, trainLabels,
-            testLabels, DATASET,
-            classificationKWARGS, classificationIndices,
-            LABELS_DICTIONARY, views, nbCores, times,
-            name, KFolds,
-            hyperParamSearch, nIter, metrics,
-            viewsIndices, randomState, labels, classifierModule):
-    return analyze_results.execute(classifier, trainLabels,
-                                   testLabels, DATASET,
-                                   classificationKWARGS, classificationIndices,
-                                   LABELS_DICTIONARY, views, nbCores, times,
-                                   name, KFolds,
-                                   hyperParamSearch, nIter, metrics,
-                                   viewsIndices, randomState, labels, classifierModule)
\ No newline at end of file
diff --git a/multiview_platform/mono_multi_view_classifiers/multiview_classifiers/double_fault_fusion/double_fault_fusion.py b/multiview_platform/mono_multi_view_classifiers/multiview_classifiers/double_fault_fusion/double_fault_fusion.py
deleted file mode 100644
index a08e48073057376daf7e0545a4bac8d6a454d07d..0000000000000000000000000000000000000000
--- a/multiview_platform/mono_multi_view_classifiers/multiview_classifiers/double_fault_fusion/double_fault_fusion.py
+++ /dev/null
@@ -1,40 +0,0 @@
-import numpy as np
-
-from ...multiview.additions import diversity_utils
-
-
-def genName(config):
-    return "double_fault_fusion"
-
-
-def getBenchmark(benchmark, args=None):
-    benchmark["multiview"]["double_fault_fusion"] = ["take_everything"]
-    return benchmark
-
-
-def doubleFault(classifierDecision1, classifierDecision2, ground_truth):
-    return np.logical_and(np.logical_xor(classifierDecision1, ground_truth),
-                          np.logical_xor(classifierDecision2, ground_truth))
-
-
-def getArgs(args, benchmark, views, viewsIndices, randomState, directory, resultsMonoview, classificationIndices):
-    return diversity_utils.getArgs(args, benchmark, views,
-                                   viewsIndices, randomState, directory,
-                                   resultsMonoview, classificationIndices,
-                                   doubleFault, "double_fault_fusion")
-
-
-def genParamsSets(classificationKWARGS, randomState, nIter=1):
-    return diversity_utils.genParamsSets(classificationKWARGS, randomState, nIter=nIter)
-
-
-
-class DoubleFaultFusionClass(diversity_utils.DiversityFusionClass):
-
-    def __init__(self, randomState, NB_CORES=1, **kwargs):
-        diversity_utils.DiversityFusionClass.__init__(self, randomState, NB_CORES=1, **kwargs)
-
-    def getSpecificAnalysis(self, classificationKWARGS):
-        stringAnalysis = "Classifiers used for each view : "+ ', '.join(self.classifiersNames)+\
-                         ', with a double fault ratio of '+str(self.div_measure)
-        return stringAnalysis
\ No newline at end of file
diff --git a/multiview_platform/mono_multi_view_classifiers/multiview_classifiers/entropy_fusion.py b/multiview_platform/mono_multi_view_classifiers/multiview_classifiers/entropy_fusion.py
new file mode 100644
index 0000000000000000000000000000000000000000..54986816d569bc9ec5761dafe3dced75467fa766
--- /dev/null
+++ b/multiview_platform/mono_multi_view_classifiers/multiview_classifiers/entropy_fusion.py
@@ -0,0 +1,24 @@
+import numpy as np
+
+from multiview_platform.mono_multi_view_classifiers.multiview_classifiers.additions.diversity_utils import GlobalDiversityFusion
+
+
+classifier_class_name = "EntropyFusion"
+
+
+class EntropyFusion(GlobalDiversityFusion):
+
+    def diversity_measure(self, classifiers_decisions, combination, y):
+        _, nb_view, nb_examples = classifiers_decisions.shape
+        scores = np.zeros((nb_view, nb_examples), dtype=int)
+        for view_index, classifier_index in enumerate(combination):
+            scores[view_index] = np.logical_not(
+                np.logical_xor(classifiers_decisions[classifier_index, view_index],
+                               y)
+            )
+        entropy_scores = np.sum(scores, axis=0)
+        nb_view_matrix = np.zeros((nb_examples,),
+                                  dtype=int) + nb_view - entropy_scores
+        entropy_score = np.mean(np.minimum(entropy_scores, nb_view_matrix)
+                                .astype(float) / (nb_view - int(nb_view / 2)))
+        return entropy_score
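A standalone toy run of the entropy measure above: entropy_scores counts the views that classify each example correctly, and the score is largest when the views split evenly. The counts are made up:

```python
import numpy as np

nb_view, nb_examples = 3, 4
entropy_scores = np.array([3, 2, 0, 1])  # correct views per example
nb_view_matrix = np.zeros((nb_examples,), dtype=int) + nb_view - entropy_scores
entropy_score = np.mean(np.minimum(entropy_scores, nb_view_matrix)
                        .astype(float) / (nb_view - int(nb_view / 2)))
print(entropy_score)  # (0 + 1 + 0 + 1) / 2 / 4 = 0.25
```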
diff --git a/multiview_platform/mono_multi_view_classifiers/multiview_classifiers/entropy_fusion/__init__.py b/multiview_platform/mono_multi_view_classifiers/multiview_classifiers/entropy_fusion/__init__.py
deleted file mode 100644
index f111304ec1a97d3f03b21250b681f0da2c1c7be3..0000000000000000000000000000000000000000
--- a/multiview_platform/mono_multi_view_classifiers/multiview_classifiers/entropy_fusion/__init__.py
+++ /dev/null
@@ -1 +0,0 @@
-from . import analyze_results, entropy_fusion
\ No newline at end of file
diff --git a/multiview_platform/mono_multi_view_classifiers/multiview_classifiers/entropy_fusion/analyze_results.py b/multiview_platform/mono_multi_view_classifiers/multiview_classifiers/entropy_fusion/analyze_results.py
deleted file mode 100644
index 07cf0a7b2790efb3847b4895a5f1abb7e9c58a9c..0000000000000000000000000000000000000000
--- a/multiview_platform/mono_multi_view_classifiers/multiview_classifiers/entropy_fusion/analyze_results.py
+++ /dev/null
@@ -1,21 +0,0 @@
-from ...multiview import analyze_results
-
-# Author-Info
-__author__ = "Baptiste Bauvin"
-__status__ = "Prototype"  # Production, Development, Prototype
-
-
-def execute(classifier, trainLabels,
-            testLabels, DATASET,
-            classificationKWARGS, classificationIndices,
-            LABELS_DICTIONARY, views, nbCores, times,
-            name, KFolds,
-            hyperParamSearch, nIter, metrics,
-            viewsIndices, randomState, labels, classifierModule):
-    return analyze_results.execute(classifier, trainLabels,
-                                   testLabels, DATASET,
-                                   classificationKWARGS, classificationIndices,
-                                   LABELS_DICTIONARY, views, nbCores, times,
-                                   name, KFolds,
-                                   hyperParamSearch, nIter, metrics,
-                                   viewsIndices, randomState, labels, classifierModule)
\ No newline at end of file
diff --git a/multiview_platform/mono_multi_view_classifiers/multiview_classifiers/entropy_fusion/entropy_fusion.py b/multiview_platform/mono_multi_view_classifiers/multiview_classifiers/entropy_fusion/entropy_fusion.py
deleted file mode 100644
index eb467c8f9c8684a02fbd42d69c33cb1001ad2820..0000000000000000000000000000000000000000
--- a/multiview_platform/mono_multi_view_classifiers/multiview_classifiers/entropy_fusion/entropy_fusion.py
+++ /dev/null
@@ -1,49 +0,0 @@
-import numpy as np
-
-from ...multiview.additions import diversity_utils
-
-
-def genName(config):
-    return "entropy_fusion"
-
-
-def getBenchmark(benchmark, args=None):
-    benchmark["multiview"]["entropy_fusion"] = ["take_everything"]
-    return benchmark
-
-
-def entropy(classifiersDecisions, combination, foldsGroudTruth, foldsLen):
-    nbView, _, nbFolds, nbExamples = classifiersDecisions.shape
-    scores = np.zeros((nbView, nbFolds, nbExamples), dtype=int)
-    for viewIndex, classifierIndex in enumerate(combination):
-        scores[viewIndex] = np.logical_not(
-            np.logical_xor(classifiersDecisions[viewIndex, classifierIndex],
-                           foldsGroudTruth)
-        )
-    entropy_scores = np.sum(scores, axis=0)
-    nbViewMatrix = np.zeros((nbFolds, nbExamples), dtype=int)+nbView-entropy_scores
-    entropy_score = np.mean(np.mean(np.minimum(entropy_scores, nbViewMatrix).astype(float)/(nbView - int(nbView/2)), axis=1))
-    return entropy_score
-
-
-def getArgs(args, benchmark, views, viewsIndices, randomState, directory, resultsMonoview, classificationIndices):
-    return diversity_utils.getArgs(args, benchmark, views,
-                                   viewsIndices, randomState, directory,
-                                   resultsMonoview, classificationIndices,
-                                   entropy, "entropy_fusion")
-
-
-def genParamsSets(classificationKWARGS, randomState, nIter=1):
-    return diversity_utils.genParamsSets(classificationKWARGS, randomState, nIter=nIter)
-
-
-
-class EntropyFusionClass(diversity_utils.DiversityFusionClass):
-
-    def __init__(self, randomState, NB_CORES=1, **kwargs):
-        diversity_utils.DiversityFusionClass.__init__(self, randomState, NB_CORES=1, **kwargs)
-
-    def getSpecificAnalysis(self, classificationKWARGS):
-        stringAnalysis = "Classifiers used for each view : "+ ', '.join(self.classifiersNames)+\
-                         ', with an entropy of '+str(self.div_measure)
-        return stringAnalysis
\ No newline at end of file
diff --git a/multiview_platform/mono_multi_view_classifiers/multiview_classifiers/pseudo_cq_fusion/pseudo_cq_fusion.py b/multiview_platform/mono_multi_view_classifiers/multiview_classifiers/pseudo_cq_fusion/pseudo_cq_fusion.py
index 441593f5a8ac8355e875541de463464be005e47c..4d99138009f05aacf95cf5991f66f4fdbf4980b7 100644
--- a/multiview_platform/mono_multi_view_classifiers/multiview_classifiers/pseudo_cq_fusion/pseudo_cq_fusion.py
+++ b/multiview_platform/mono_multi_view_classifiers/multiview_classifiers/pseudo_cq_fusion/pseudo_cq_fusion.py
@@ -1,6 +1,7 @@
-from ...multiview.additions import diversity_utils
-from ..difficulty_fusion.difficulty_fusion import difficulty
-from ..double_fault_fusion.double_fault_fusion import doubleFault
+from multiview_platform.mono_multi_view_classifiers.multiview_classifiers.additions import \
+    diversity_utils
+from multiview_platform.mono_multi_view_classifiers.multiview_classifiers.difficulty_fusion_old import difficulty
+from multiview_platform.mono_multi_view_classifiers.multiview_classifiers.double_fault_fusion_old import doubleFault
 
 
 def genName(config):
@@ -34,6 +35,6 @@ class PseudoCQFusionClass(diversity_utils.DiversityFusionClass):
         diversity_utils.DiversityFusionClass.__init__(self, randomState, NB_CORES=1, **kwargs)
 
     def getSpecificAnalysis(self, classificationKWARGS):
-        stringAnalysis = "Classifiers used for each view : "+ ', '.join(self.classifiersNames)+\
-                         ', with a pseudo CQ of '+str(self.div_measure)
+        stringAnalysis = "Classifiers used for each view : " + ', '.join(self.classifiers_names) +\
+                         ', with a pseudo CQ of ' + str(self.div_measure)
         return stringAnalysis
\ No newline at end of file
diff --git a/multiview_platform/mono_multi_view_classifiers/multiview_classifiers/weighted_linear_early_fusion.py b/multiview_platform/mono_multi_view_classifiers/multiview_classifiers/weighted_linear_early_fusion.py
index e3b93cf133397e36085a8be638408a06fc44bdf4..25bd9c3e95dd2fadd5f556c55bb1423aeb630aa4 100644
--- a/multiview_platform/mono_multi_view_classifiers/multiview_classifiers/weighted_linear_early_fusion.py
+++ b/multiview_platform/mono_multi_view_classifiers/multiview_classifiers/weighted_linear_early_fusion.py
@@ -2,7 +2,8 @@ import numpy as np
 import inspect
 
 from ..utils.dataset import get_v
-from ..multiview.multiview_utils import BaseMultiviewClassifier, get_train_views_indices, ConfigGenerator
+from ..multiview.multiview_utils import BaseMultiviewClassifier, get_examples_views_indices, ConfigGenerator, get_available_monoview_classifiers
+
 
 from .. import monoview_classifiers
 
@@ -28,11 +29,8 @@ class WeightedLinearEarlyFusion(BaseMultiviewClassifier):
         self.monoview_classifier = monoview_classifier_class(random_state=random_state,
                                                              **self.monoview_classifier_config)
         self.param_names = ["monoview_classifier_name", "monoview_classifier_config"]
-        classifier_names = []
-        for module_name in dir(monoview_classifiers):
-            if not module_name.startswith("__"):
-                classifier_names.append(module_name)
-        self.distribs = [classifier_names, ConfigGenerator(classifier_names)]
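+        # search distributions: the available monoview classifier names and a
+        # config generator drawing compatible hyper-parameters for them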
+        self.distribs = [get_available_monoview_classifiers(),
+                         ConfigGenerator(get_available_monoview_classifiers())]
         self.classed_params = []
         self.weird_strings={}
 
@@ -56,6 +54,7 @@ class WeightedLinearEarlyFusion(BaseMultiviewClassifier):
     def fit(self, X, y, train_indices=None, view_indices=None):
         train_indices, X = self.transform_data_to_monoview(X, train_indices, view_indices)
         self.monoview_classifier.fit(X, y[train_indices])
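+        # return self to make fit chainable, as scikit-learn estimators do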
+        return self
 
     def predict(self, X, predict_indices=None, view_indices=None):
         _, X = self.transform_data_to_monoview(X, predict_indices, view_indices)
@@ -65,7 +64,7 @@ class WeightedLinearEarlyFusion(BaseMultiviewClassifier):
     def transform_data_to_monoview(self, dataset, example_indices, view_indices):
         """Here, we extract the data from the HDF5 dataset file and store all
         the concatenated views in one variable"""
-        example_indices, self.view_indices = get_train_views_indices(dataset,
+        example_indices, self.view_indices = get_examples_views_indices(dataset,
                                                                         example_indices,
                                                                         view_indices)
         if self.view_weights is None or self.view_weights=="None":
diff --git a/multiview_platform/mono_multi_view_classifiers/utils/dataset.py b/multiview_platform/mono_multi_view_classifiers/utils/dataset.py
index c236a6a6b1601cdae70f689e8588126f203eab11..c889c251b82f904655f90d7565fd84dc805dc74f 100644
--- a/multiview_platform/mono_multi_view_classifiers/utils/dataset.py
+++ b/multiview_platform/mono_multi_view_classifiers/utils/dataset.py
@@ -10,6 +10,44 @@ from scipy import sparse
 from . import get_multiview_db as DB
 
 
+class Dataset():
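+    """Wraps an open HDF5 dataset file and exposes example- and view-level accessors."""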
+
+    def __init__(self, dataset):
+        self.dataset = dataset
+
+    def init_example_indices(self, example_indices=None):
+        if example_indices is None:
+            return range(self.dataset.get("Metadata").attrs["datasetLength"])
+        else:
+            return example_indices
+
+    def get_v(self, view_index, example_indices=None):
+        example_indices = self.init_example_indices(example_indices)
+        if type(example_indices) is int:
+            return self.dataset.get("View" + str(view_index))[example_indices, :]
+        else:
+            example_indices = np.array(example_indices)
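+            # h5py fancy indexing needs indices in increasing order: sort them,
+            # slice, then undo the permutation with np.argsort(sorted_indices)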
+            sorted_indices = np.argsort(example_indices)
+            example_indices = example_indices[sorted_indices]
+
+            if not self.dataset.get("View" + str(view_index)).attrs["sparse"]:
+                return self.dataset.get("View" + str(view_index))[example_indices, :][
+                       np.argsort(sorted_indices), :]
+            else:
+                sparse_mat = sparse.csr_matrix(
+                    (self.dataset.get("View" + str(view_index)).get("data").value,
+                     self.dataset.get("View" + str(view_index)).get("indices").value,
+                     self.dataset.get("View" + str(view_index)).get("indptr").value),
+                    shape=self.dataset.get("View" + str(view_index)).attrs["shape"])[
+                             example_indices, :][
+                             np.argsort(sorted_indices), :]
+
+                return sparse_mat
+
+    def get_nb_class(self, example_indices=None):
+        example_indices = self.init_example_indices(example_indices)
+        return len(np.unique(self.dataset.get("Labels").value[example_indices]))
+
 
 def get_v(dataset, view_index, used_indices=None):
     """Used to extract a view as a numpy array or a sparse mat from the HDF5 dataset"""
diff --git a/multiview_platform/tests/test_multiview_classifiers/Test_DifficultyMeasure/test_DifficultyMeasureModule.py b/multiview_platform/tests/test_multiview_classifiers/Test_DifficultyMeasure/test_DifficultyMeasureModule.py
deleted file mode 100644
index 4dd7cdef6269e5c6daf9dd64a41a8159e320701e..0000000000000000000000000000000000000000
--- a/multiview_platform/tests/test_multiview_classifiers/Test_DifficultyMeasure/test_DifficultyMeasureModule.py
+++ /dev/null
@@ -1,36 +0,0 @@
-import unittest
-
-import numpy as np
-
-from ....mono_multi_view_classifiers.multiview_classifiers.difficulty_fusion import \
-    difficulty_fusion
-
-
-class Test_difficulty(unittest.TestCase):
-
-    @classmethod
-    def setUpClass(cls):
-        cls.classifiersDecisions = np.array([
-            [np.random.randint(0, 2, (2, 5)),
-             np.array([[0, 0, 1, 0, 1], [0, 1, 0, 1, 0]]),
-             np.random.randint(0, 2, (2, 5)), np.random.randint(0, 2, (2, 5)),
-             np.random.randint(0, 2, (2, 5))],
-            [np.random.randint(0, 2, (2, 5)), np.random.randint(0, 2, (2, 5)),
-             np.random.randint(0, 2, (2, 5)),
-             np.array([[0, 0, 1, 1, 0], [0, 1, 0, 1, 0]]),
-             np.random.randint(0, 2, (2, 5))],
-            [np.random.randint(0, 2, (2, 5)), np.random.randint(0, 2, (2, 5)),
-             np.random.randint(0, 2, (2, 5)), np.random.randint(0, 2, (2, 5)),
-             np.array([[0, 1, 1, 1, 1], [0, 1, 0, 1, 0]])],
-        ])
-        cls.combination = [1, 3, 4]
-        cls.foldsGroudTruth = np.array([[1, 1, 0, 0, 1], [0, 1, 0, 1, 0]])
-        cls.foldsLen = ""
-
-    def test_simple(cls):
-        difficulty_measure = difficulty_fusion.difficulty(
-            cls.classifiersDecisions,
-            cls.combination,
-            cls.foldsGroudTruth,
-            cls.foldsLen)
-        cls.assertAlmostEqual(difficulty_measure, 0.29861111111)
diff --git a/multiview_platform/tests/test_multiview_classifiers/Test_DisagreeFusion/__init__.py b/multiview_platform/tests/test_multiview_classifiers/Test_DisagreeFusion/__init__.py
deleted file mode 100644
index e69de29bb2d1d6434b8b29ae775ad8c2e48c5391..0000000000000000000000000000000000000000
diff --git a/multiview_platform/tests/test_multiview_classifiers/Test_DisagreeFusion/test_DisagreeFusionModule.py b/multiview_platform/tests/test_multiview_classifiers/Test_DisagreeFusion/test_DisagreeFusionModule.py
deleted file mode 100644
index 373cb6296e83f56083e3ab22bcfebcc7a91258b1..0000000000000000000000000000000000000000
--- a/multiview_platform/tests/test_multiview_classifiers/Test_DisagreeFusion/test_DisagreeFusionModule.py
+++ /dev/null
@@ -1,69 +0,0 @@
-import unittest
-
-import numpy as np
-
-from multiview_platform.mono_multi_view_classifiers.multiview.additions import \
-    diversity_utils
-from ....mono_multi_view_classifiers.multiview_classifiers.disagree_fusion import \
-    disagree_fusion
-from multiview_platform.mono_multi_view_classifiers.multiview.multiview_utils import MultiviewResult
-
-class Test_disagreement(unittest.TestCase):
-
-    @classmethod
-    def setUpClass(cls):
-        cls.randomState = np.random.RandomState(42)
-        cls.allClassifiersNames = [["SCM", "SVM", "DT"], ["SCM", "SVM", "DT"]]
-        cls.viewsIndices =  np.array([0, 1])
-        cls.classifiersDecisions = np.zeros((cls.viewsIndices.shape[0], len(cls.allClassifiersNames), 3, 6),
-                                            dtype=int)
-        for classifer_index, classifier in enumerate(cls.allClassifiersNames):
-            for view_index, view in enumerate(cls.viewsIndices):
-                cls.classifiersDecisions[view_index, classifer_index] = np.array([
-                    cls.randomState.randint(0, 2, 6),
-                    cls.randomState.randint(0, 2, 6),
-                    cls.randomState.randint(0, 2, 6)])
-        cls.folds_ground_truth = np.array([np.array([1,1,1,0,0,0]) for _ in range(3)])
-        cls.classificationIndices = np.array([])
-
-    def test_simple(cls):
-        bestCombi, disagreement = diversity_utils.couple_div_measure(
-            cls.allClassifiersNames, cls.classifiersDecisions, disagree_fusion.disagree, cls.folds_ground_truth)
-        cls.assertAlmostEqual(disagreement, 0.666666666667)
-        cls.assertEqual(len(bestCombi), 2)
-
-    def test_multipleViews(cls):
-        cls.viewsIndices = np.array([0, 6, 18])
-        cls.allClassifiersNames = [["SCM", "SVM", "DT"], ["SCM", "SVM", "DT"], ["SCM", "SVM", "DT"]]
-        cls.classifiersDecisions = np.zeros(
-            (cls.viewsIndices.shape[0], len(cls.allClassifiersNames), 3, 6),
-            dtype=int)
-        for classifer_index, classifier in enumerate(cls.allClassifiersNames):
-            for view_index, view in enumerate(cls.viewsIndices):
-                cls.classifiersDecisions[
-                    view_index, classifer_index] = np.array([
-                    cls.randomState.randint(0, 2, 6),
-                    cls.randomState.randint(0, 2, 6),
-                    cls.randomState.randint(0, 2, 6)])
-
-        bestCombi, disagreement = diversity_utils.couple_div_measure(
-            cls.allClassifiersNames, cls.classifiersDecisions,
-            disagree_fusion.disagree, cls.folds_ground_truth)
-        cls.assertAlmostEqual(disagreement, 0.55555555555555)
-        cls.assertEqual(len(bestCombi), 3)
-
-
-class Test_disagree(unittest.TestCase):
-
-    @classmethod
-    def setUpClass(cls):
-        cls.monoviewDecision1 = np.array([0, 0, 1, 1])
-        cls.monoviewDecision2 = np.array([0, 1, 0, 1])
-        cls.ground_truth = None
-
-    def test_simple(cls):
-        disagreement = disagree_fusion.disagree(cls.monoviewDecision1,
-                                                cls.monoviewDecision2,
-                                                cls.ground_truth)
-        np.testing.assert_array_equal(disagreement,
-                                      np.array([False, True, True, False]))
diff --git a/multiview_platform/tests/test_multiview_classifiers/Test_DoubleFaultFusion/__init__.py b/multiview_platform/tests/test_multiview_classifiers/Test_DoubleFaultFusion/__init__.py
deleted file mode 100644
index e69de29bb2d1d6434b8b29ae775ad8c2e48c5391..0000000000000000000000000000000000000000
diff --git a/multiview_platform/tests/test_multiview_classifiers/Test_DoubleFaultFusion/test_DoubleFaultFusionModule.py b/multiview_platform/tests/test_multiview_classifiers/Test_DoubleFaultFusion/test_DoubleFaultFusionModule.py
deleted file mode 100644
index 77c73435cd06300ab20809599e72e640cfbe4dc1..0000000000000000000000000000000000000000
--- a/multiview_platform/tests/test_multiview_classifiers/Test_DoubleFaultFusion/test_DoubleFaultFusionModule.py
+++ /dev/null
@@ -1,70 +0,0 @@
-import unittest
-
-import numpy as np
-
-from multiview_platform.mono_multi_view_classifiers.multiview.additions import \
-    diversity_utils
-from ....mono_multi_view_classifiers.multiview_classifiers.double_fault_fusion import \
-    double_fault_fusion
-
-
-class Test_doubleFaultRatio(unittest.TestCase):
-
-    @classmethod
-    def setUpClass(cls):
-        cls.randomState = np.random.RandomState(42)
-        cls.allClassifiersNames = [["SCM", "SVM", "DT"], ["SCM", "SVM", "DT"]]
-        cls.directory = ""
-        cls.viewsIndices = np.array([0, 1])
-        cls.classifiersDecisions = np.zeros(
-            (cls.viewsIndices.shape[0], len(cls.allClassifiersNames), 3, 6),
-            dtype=int)
-        for classifer_index, classifier in enumerate(cls.allClassifiersNames):
-            for view_index, view in enumerate(cls.viewsIndices):
-                cls.classifiersDecisions[
-                    view_index, classifer_index] = np.array([
-                    cls.randomState.randint(0, 2, 6),
-                    cls.randomState.randint(0, 2, 6),
-                    cls.randomState.randint(0, 2, 6)])
-        cls.folds_ground_truth = np.array([np.array([1,1,1,0,0,0]) for _ in range(3)])
-
-    def test_simple(cls):
-        bestCombi, disagreement = diversity_utils.couple_div_measure(
-            cls.allClassifiersNames,cls.classifiersDecisions,
-            double_fault_fusion.doubleFault, cls.folds_ground_truth)
-        cls.assertAlmostEqual(disagreement, 0.3888888888888)
-        cls.assertEqual(len(bestCombi), 2)
-
-    def test_multipleViews(cls):
-        cls.viewsIndices = np.array([0, 6, 18])
-        cls.allClassifiersNames = [["SCM", "SVM", "DT"], ["SCM", "SVM", "DT"], ["SCM", "SVM", "DT"]]
-        cls.classifiersDecisions = np.zeros(
-            (cls.viewsIndices.shape[0], len(cls.allClassifiersNames), 3, 6),
-            dtype=int)
-        for classifer_index, classifier in enumerate(cls.allClassifiersNames):
-            for view_index, view in enumerate(cls.viewsIndices):
-                cls.classifiersDecisions[
-                    view_index, classifer_index] = np.array([
-                    cls.randomState.randint(0, 2, 6),
-                    cls.randomState.randint(0, 2, 6),
-                    cls.randomState.randint(0, 2, 6)])
-        bestCombi, disagreement = diversity_utils.couple_div_measure(
-            cls.allClassifiersNames, cls.classifiersDecisions,
-            double_fault_fusion.doubleFault, cls.folds_ground_truth)
-        cls.assertAlmostEqual(disagreement, 0.3333333333)
-        cls.assertEqual(len(bestCombi), 3)
-
-
-class Test_doubleFault(unittest.TestCase):
-
-    @classmethod
-    def setUpClass(cls):
-        cls.monoviewDecision1 = np.array([0, 0, 1, 1, 0, 0, 1, 1])
-        cls.monoviewDecision2 = np.array([0, 1, 0, 1, 0, 1, 0, 1])
-        cls.ground_truth = np.array([0, 0, 0, 0, 1, 1, 1, 1])
-
-    def test_simple(cls):
-        disagreement = double_fault_fusion.doubleFault(
-            cls.monoviewDecision1, cls.monoviewDecision2, cls.ground_truth)
-        np.testing.assert_array_equal(disagreement, np.array(
-            [False, False, False, True, True, False, False, False]))
diff --git a/multiview_platform/tests/test_multiview_classifiers/Test_EntropyFusion/__init__.py b/multiview_platform/tests/test_multiview_classifiers/Test_EntropyFusion/__init__.py
deleted file mode 100644
index e69de29bb2d1d6434b8b29ae775ad8c2e48c5391..0000000000000000000000000000000000000000
diff --git a/multiview_platform/tests/test_multiview_classifiers/Test_EntropyFusion/test_EntropyFusionModule.py b/multiview_platform/tests/test_multiview_classifiers/Test_EntropyFusion/test_EntropyFusionModule.py
deleted file mode 100644
index 605c6ab82d27e83116ba5d584204063e1475d894..0000000000000000000000000000000000000000
--- a/multiview_platform/tests/test_multiview_classifiers/Test_EntropyFusion/test_EntropyFusionModule.py
+++ /dev/null
@@ -1,35 +0,0 @@
-import unittest
-
-import numpy as np
-
-from ....mono_multi_view_classifiers.multiview_classifiers.entropy_fusion import \
-    entropy_fusion
-
-
-class Test_entropy(unittest.TestCase):
-
-    @classmethod
-    def setUpClass(cls):
-        cls.classifiersDecisions = np.array([
-            [np.random.randint(0, 2, (2, 5)),
-             [[0, 0, 1, 0, 1], [0, 1, 0, 1, 0]],
-             np.random.randint(0, 2, (2, 5)), np.random.randint(0, 2, (2, 5)),
-             np.random.randint(0, 2, (2, 5))],
-            [np.random.randint(0, 2, (2, 5)), np.random.randint(0, 2, (2, 5)),
-             np.random.randint(0, 2, (2, 5)),
-             [[0, 0, 1, 1, 0], [0, 1, 0, 1, 0]],
-             np.random.randint(0, 2, (2, 5))],
-            [np.random.randint(0, 2, (2, 5)), np.random.randint(0, 2, (2, 5)),
-             np.random.randint(0, 2, (2, 5)), np.random.randint(0, 2, (2, 5)),
-             [[0, 1, 1, 1, 1], [0, 1, 0, 1, 0]]],
-        ])
-        cls.combination = [1, 3, 4]
-        cls.foldsGroudTruth = np.array([[1, 1, 0, 0, 1], [0, 1, 0, 1, 0]])
-        cls.foldsLen = ""
-
-    def test_simple(cls):
-        entropy_score = entropy_fusion.entropy(cls.classifiersDecisions,
-                                               cls.combination,
-                                               cls.foldsGroudTruth,
-                                               cls.foldsLen)
-        cls.assertEqual(entropy_score, 0.15, 'Wrong values for entropy measure')
diff --git a/multiview_platform/tests/test_multiview_classifiers/Test_PseudoCQMeasure/test_PseudoCQFusionModule.py b/multiview_platform/tests/test_multiview_classifiers/Test_PseudoCQMeasure/test_PseudoCQFusionModule.py
index be4f271c5b9bdee74da924b037f1a58b827405fb..65e22eb8f7dff86aec92af8d1c7adc9e21838d49 100644
--- a/multiview_platform/tests/test_multiview_classifiers/Test_PseudoCQMeasure/test_PseudoCQFusionModule.py
+++ b/multiview_platform/tests/test_multiview_classifiers/Test_PseudoCQMeasure/test_PseudoCQFusionModule.py
@@ -2,7 +2,7 @@
 #
 # import numpy as np
 #
-# from ....mono_multi_view_classifiers.multiview_classifiers.entropy_fusion import EntropyFusionModule
+# from ....mono_multi_view_classifiers.multiview_classifiers.entropy_fusion_old import EntropyFusionModule
 #
 # class Test_entropy(unittest.TestCase):
 #
diff --git a/multiview_platform/tests/test_multiview_classifiers/Test_DifficultyMeasure/__init__.py b/multiview_platform/tests/test_multiview_classifiers/test_additions/__init__.py
similarity index 100%
rename from multiview_platform/tests/test_multiview_classifiers/Test_DifficultyMeasure/__init__.py
rename to multiview_platform/tests/test_multiview_classifiers/test_additions/__init__.py
diff --git a/multiview_platform/tests/test_multiview_classifiers/test_additions/test_diversity_utils.py b/multiview_platform/tests/test_multiview_classifiers/test_additions/test_diversity_utils.py
new file mode 100644
index 0000000000000000000000000000000000000000..6c22d615d12d8640b5f96c35dfb67999381fb138
--- /dev/null
+++ b/multiview_platform/tests/test_multiview_classifiers/test_additions/test_diversity_utils.py
@@ -0,0 +1,69 @@
+import unittest
+import numpy as np
+
+from ....mono_multi_view_classifiers.multiview_classifiers.additions import diversity_utils
+
+
+class FakeDataset():
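+    """In-memory stand-in for the HDF5-backed Dataset used by the platform."""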
+
+    def __init__(self, views, labels):
+        self.nb_views = views.shape[0]
+        self.dataset_length = views.shape[2]
+        self.views = views
+        self.labels = labels
+
+    def get_v(self, view_index, example_indices):
+        return self.views[view_index, example_indices]
+
+    def get_nb_class(self, example_indices):
+        # mirror Dataset.get_nb_class: return the number of distinct labels
+        return len(np.unique(self.labels[example_indices]))
+
+class FakeDivCoupleClf(diversity_utils.CoupleDiversityFusion):
+
+    def __init__(self, rs, classifier_names=None,
+                 classifiers_config=None, monoview_estimators=None):
+        super(FakeDivCoupleClf, self).__init__(random_state=rs,
+                                               classifier_names=classifier_names,
+                                               classifiers_configs=classifiers_config,
+                                               monoview_estimators=monoview_estimators)
+        self.rs = rs
+
+    def diversity_score(self, a, b, c):
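+        # stub: a random score is enough for fit() to rank the candidate couples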
+        return self.rs.randint(0,100)
+
+
+class FakeDivGlobalClf(diversity_utils.GlobalDiversityFusion):
+
+    def __init__(self, rs, classifier_names=None,
+                 classifiers_config=None, monoview_estimators=None):
+        super(FakeDivGlobalClf, self).__init__(random_state=rs,
+                                               classifier_names=classifier_names,
+                                               classifiers_configs=classifiers_config,
+                                               monoview_estimators=monoview_estimators)
+        self.rs = rs
+
+    def diversity_score(self, a, b, c):
+        return self.rs.randint(0,100)
+
+class Test_DiversityFusion(unittest.TestCase):
+
+    @classmethod
+    def setUpClass(cls):
+        cls.classifier_names = ["cb_boost", "decision_tree"]
+        cls.classifiers_config = {"cb_boost":{"n_stumps":1, "n_iterations":5}}
+        cls.random_state = np.random.RandomState(42)
+        cls.y = cls.random_state.randint(0,2,6)
+        cls.X = FakeDataset(cls.random_state.randint(0,100,(2,5,6)), cls.y)
+        cls.train_indices = [0,1,2,4]
+        cls.views_indices = [0,1]
+
+    def test_simple_couple(self):
+        clf = FakeDivCoupleClf(self.random_state, classifier_names=self.classifier_names,
+                                              classifiers_config=self.classifiers_config)
+        clf.fit(self.X, self.y, self.train_indices, self.views_indices)
+
+    def test_simple_global(self):
+        clf = FakeDivGlobalClf(self.random_state,
+                               classifier_names=self.classifier_names,
+                               classifiers_config=self.classifiers_config)
+        clf.fit(self.X, self.y, self.train_indices, self.views_indices)
\ No newline at end of file
diff --git a/multiview_platform/tests/test_multiview_classifiers/test_difficulty_fusion.py b/multiview_platform/tests/test_multiview_classifiers/test_difficulty_fusion.py
new file mode 100644
index 0000000000000000000000000000000000000000..4efc60546dcf049f8f9ce6613a713262f1ee42e5
--- /dev/null
+++ b/multiview_platform/tests/test_multiview_classifiers/test_difficulty_fusion.py
@@ -0,0 +1,23 @@
+import unittest
+
+import numpy as np
+
+from ...mono_multi_view_classifiers.multiview_classifiers import difficulty_fusion
+
+
+class Test_difficulty_fusion(unittest.TestCase):
+
+    @classmethod
+    def setUpClass(cls):
+        cls.random_state = np.random.RandomState(42)
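+        # decisions are indexed as (classifier, fold, example); combination
+        # selects the subset of classifiers whose diversity is measured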
+        cls.classifiers_decisions = cls.random_state.randint(0, 2, size=(5, 3, 5))
+        cls.combination = [1, 3, 4]
+        cls.y = np.array([1, 1, 0, 0, 1])
+        cls.difficulty_fusion_clf = difficulty_fusion.DifficultyFusion()
+
+    def test_simple(cls):
+        difficulty_measure = cls.difficulty_fusion_clf.diversity_measure(
+            cls.classifiers_decisions,
+            cls.combination,
+            cls.y)
+        cls.assertAlmostEqual(difficulty_measure, 0.22453703703703706)
diff --git a/multiview_platform/tests/test_multiview_classifiers/test_disagree_fusion.py b/multiview_platform/tests/test_multiview_classifiers/test_disagree_fusion.py
new file mode 100644
index 0000000000000000000000000000000000000000..3ae051fa653f7e2ad132087682cc3fef4996f5f6
--- /dev/null
+++ b/multiview_platform/tests/test_multiview_classifiers/test_disagree_fusion.py
@@ -0,0 +1,23 @@
+import unittest
+
+import numpy as np
+
+from ...mono_multi_view_classifiers.multiview_classifiers import disagree_fusion
+
+
+class Test_disagree(unittest.TestCase):
+
+    @classmethod
+    def setUpClass(cls):
+        cls.monoview_decision_1 = np.array([0, 0, 1, 1])
+        cls.monoview_decision_2 = np.array([0, 1, 0, 1])
+        cls.ground_truth = None
+        cls.clf = disagree_fusion.DisagreeFusion()
+
+    def test_simple(cls):
+        disagreement = cls.clf.diversity_measure(cls.monoview_decision_1,
+                                                 cls.monoview_decision_2,
+                                                 cls.ground_truth)
+        np.testing.assert_array_equal(disagreement,
+                                      np.array([False, True, True, False]))
diff --git a/multiview_platform/tests/test_multiview_classifiers/test_diversity_utils.py b/multiview_platform/tests/test_multiview_classifiers/test_diversity_utils.py
index 11b74a75abc59b18e5d2dd8a80537a3811fd9155..8b137891791fe96927ad78e64b0aad7bded08bdc 100644
--- a/multiview_platform/tests/test_multiview_classifiers/test_diversity_utils.py
+++ b/multiview_platform/tests/test_multiview_classifiers/test_diversity_utils.py
@@ -1,44 +1 @@
-import unittest
 
-import numpy as np
-
-from ..utils import rm_tmp
-from multiview_platform.mono_multi_view_classifiers.multiview.additions import \
-    diversity_utils
-
-
-def fake_measure(a, b, c, d, e):
-    return 42
-
-
-class Test_global_div_measure(unittest.TestCase):
-
-    @classmethod
-    def setUpClass(cls):
-        rm_tmp()
-        cls.randomState = np.random.RandomState(42)
-        cls.allClassifiersNames = [["SCM", "SVM", "DT"], ["SCM", "SVM", "DT"]]
-        cls.viewsIndices = np.array([0, 1])
-        cls.classifiersDecisions = np.zeros(
-            (cls.viewsIndices.shape[0], len(cls.allClassifiersNames), 3, 6),
-            dtype=int)
-        for classifer_index, classifier in enumerate(cls.allClassifiersNames):
-            for view_index, view in enumerate(cls.viewsIndices):
-                cls.classifiersDecisions[
-                    view_index, classifer_index] = np.array([
-                    cls.randomState.randint(0, 2, 6),
-                    cls.randomState.randint(0, 2, 6),
-                    cls.randomState.randint(0, 2, 6)])
-        cls.folds_ground_truth = np.array(
-            [np.array([1, 1, 1, 0, 0, 0]) for _ in range(3)])
-        cls.classificationIndices = np.array([])
-        cls.measurement = fake_measure
-
-    def test_simple(cls):
-        clf_names, diversity_measure = diversity_utils.global_div_measure(
-            cls.allClassifiersNames,
-            cls.classifiersDecisions,
-            cls.measurement,
-            cls.folds_ground_truth)
-        cls.assertEqual(len(clf_names), 2)
-        cls.assertEqual(diversity_measure, 42)
diff --git a/multiview_platform/tests/test_multiview_classifiers/test_double_fault_fusion.py b/multiview_platform/tests/test_multiview_classifiers/test_double_fault_fusion.py
new file mode 100644
index 0000000000000000000000000000000000000000..3a0b7f6f29edeb3ee196e7f9cb60309fec96f2b4
--- /dev/null
+++ b/multiview_platform/tests/test_multiview_classifiers/test_double_fault_fusion.py
@@ -0,0 +1,22 @@
+import unittest
+
+import numpy as np
+
+from ...mono_multi_view_classifiers.multiview_classifiers import double_fault_fusion
+
+
+class Test_double_fault(unittest.TestCase):
+
+    @classmethod
+    def setUpClass(cls):
+        cls.monoview_decision_1 = np.array([0, 0, 0, 0, 1, 1, 1, 1])
+        cls.monoview_decision_2 = np.array([0, 0, 1, 1, 0, 0, 1, 1])
+        cls.ground_truth = np.array([0, 1, 0, 1, 0, 1, 0, 1])
+        cls.clf = double_fault_fusion.DoubleFaultFusion()
+
+    def test_simple(cls):
+        double_fault = cls.clf.diversity_measure(cls.monoview_decision_1,
+                                                 cls.monoview_decision_2,
+                                                 cls.ground_truth)
+        np.testing.assert_array_equal(double_fault,
+                                      np.array([False, True, False, False, False, False, True, False]))
diff --git a/multiview_platform/tests/test_multiview_classifiers/test_entropy_fusion.py b/multiview_platform/tests/test_multiview_classifiers/test_entropy_fusion.py
new file mode 100644
index 0000000000000000000000000000000000000000..765a0b793c0deab1687d540d565f7f17002b9f17
--- /dev/null
+++ b/multiview_platform/tests/test_multiview_classifiers/test_entropy_fusion.py
@@ -0,0 +1,23 @@
+import unittest
+
+import numpy as np
+
+from ...mono_multi_view_classifiers.multiview_classifiers import entropy_fusion
+
+
+class Test_entropy_fusion(unittest.TestCase):
+
+    @classmethod
+    def setUpClass(cls):
+        cls.random_state = np.random.RandomState(42)
+        cls.classifiers_decisions = cls.random_state.randint(0, 2, size=(5, 3, 5))
+        cls.combination = [1, 3, 4]
+        cls.y = np.array([1, 1, 0, 0, 1])
+        cls.clf = entropy_fusion.EntropyFusion()
+
+    def test_simple(cls):
+        entropy = cls.clf.diversity_measure(
+            cls.classifiers_decisions,
+            cls.combination,
+            cls.y)
+        cls.assertAlmostEqual(entropy, 0.2)