diff --git a/config_files/config.ini b/config_files/config.ini
index 027ca27a52ab8706682e1dd4770a103aa8cce11f..f77c2e4acc63a755b6db59aebbc635f4da62bf51 100644
--- a/config_files/config.ini
+++ b/config_files/config.ini
@@ -7,7 +7,7 @@ type =          str ; .hdf5
 views =         list_str ; all
 pathF =         str ; ../Data/
 nice =          int ; 0
-randomState =   str ; None
+randomState =   str ; 42
 nbCores =       int ; 1
 full =          bool ; yes
 debug =         bool ; yes
@@ -23,13 +23,13 @@ split = float ; 0.8
 nbFolds = int ; 2
 nbClass = int ; 2
 classes = list_str ; yes no
-type = list_str ; Monoview Multiview
+type = list_str ; monoview multiview
 algos_monoview = list_str ; all
 algos_multiview = list_str ; all
 statsiter = int ; 2
 metrics = list_str ; accuracy_score f1_score
 metric_princ = str ; f1_score
-HPS_type = str ; randomizedSearch
+HPS_type = str ; randomized_search
 HPS_iter = int ; 2
 
 
@@ -38,150 +38,97 @@ HPS_iter = int ; 2
 # The Monoview Classifier arguments #
 #####################################
 
-[RandomForest]
+[random_forest]
 n_estimators = list_int ; 25
 max_depth = list_int ; 3
 criterion = list_str ; entropy
 
-[SVMLinear]
+[svm_linear]
 C = list_float ; 1
 
-[SVMRBF]
+[svm_rbf]
 C = list_float ; 1
 
-[SVMPoly]
+[svm_poly]
 C = list_float ; 1
 degree = list_int ; 2
 
-[Adaboost]
+[adaboost]
 n_estimators = list_int ; 50
 base_estimator = list_str ; DecisionTreeClassifier
 
-[AdaboostPregen]
+[adaboost_pregen]
 n_estimators = list_int ; 50
 base_estimator = list_str ; DecisionTreeClassifier
 n_stumps = list_int ; 1
 
-[AdaboostPregen10]
-n_estimators = list_int ; 50
-base_estimator = list_str ; DecisionTreeClassifier
-n_stumps = list_int ; 1
-
-[AdaboostGraalpy]
+[adaboost_graalpy]
 n_iterations = list_int ; 50
 n_stumps = list_int ; 1
 
-[DecisionTree]
+[decision_tree]
 max_depth = list_int ; 10
 criterion = list_str ; gini
 splitter = list_str ; best
 
-[DecisionTreePregen]
+[decision_tree_pregen]
 max_depth = list_int ; 10
 criterion = list_str ; gini
 splitter = list_str ; best
 n_stumps = list_int ; 1
 
-[SGD]
+[sgd]
 loss = list_str ; hinge
 penalty = list_str ; l2
 alpha = list_float ; 0.0001
 
-[KNN]
+[knn]
 n_neighbors = list_int ; 5
 weights = list_str ; uniform
 algorithm = list_str ; auto
 
-[SCM]
+[scm]
 model_type = list_str ; conjunction
 max_rules = list_int ; 10
 p = list_float ; 0.1
 
-[SCMPregen]
+[scm_pregen]
 model_type = list_str ; conjunction
 max_rules = list_int ; 10
 p = list_float ; 0.1
 n_stumps = list_int ; 1
 
-[CQBoost]
+[cq_boost]
 mu = list_float ; 0.01
 epsilon = list_float ; 1e-06
 n_max_iterations = list_int ; 5
 n_stumps = list_int ; 1
 
-[CGDesc]
+[cg_desc]
 n_max_iterations = list_int ; 10
 n_stumps = list_int ; 1
 
-[CGDesc10]
+[cb_boost]
 n_max_iterations = list_int ; 10
 n_stumps = list_int ; 1
 
-[CGreed]
-n_max_iterations = list_int ; 10
-n_stumps = list_int ; 1
-
-[QarBoost]
-n_max_iterations = list_int ; 10
-n_stumps = list_int ; 1
-
-[QarBoostNC3]
-n_max_iterations = list_int ; 10
-n_stumps = list_int ; 1
-
-[QarBoostv2]
-n_max_iterations = list_int ; 10
-n_stumps = list_int ; 1
-
-[QarBoostv3]
-n_max_iterations = list_int ; 10
-n_stumps = list_int ; 1
-
-[CBBoost]
-n_max_iterations = list_int ; 10
-n_stumps = list_int ; 1
-
-[CGDescTree]
-n_max_iterations = list_int ; 10
-n_stumps = list_int ; 1
-max_depth = list_int ; 2
-
-[MinCQGraalpy]
+[min_cq_graalpy]
 mu = list_float ; 0.01
 n_stumps_per_attribute = list_int ; 1
 
-[MinCQGraalpyTree]
+[min_cq_graalpy_tree]
 mu = list_float ; 0.01
 n_stumps_per_attribute = list_int ; 1
 max_depth = list_int ; 2
 
-[CQBoostTree]
-mu = list_float ; 0.01
-epsilon = list_float ; 1e-06
-n_max_iterations = list_int ; 5
-n_stumps = list_int ; 1
-max_depth = list_int ; 2
-
-[SCMPregenTree]
-max_rules = list_int ; 5
-model_type = list_str ; conjunction
-n_stumps = list_int ; 1
-max_depth = list_int ; 2
-
-[AdaboostPregenTree]
-n_estimators = list_int ; 50
-base_estimator = list_str ; DecisionTreeClassifier
-n_stumps = list_int ; 1
-max_depth = list_int ; 2
-
-[Lasso]
+[lasso]
 alpha = list_float ; 1
 max_iter = list_int ; 2
 
-[GradientBoosting]
+[gradient_boosting]
 n_estimators = list_int ; 2
 
-[MinCQ]
+[min_cq]
 mu = list_float ; 0.01
 n_stumps_per_attribute = list_int ; 1
 
diff --git a/multiview_platform/mono_multi_view_classifiers/exec_classif.py b/multiview_platform/mono_multi_view_classifiers/exec_classif.py
index c834e3cd086f9684bb3d78d344e9a290c85d8782..6dc0d255fe233bfc660e20d3c29dc68211705ae9 100644
--- a/multiview_platform/mono_multi_view_classifiers/exec_classif.py
+++ b/multiview_platform/mono_multi_view_classifiers/exec_classif.py
@@ -69,34 +69,38 @@ def initBenchmark(CL_type, monoviewAlgos, multiviewAlgos, args):
             benchmark["monoview"] = monoviewAlgos
 
     if "multiview" in CL_type:
-        benchmark["multiview"] = {}
-        if multiviewAlgos == ["all"]:
-            algosMutliview = allMultiviewPackages
-        else:
-            algosMutliview = multiviewAlgos
-        for multiviewPackageName in allMultiviewPackages:
-            if multiviewPackageName in algosMutliview:
-                multiviewPackage = getattr(multiview_classifiers,
-                                           multiviewPackageName)
-                multiviewModule = getattr(multiviewPackage,
-                                          multiviewPackageName + "Module")
-                benchmark = multiviewModule.getBenchmark(benchmark, args=args)
-
-    if CL_type == ["Benchmark"]:
-        allMonoviewAlgos = [name for _, name, isPackage in
-                            pkgutil.iter_modules([
-                                                     './mono_multi_view_classifiers/monoview_classifiers'])
-                            if (not isPackage) and name not in ["framework"]]
-        benchmark["monoview"] = allMonoviewAlgos
-        benchmark["multiview"] = dict(
-            (multiviewPackageName, "_") for multiviewPackageName in
-            allMultiviewPackages)
-        for multiviewPackageName in allMultiviewPackages:
-            multiviewPackage = getattr(multiview_classifiers,
-                                       multiviewPackageName)
-            multiviewModule = getattr(multiviewPackage,
-                                      multiviewPackageName + "Module")
-            benchmark = multiviewModule.getBenchmark(benchmark, args=args)
+        benchmark["multiview"] = [name for _, name, isPackage in
+                                 pkgutil.iter_modules([
+                                     "./mono_multi_view_classifiers/multiview_classifiers"])
+                                 if not isPackage]
+        # benchmark["multiview"] = {}
+        # if multiviewAlgos == ["all"]:
+        #     algosMutliview = allMultiviewPackages
+        # else:
+        #     algosMutliview = multiviewAlgos
+        # for multiviewPackageName in allMultiviewPackages:
+        #     if multiviewPackageName in algosMutliview:
+        #         multiviewPackage = getattr(multiview_classifiers,
+        #                                    multiviewPackageName)
+        #         multiviewModule = getattr(multiviewPackage,
+        #                                   multiviewPackageName + "Module")
+        #         benchmark = multiviewModule.getBenchmark(benchmark, args=args)
+
+    # if CL_type == ["Benchmark"]:
+    #     allMonoviewAlgos = [name for _, name, isPackage in
+    #                         pkgutil.iter_modules([
+    #                                                  './mono_multi_view_classifiers/monoview_classifiers'])
+    #                         if (not isPackage) and name not in ["framework"]]
+    #     benchmark["monoview"] = allMonoviewAlgos
+    #     benchmark["multiview"] = dict(
+    #         (multiviewPackageName, "_") for multiviewPackageName in
+    #         allMultiviewPackages)
+    #     for multiviewPackageName in allMultiviewPackages:
+    #         multiviewPackage = getattr(multiview_classifiers,
+    #                                    multiviewPackageName)
+    #         multiviewModule = getattr(multiviewPackage,
+    #                                   multiviewPackageName + "Module")
+    #         benchmark = multiviewModule.getBenchmark(benchmark, args=args)
 
     return benchmark
 
@@ -649,8 +653,6 @@ def execClassif(arguments):
                     metrics[metricIndex] = [metric[0], None]
 
             benchmark = initBenchmark(CL_type, monoviewAlgos, multiviewAlgos, args)
-            print(benchmark)
-            import pdb;pdb.set_trace()
             initKWARGS = initKWARGSFunc(args, benchmark)
             dataBaseTime = time.time() - start
             argumentDictionaries = initMonoviewExps(benchmark, viewsDictionary,
diff --git a/multiview_platform/mono_multi_view_classifiers/monoview/additions/CGDescUtils.py b/multiview_platform/mono_multi_view_classifiers/monoview/additions/CGDescUtils.py
index c17b30783d844ddde1ba3927d574277929d83c8b..ee90e90bb8175f1c5aacbb658d0dd1063a414b3a 100644
--- a/multiview_platform/mono_multi_view_classifiers/monoview/additions/CGDescUtils.py
+++ b/multiview_platform/mono_multi_view_classifiers/monoview/additions/CGDescUtils.py
@@ -17,7 +17,7 @@ from ... import metrics
 # Used for QarBoost and CGreed
 
 class ColumnGenerationClassifierQar(BaseEstimator, ClassifierMixin, BaseBoost):
-    def __init__(self, n_max_iterations=None, estimators_generator="Stumps",
+    def __init__(self, n_max_iterations=None, estimators_generator="Stumps", max_depth=1,
                  random_state=42, self_complemented=True, twice_the_same=False,
                  c_bound_choice=True, random_start=True,
                  n_stumps=1, use_r=True, c_bound_sol=True,
@@ -72,17 +72,9 @@ class ColumnGenerationClassifierQar(BaseEstimator, ClassifierMixin, BaseBoost):
                                        "c_bound_choice", "random_start",
                                        "n_stumps", "use_r", "c_bound_sol"]
         self.mincq_tracking = mincq_tracking
+        self.max_depth = max_depth
 
     def fit(self, X, y):
-        ones = []
-        tows = []
-        threes = []
-        fours = []
-        fives = []
-        sixes = []
-        sevens = []
-        eights = []
-
 
         formatted_X, formatted_y = self.format_X_y(X, y)
 
@@ -351,11 +343,11 @@ class ColumnGenerationClassifierQar(BaseEstimator, ClassifierMixin, BaseBoost):
 
     def init_hypotheses(self, X, y):
         """Inintialization for the hyptotheses used to build the boosted vote"""
-        if self.estimators_generator is "Stumps":
+        if self.estimators_generator == "Stumps":
             self.estimators_generator = StumpsClassifiersGenerator(
                 n_stumps_per_attribute=self.n_stumps,
                 self_complemented=self.self_complemented)
-        if self.estimators_generator is "Trees":
+        if self.estimators_generator == "Trees":
             self.estimators_generator = TreeClassifiersGenerator(
                 n_trees=self.n_stumps, max_depth=self.max_depth,
                 self_complemented=self.self_complemented)
diff --git a/multiview_platform/mono_multi_view_classifiers/monoview/additions/CQBoostUtils.py b/multiview_platform/mono_multi_view_classifiers/monoview/additions/CQBoostUtils.py
index df9f23f1f3756bcda9124069d5b99f9a87270dab..40122b1d542b9091e2e7142326b26fce50be7bb9 100644
--- a/multiview_platform/mono_multi_view_classifiers/monoview/additions/CQBoostUtils.py
+++ b/multiview_platform/mono_multi_view_classifiers/monoview/additions/CQBoostUtils.py
@@ -17,7 +17,7 @@ from ... import metrics
 
 class ColumnGenerationClassifier(BaseEstimator, ClassifierMixin, BaseBoost):
     def __init__(self, mu=0.01, epsilon=1e-06, n_max_iterations=100,
-                 estimators_generator="Stumps", dual_constraint_rhs=0,
+                 estimators_generator="Stumps", dual_constraint_rhs=0, max_depth=1,
                  save_iteration_as_hyperparameter_each=None, random_state=None):
         super(ColumnGenerationClassifier, self).__init__()
         self.epsilon = epsilon
@@ -25,6 +25,7 @@ class ColumnGenerationClassifier(BaseEstimator, ClassifierMixin, BaseBoost):
         self.estimators_generator = estimators_generator
         self.dual_constraint_rhs = dual_constraint_rhs
         self.mu = mu
+        self.max_depth = max_depth
         self.train_time = 0
         self.plotted_metric = metrics.zero_one_loss
         self.random_state = random_state
@@ -79,15 +80,18 @@ class ColumnGenerationClassifier(BaseEstimator, ClassifierMixin, BaseBoost):
                 np.squeeze(np.array((alpha).T.dot(y_kernel_matrix).T)),
                 fill_value=-np.inf)
 
-            h_values[self.chosen_columns_] = ma.masked
+            if self.chosen_columns_:
+                h_values[self.chosen_columns_] = ma.masked
+
             worst_h_index = ma.argmax(h_values)
 
             # Check for optimal solution. We ensure at least one complete iteration is done as the initialization
             # values might provide a degenerate initial solution.
-            if h_values[
-                worst_h_index] <= self.dual_constraint_rhs + self.epsilon and len(
-                    self.chosen_columns_) > 0:
-                break
+            if self.chosen_columns_:
+                if h_values[
+                    worst_h_index] <= self.dual_constraint_rhs + self.epsilon and len(
+                        self.chosen_columns_) > 0:
+                    break
 
             # Append the weak hypothesis.
             self.chosen_columns_.append(worst_h_index)
diff --git a/multiview_platform/mono_multi_view_classifiers/monoview/analyze_result.py b/multiview_platform/mono_multi_view_classifiers/monoview/analyze_result.py
index 9e6a5db61ddbaf10b31a4dcbadc29c27b44629b3..87e50317d40f97a791cf349324370cbf6f739c3e 100644
--- a/multiview_platform/mono_multi_view_classifiers/monoview/analyze_result.py
+++ b/multiview_platform/mono_multi_view_classifiers/monoview/analyze_result.py
@@ -48,24 +48,24 @@ def getMetricScore(metric, y_train, y_train_pred, y_test, y_test_pred):
     return metricScoreString, [metricScoreTrain, metricScoreTest]
 
 
-def execute(name, learningRate, KFolds, nbCores, gridSearch, metrics, nIter,
+def execute(name, learningRate, KFolds, nbCores, gridSearch, metrics_list, nIter,
             feat, CL_type, clKWARGS, classLabelsNames,
             shape, y_train, y_train_pred, y_test, y_test_pred, time,
             randomState, classifier, directory):
     metricsScores = {}
-    metricModule = getattr(metrics, metrics[0][0])
+    metricModule = getattr(metrics, metrics_list[0][0])
     trainScore = metricModule.score(y_train, y_train_pred)
     testScore = metricModule.score(y_test, y_test_pred)
     stringAnalysis = "Classification on " + name + " database for " + feat + " with " + CL_type + ".\n\n"
-    stringAnalysis += metrics[0][0] + " on train : " + str(trainScore) + "\n" + \
-                      metrics[0][0] + " on test : " + str(
+    stringAnalysis += metrics_list[0][0] + " on train : " + str(trainScore) + "\n" + \
+                      metrics_list[0][0] + " on test : " + str(
         testScore) + "\n\n"
     stringAnalysis += getDBConfigString(name, feat, learningRate, shape,
                                         classLabelsNames, KFolds)
     classifierConfigString, classifierIntepretString = getClassifierConfigString(
         gridSearch, nbCores, nIter, clKWARGS, classifier, directory, y_test)
     stringAnalysis += classifierConfigString
-    for metric in metrics:
+    for metric in metrics_list:
         metricString, metricScore = getMetricScore(metric, y_train,
                                                    y_train_pred, y_test,
                                                    y_test_pred)
diff --git a/multiview_platform/mono_multi_view_classifiers/monoview/exec_classif_mono_view.py b/multiview_platform/mono_multi_view_classifiers/monoview/exec_classif_mono_view.py
index 49b2fa2f848dde1182eb47784b4a3cca7b7ba38b..635e09c63fa4c995ab78e37e3e46be083f35ac3f 100644
--- a/multiview_platform/mono_multi_view_classifiers/monoview/exec_classif_mono_view.py
+++ b/multiview_platform/mono_multi_view_classifiers/monoview/exec_classif_mono_view.py
@@ -77,16 +77,17 @@ def ExecMonoview(directory, X, Y, name, labelsNames, classificationIndices,
     logging.debug("Done:\t Determine Train/Test split")
 
     logging.debug("Start:\t Generate classifier args")
-    classifierModuleName = CL_type.split("_")[0]
-    classifierModule = getattr(monoview_classifiers, classifierModuleName)
+    classifierModule = getattr(monoview_classifiers, CL_type)
+    classifier_class_name = classifierModule.classifier_class_name
     clKWARGS, testFoldsPreds = getHPs(classifierModule, hyperParamSearch,
-                                      nIter, CL_type, X_train, y_train,
+                                      nIter, CL_type, classifier_class_name,
+                                      X_train, y_train,
                                       randomState, outputFileName,
                                       KFolds, nbCores, metrics, kwargs)
     logging.debug("Done:\t Generate classifier args")
 
     logging.debug("Start:\t Training")
-    classifier = getattr(classifierModule, classifierModuleName)(randomState, **clKWARGS)
+    classifier = getattr(classifierModule, classifier_class_name)(randomState, **clKWARGS)
 
     classifier.fit(X_train, y_train)  # NB_CORES=nbCores,
     logging.debug("Done:\t Training")
@@ -176,27 +177,29 @@ def initTrainTest(X, Y, classificationIndices):
     return X_train, y_train, X_test, y_test, X_test_multiclass
 
 
-def getHPs(classifierModule, hyperParamSearch, nIter, CL_type, X_train, y_train,
+def getHPs(classifierModule, hyperParamSearch, nIter, classifier_module_name,
+           classifier_class_name, X_train, y_train,
            randomState,
            outputFileName, KFolds, nbCores, metrics, kwargs):
     if hyperParamSearch != "None":
         logging.debug(
             "Start:\t " + hyperParamSearch + " best settings with " + str(
-                nIter) + " iterations for " + CL_type)
+                nIter) + " iterations for " + classifier_module_name)
         classifierHPSearch = getattr(hyper_parameter_search, hyperParamSearch)
         clKWARGS, testFoldsPreds = classifierHPSearch(X_train, y_train, "monoview",
                                                       randomState,
                                                       outputFileName,
-                                                      classifierModule, CL_type,
+                                                      classifierModule,
+                                                      classifier_class_name,
                                                       folds=KFolds,
                                                       nb_cores=nbCores,
                                                       metric=metrics[0],
                                                       n_iter=nIter,
                                                       classifier_kwargs=kwargs[
-                                                          CL_type + "KWARGS"])
+                                                          classifier_module_name + "KWARGS"])
         logging.debug("Done:\t " + hyperParamSearch + " best settings")
     else:
-        clKWARGS = kwargs[CL_type + "KWARGS"]
+        clKWARGS = kwargs[classifier_module_name + "KWARGS"]
         testFoldsPreds = None
     return clKWARGS, testFoldsPreds
 
diff --git a/multiview_platform/mono_multi_view_classifiers/monoview/monoview_utils.py b/multiview_platform/mono_multi_view_classifiers/monoview/monoview_utils.py
index 34da170636c08d158470f972378c4c8993801310..8f9d6b103bc3fa447842fc104cb50872aad5a724 100644
--- a/multiview_platform/mono_multi_view_classifiers/monoview/monoview_utils.py
+++ b/multiview_platform/mono_multi_view_classifiers/monoview/monoview_utils.py
@@ -59,7 +59,7 @@ class CustomRandint:
     It can be used with a multiplier agrument to be able to perform more complex generation
     for example 10 e -(randint)"""
 
-    def __init__(self, low=0, high=0, multiplier=""):
+    def __init__(self, low=0, high=0, multiplier=""):
         self.randint = randint(low, high)
         self.multiplier = multiplier
 
diff --git a/multiview_platform/mono_multi_view_classifiers/monoview_classifiers/adaboost.py b/multiview_platform/mono_multi_view_classifiers/monoview_classifiers/adaboost.py
index 0547e5c299e4e757bf87a2259ed634bd16a0388e..6160c013ce2633fbc431d92b2de8bdfa6a14ab10 100644
--- a/multiview_platform/mono_multi_view_classifiers/monoview_classifiers/adaboost.py
+++ b/multiview_platform/mono_multi_view_classifiers/monoview_classifiers/adaboost.py
@@ -12,6 +12,8 @@ from ..monoview.monoview_utils import CustomRandint, BaseMonoviewClassifier
 __author__ = "Baptiste Bauvin"
 __status__ = "Prototype"  # Production, Development, Prototype
 
+classifier_class_name = "Adaboost"
+
 
 class Adaboost(AdaBoostClassifier, BaseMonoviewClassifier):
 
diff --git a/multiview_platform/mono_multi_view_classifiers/monoview_classifiers/adaboost_graal.py b/multiview_platform/mono_multi_view_classifiers/monoview_classifiers/adaboost_graalpy.py
similarity index 99%
rename from multiview_platform/mono_multi_view_classifiers/monoview_classifiers/adaboost_graal.py
rename to multiview_platform/mono_multi_view_classifiers/monoview_classifiers/adaboost_graalpy.py
index 03618cad734d057633be43d714864f8f54cfb9ab..3ffd5e232e665e03baadcba0c67b361c379a07b3 100644
--- a/multiview_platform/mono_multi_view_classifiers/monoview_classifiers/adaboost_graal.py
+++ b/multiview_platform/mono_multi_view_classifiers/monoview_classifiers/adaboost_graalpy.py
@@ -10,6 +10,7 @@ from ..monoview.additions.BoostUtils import StumpsClassifiersGenerator, \
 from ..monoview.monoview_utils import CustomRandint, \
     BaseMonoviewClassifier, change_label_to_minus, change_label_to_zero
 
+classifier_class_name = "AdaboostGraalpy"
 
 class AdaBoostGP(BaseEstimator, ClassifierMixin, BaseBoost):
     """Scikit-Learn compatible AdaBoost classifier. Original code by Pascal Germain, adapted by Jean-Francis Roy.
diff --git a/multiview_platform/mono_multi_view_classifiers/monoview_classifiers/adaboost_pregen.py b/multiview_platform/mono_multi_view_classifiers/monoview_classifiers/adaboost_pregen.py
index ba960986d94872a8899f04e03ecf215f1fd6b9c4..ec3fa8387d94365cb3537c1e6734851831678787 100644
--- a/multiview_platform/mono_multi_view_classifiers/monoview_classifiers/adaboost_pregen.py
+++ b/multiview_platform/mono_multi_view_classifiers/monoview_classifiers/adaboost_pregen.py
@@ -15,11 +15,14 @@ __author__ = "Baptiste Bauvin"
 __status__ = "Prototype"  # Production, Development, Prototype
 
 
+classifier_class_name = "AdaboostPregen"
+
 class AdaboostPregen(AdaBoostClassifier, BaseMonoviewClassifier,
                      PregenClassifier):
 
     def __init__(self, random_state=None, n_estimators=50,
-                 base_estimator=None, n_stumps=1, self_complemeted=True,
+                 base_estimator=None, n_stumps=1, estimators_generator="Stumps",
+                 max_depth=1, self_complemeted=True,
                  **kwargs):
         super(AdaboostPregen, self).__init__(
             random_state=random_state,
@@ -28,16 +31,19 @@ class AdaboostPregen(AdaBoostClassifier, BaseMonoviewClassifier,
             algorithm="SAMME"
         )
         self.param_names = ["n_estimators", "base_estimator", "n_stumps",
+                            "estimators_generator", "max_depth",
                             "random_state"]
         self.classed_params = ["base_estimator"]
         self.distribs = [CustomRandint(low=1, high=500),
                          [DecisionTreeClassifier(max_depth=1)], [n_stumps],
+                         ["Stumps", "Trees"], CustomRandint(low=1, high=5),
                          [random_state]]
         self.weird_strings = {"base_estimator": "class_name"}
         self.plotted_metric = metrics.zero_one_loss
         self.plotted_metric_name = "zero_one_loss"
         self.step_predictions = None
-        self.estimators_generator = "Stumps"
+        self.estimators_generator = estimators_generator
+        self.max_depth = max_depth
         self.n_stumps = n_stumps
         self.self_complemented = self_complemeted
 
diff --git a/multiview_platform/mono_multi_view_classifiers/monoview_classifiers/adaboost_pregen10.py b/multiview_platform/mono_multi_view_classifiers/monoview_classifiers/adaboost_pregen10.py
deleted file mode 100644
index e7d0e21f78b3b7a257c1e87b5b4690de8c0c8dc9..0000000000000000000000000000000000000000
--- a/multiview_platform/mono_multi_view_classifiers/monoview_classifiers/adaboost_pregen10.py
+++ /dev/null
@@ -1,38 +0,0 @@
-from sklearn.tree import DecisionTreeClassifier
-
-from .adaboost_pregen import AdaboostPregen
-
-# Author-Info
-__author__ = "Baptiste Bauvin"
-__status__ = "Prototype"  # Production, Development, Prototype
-
-
-class AdaboostPregen10(AdaboostPregen):
-
-    def __init__(self, random_state=None, n_estimators=50,
-                 base_estimator=None, n_stumps=1, self_complemeted=True,
-                 **kwargs):
-        super(AdaboostPregen10, self).__init__(
-            random_state=random_state,
-            n_estimators=100,
-            base_estimator=base_estimator,
-            n_stumps=10,
-            self_complemeted=self_complemeted
-        )
-
-
-# def formatCmdArgs(args):
-#     """Used to format kwargs for the parsed args"""
-#     kwargsDict = {'n_estimators': args.AdP_n_est,
-#                   'base_estimator': [DecisionTreeClassifier(max_depth=1)],
-#                   }
-#     return kwargsDict
-
-
-def paramsToSet(nIter, random_state):
-    """Used for weighted linear early fusion to generate random search sets"""
-    paramsSet = []
-    for _ in range(nIter):
-        paramsSet.append({"n_estimators": random_state.randint(1, 500),
-                          "base_estimator": None})
-    return paramsSet
diff --git a/multiview_platform/mono_multi_view_classifiers/monoview_classifiers/adaboost_pregen_tree.py b/multiview_platform/mono_multi_view_classifiers/monoview_classifiers/adaboost_pregen_tree.py
deleted file mode 100644
index 8276f764477d299d7566903358768a2150410035..0000000000000000000000000000000000000000
--- a/multiview_platform/mono_multi_view_classifiers/monoview_classifiers/adaboost_pregen_tree.py
+++ /dev/null
@@ -1,123 +0,0 @@
-import time
-
-import numpy as np
-from sklearn.ensemble import AdaBoostClassifier
-from sklearn.tree import DecisionTreeClassifier
-
-from .. import metrics
-from ..monoview.additions.BoostUtils import get_accuracy_graph
-from ..monoview.additions.PregenUtils import PregenClassifier
-from ..monoview.monoview_utils import CustomRandint, BaseMonoviewClassifier, \
-    change_label_to_zero
-
-# Author-Info
-__author__ = "Baptiste Bauvin"
-__status__ = "Prototype"  # Production, Development, Prototype
-
-
-class AdaboostPregenTree(AdaBoostClassifier, BaseMonoviewClassifier,
-                         PregenClassifier):
-
-    def __init__(self, random_state=None, n_estimators=50,
-                 base_estimator=None, n_stumps=1, self_complemeted=True,
-                 max_depth=2, **kwargs):
-        super(AdaboostPregenTree, self).__init__(
-            random_state=random_state,
-            n_estimators=n_estimators,
-            base_estimator=base_estimator,
-            algorithm="SAMME"
-        )
-        self.param_names = ["n_estimators", "base_estimator", "n_stumps",
-                            "random_state", "max_depth"]
-        self.classed_params = ["base_estimator"]
-        self.distribs = [CustomRandint(low=1, high=500),
-                         [DecisionTreeClassifier(max_depth=1)], [n_stumps],
-                         [random_state], [max_depth]]
-        self.weird_strings = {"base_estimator": "class_name"}
-        self.plotted_metric = metrics.zero_one_loss
-        self.plotted_metric_name = "zero_one_loss"
-        self.step_predictions = None
-        self.estimators_generator = "Trees"
-        self.n_stumps = n_stumps
-        self.max_depth = max_depth
-        self.self_complemented = self_complemeted
-        self.random_state = random_state
-
-    def fit(self, X, y, sample_weight=None):
-        pregen_X, pregen_y = self.pregen_voters(X, y,
-                                                generator=self.estimators_generator)
-        begin = time.time()
-        super(AdaboostPregenTree, self).fit(pregen_X, pregen_y,
-                                            sample_weight=sample_weight)
-        end = time.time()
-        self.train_time = end - begin
-        self.train_shape = pregen_X.shape
-        self.base_predictions = np.array(
-            [change_label_to_zero(estim.predict(pregen_X)) for estim in
-             self.estimators_])
-        self.metrics = np.array(
-            [self.plotted_metric.score(change_label_to_zero(pred), y) for pred
-             in self.staged_predict(pregen_X)])
-        self.bounds = np.array([np.prod(
-            np.sqrt(1 - 4 * np.square(0.5 - self.estimator_errors_[:i + 1])))
-                                for i in
-                                range(self.estimator_errors_.shape[0])])
-
-    def canProbas(self):
-        """Used to know if the classifier can return label probabilities"""
-        return True
-
-    def predict(self, X):
-        begin = time.time()
-        pregen_X, _ = self.pregen_voters(X)
-        pred = super(AdaboostPregenTree, self).predict(pregen_X)
-        end = time.time()
-        self.pred_time = end - begin
-        if pregen_X.shape != self.train_shape:
-            self.step_predictions = np.array(
-                [change_label_to_zero(step_pred) for step_pred in
-                 self.staged_predict(pregen_X)])
-        return change_label_to_zero(pred)
-
-    def getInterpret(self, directory, y_test):
-        interpretString = ""
-        interpretString += self.getFeatureImportance(directory)
-        interpretString += "\n\n Estimator error | Estimator weight\n"
-        interpretString += "\n".join(
-            [str(error) + " | " + str(weight / sum(self.estimator_weights_)) for
-             error, weight in
-             zip(self.estimator_errors_, self.estimator_weights_)])
-        step_test_metrics = np.array(
-            [self.plotted_metric.score(y_test, step_pred) for step_pred in
-             self.step_predictions])
-        get_accuracy_graph(step_test_metrics, "AdaboostPregen",
-                           directory + "test_metrics.png",
-                           self.plotted_metric_name, set="test")
-        get_accuracy_graph(self.metrics, "AdaboostPregen",
-                           directory + "metrics.png", self.plotted_metric_name,
-                           bounds=list(self.bounds),
-                           bound_name="boosting bound")
-        np.savetxt(directory + "test_metrics.csv", step_test_metrics,
-                   delimiter=',')
-        np.savetxt(directory + "train_metrics.csv", self.metrics, delimiter=',')
-        np.savetxt(directory + "times.csv",
-                   np.array([self.train_time, self.pred_time]), delimiter=',')
-        return interpretString
-
-
-# def formatCmdArgs(args):
-#     """Used to format kwargs for the parsed args"""
-#     kwargsDict = {'n_estimators': args.AdPT_n_est,
-#                   'base_estimator': [DecisionTreeClassifier(max_depth=1)],
-#                   'n_stumps': args.AdPT_trees,
-#                   "max_depth": args.AdPT_max_depth}
-#     return kwargsDict
-
-
-def paramsToSet(nIter, random_state):
-    """Used for weighted linear early fusion to generate random search sets"""
-    paramsSet = []
-    for _ in range(nIter):
-        paramsSet.append({"n_estimators": random_state.randint(1, 500),
-                          "base_estimator": None})
-    return paramsSet
diff --git a/multiview_platform/mono_multi_view_classifiers/monoview_classifiers/c_greed.py b/multiview_platform/mono_multi_view_classifiers/monoview_classifiers/c_greed.py
deleted file mode 100644
index 823776895c48af7a6b3ac46e5b698c8fc96a2598..0000000000000000000000000000000000000000
--- a/multiview_platform/mono_multi_view_classifiers/monoview_classifiers/c_greed.py
+++ /dev/null
@@ -1,50 +0,0 @@
-from ..monoview.additions.CGDescUtils import ColumnGenerationClassifierQar
-from ..monoview.monoview_utils import BaseMonoviewClassifier, CustomRandint
-
-
-class CGreed(ColumnGenerationClassifierQar, BaseMonoviewClassifier):
-
-    def __init__(self, random_state=None, n_max_iterations=500, n_stumps=10,
-                 **kwargs):
-        super(CGreed, self).__init__(n_max_iterations=n_max_iterations,
-                                     random_state=random_state,
-                                     self_complemented=True,
-                                     twice_the_same=False,
-                                     c_bound_choice=True,
-                                     random_start=False,
-                                     n_stumps=n_stumps,
-                                     use_r=True,
-                                     c_bound_sol=True,
-                                     estimators_generator="Stumps"
-                                     )
-
-        self.param_names = ["n_max_iterations", "n_stumps", "random_state"]
-        self.distribs = [CustomRandint(low=2, high=1000), [n_stumps],
-                         [random_state]]
-        self.classed_params = []
-        self.weird_strings = {}
-
-    def canProbas(self):
-        """Used to know if the classifier can return label probabilities"""
-        return True
-
-    def getInterpret(self, directory, y_test):
-        return self.getInterpretQar(directory, y_test)
-
-    def get_name_for_fusion(self):
-        return "CGr"
-
-
-# def formatCmdArgs(args):
-#     """Used to format kwargs for the parsed args"""
-#     kwargsDict = {"n_stumps": args.CGR_stumps,
-#                   "n_max_iterations": args.CGR_n_iter}
-#     return kwargsDict
-
-
-def paramsToSet(nIter, randomState):
-    """Used for weighted linear early fusion to generate random search sets"""
-    paramsSet = []
-    for _ in range(nIter):
-        paramsSet.append({})
-    return paramsSet
diff --git a/multiview_platform/mono_multi_view_classifiers/monoview_classifiers/cb_boost.py b/multiview_platform/mono_multi_view_classifiers/monoview_classifiers/cb_boost.py
index df7329ac376ec440bc9507bb3a4e710e4e6f44b0..c9340c4d2e8ef1cece11d30f249ad2b789b28af7 100644
--- a/multiview_platform/mono_multi_view_classifiers/monoview_classifiers/cb_boost.py
+++ b/multiview_platform/mono_multi_view_classifiers/monoview_classifiers/cb_boost.py
@@ -2,6 +2,8 @@ from ..monoview.additions.CBBoostUtils import CBBoostClassifier
 from ..monoview.monoview_utils import BaseMonoviewClassifier, CustomRandint
 
 
+classifier_class_name = "CBBoost"
+
 class CBBoost(CBBoostClassifier, BaseMonoviewClassifier):
 
     def __init__(self, random_state=None, n_max_iterations=500, n_stumps=1,
diff --git a/multiview_platform/mono_multi_view_classifiers/monoview_classifiers/cg_desc.py b/multiview_platform/mono_multi_view_classifiers/monoview_classifiers/cg_desc.py
index 66f4d287aba2821be2d597fc183ac6e7d9e2d351..7a881285807f714f5b5b493ccc0af073f7aeefb6 100644
--- a/multiview_platform/mono_multi_view_classifiers/monoview_classifiers/cg_desc.py
+++ b/multiview_platform/mono_multi_view_classifiers/monoview_classifiers/cg_desc.py
@@ -2,25 +2,31 @@ from ..monoview.additions.CGDescUtils import ColumnGenerationClassifierQar
 from ..monoview.monoview_utils import BaseMonoviewClassifier, CustomRandint
 
 
+classifier_class_name = "CGDesc"
+
 class CGDesc(ColumnGenerationClassifierQar, BaseMonoviewClassifier):
 
     def __init__(self, random_state=None, n_max_iterations=500, n_stumps=1,
+                 estimators_generator="Stumps", twice_the_same=True, max_depth=1,
                  **kwargs):
         super(CGDesc, self).__init__(n_max_iterations=n_max_iterations,
                                      random_state=random_state,
                                      self_complemented=True,
-                                     twice_the_same=True,
+                                     twice_the_same=twice_the_same,
                                      c_bound_choice=True,
                                      random_start=False,
                                      n_stumps=n_stumps,
                                      use_r=False,
                                      c_bound_sol=True,
-                                     estimators_generator="Stumps",
+                                     estimators_generator=estimators_generator,
+                                     max_depth=max_depth,
                                      mincq_tracking=False,
                                      )
-        self.param_names = ["n_max_iterations", "n_stumps", "random_state"]
+        self.param_names = ["n_max_iterations", "n_stumps",
+                            "estimators_generator", "max_depth", "random_state", "twice_the_same"]
         self.distribs = [CustomRandint(low=2, high=500), [n_stumps],
-                         [random_state]]
+                         ["Stumps", "Trees"], CustomRandint(low=1, high=5),
+                         [random_state], [True, False]]
         self.classed_params = []
         self.weird_strings = {}
 
diff --git a/multiview_platform/mono_multi_view_classifiers/monoview_classifiers/cg_desc10.py b/multiview_platform/mono_multi_view_classifiers/monoview_classifiers/cg_desc10.py
deleted file mode 100644
index f29d5dd39cce3cd2683dd7440c2f005403754387..0000000000000000000000000000000000000000
--- a/multiview_platform/mono_multi_view_classifiers/monoview_classifiers/cg_desc10.py
+++ /dev/null
@@ -1,25 +0,0 @@
-from .cg_desc import CGDesc
-
-
-class CGDesc10(CGDesc):
-
-    def __init__(self, random_state=None, n_max_iterations=500, n_stumps=1,
-                 **kwargs):
-        super(CGDesc10, self).__init__(n_max_iterations=100,
-                                       random_state=random_state,
-                                       n_stumps=10, )
-
-
-# def formatCmdArgs(args):
-    # """Used to format kwargs for the parsed args"""
-    # kwargsDict = {"n_stumps": args.CGD_stumps,
-    #               "n_max_iterations": args.CGD_n_iter}
-    # return kwargsDict
-
-
-def paramsToSet(nIter, randomState):
-    """Used for weighted linear early fusion to generate random search sets"""
-    paramsSet = []
-    for _ in range(nIter):
-        paramsSet.append({})
-    return paramsSet
diff --git a/multiview_platform/mono_multi_view_classifiers/monoview_classifiers/cg_desc_tree.py b/multiview_platform/mono_multi_view_classifiers/monoview_classifiers/cg_desc_tree.py
deleted file mode 100644
index 3694f79d9a47bec0580074a45ed756f34c40f678..0000000000000000000000000000000000000000
--- a/multiview_platform/mono_multi_view_classifiers/monoview_classifiers/cg_desc_tree.py
+++ /dev/null
@@ -1,52 +0,0 @@
-from ..monoview.additions.CGDescUtils import ColumnGenerationClassifierQar
-from ..monoview.monoview_utils import BaseMonoviewClassifier, CustomRandint
-
-
-class CGDescTree(ColumnGenerationClassifierQar, BaseMonoviewClassifier):
-
-    def __init__(self, random_state=None, n_max_iterations=500, n_stumps=1,
-                 max_depth=2, **kwargs):
-        super(CGDescTree, self).__init__(n_max_iterations=n_max_iterations,
-                                         random_state=random_state,
-                                         self_complemented=True,
-                                         twice_the_same=True,
-                                         c_bound_choice=True,
-                                         random_start=False,
-                                         n_stumps=n_stumps,
-                                         use_r=True,
-                                         c_bound_sol=True,
-                                         estimators_generator="Trees"
-                                         )
-        self.max_depth = max_depth
-        self.param_names = ["n_max_iterations", "n_stumps", "random_state",
-                            "max_depth"]
-        self.distribs = [CustomRandint(low=2, high=1000), [n_stumps],
-                         [random_state], [max_depth]]
-        self.classed_params = []
-        self.weird_strings = {}
-
-    def canProbas(self):
-        """Used to know if the classifier can return label probabilities"""
-        return True
-
-    def getInterpret(self, directory, y_test):
-        return self.getInterpretQar(directory, y_test)
-
-    def get_name_for_fusion(self):
-        return "CGDT"
-
-
-# def formatCmdArgs(args):
-#     """Used to format kwargs for the parsed args"""
-#     kwargsDict = {"n_stumps": args.CGDT_trees,
-#                   "n_max_iterations": args.CGDT_n_iter,
-#                   "max_depth": args.CGDT_max_depth}
-#     return kwargsDict
-
-
-def paramsToSet(nIter, randomState):
-    """Used for weighted linear early fusion to generate random search sets"""
-    paramsSet = []
-    for _ in range(nIter):
-        paramsSet.append({})
-    return paramsSet
diff --git a/multiview_platform/mono_multi_view_classifiers/monoview_classifiers/cq_boost.py b/multiview_platform/mono_multi_view_classifiers/monoview_classifiers/cq_boost.py
index 057df43e6c40f0755eeefeb75a3e7f7778240e1a..7984a428a16b77ef1a293f4812cd3085a2156bad 100644
--- a/multiview_platform/mono_multi_view_classifiers/monoview_classifiers/cq_boost.py
+++ b/multiview_platform/mono_multi_view_classifiers/monoview_classifiers/cq_boost.py
@@ -5,23 +5,28 @@ from ..monoview.additions.CQBoostUtils import ColumnGenerationClassifier
 from ..monoview.monoview_utils import CustomUniform, CustomRandint, \
     BaseMonoviewClassifier
 
+classifier_class_name = "CQBoost"
 
 class CQBoost(ColumnGenerationClassifier, BaseMonoviewClassifier):
 
     def __init__(self, random_state=None, mu=0.01, epsilon=1e-06, n_stumps=1,
-                 n_max_iterations=None, **kwargs):
+                 n_max_iterations=None, estimators_generator="Stumps",
+                 max_depth=1, **kwargs):
         super(CQBoost, self).__init__(
             random_state=random_state,
             mu=mu,
             epsilon=epsilon,
-            estimators_generator="Stumps",
-            n_max_iterations=n_max_iterations
+            estimators_generator=estimators_generator,
+            n_max_iterations=n_max_iterations,
+            max_depth=max_depth
         )
         self.param_names = ["mu", "epsilon", "n_stumps", "random_state",
-                            "n_max_iterations"]
+                            "n_max_iterations", "estimators_generator",
+                            "max_depth"]
         self.distribs = [CustomUniform(loc=0.5, state=1.0, multiplier="e-"),
                          CustomRandint(low=1, high=15, multiplier="e-"),
-                         [n_stumps], [random_state], [n_max_iterations]]
+                         [n_stumps], [random_state], [n_max_iterations],
+                         ["Stumps", "Trees"], CustomRandint(low=1, high=5)]
         self.classed_params = []
         self.weird_strings = {}
         self.n_stumps = n_stumps
diff --git a/multiview_platform/mono_multi_view_classifiers/monoview_classifiers/cq_boosttree.py b/multiview_platform/mono_multi_view_classifiers/monoview_classifiers/cq_boosttree.py
deleted file mode 100644
index b999867ae0acb61eccf2515603527dfbdddc5a13..0000000000000000000000000000000000000000
--- a/multiview_platform/mono_multi_view_classifiers/monoview_classifiers/cq_boosttree.py
+++ /dev/null
@@ -1,74 +0,0 @@
-import numpy as np
-
-from ..monoview.additions.BoostUtils import getInterpretBase
-from ..monoview.additions.CQBoostUtils import ColumnGenerationClassifier
-from ..monoview.monoview_utils import CustomUniform, CustomRandint, \
-    BaseMonoviewClassifier
-
-
-class CQBoostTree(ColumnGenerationClassifier, BaseMonoviewClassifier):
-
-    def __init__(self, random_state=None, mu=0.01, epsilon=1e-06, n_stumps=1,
-                 max_depth=2, n_max_iterations=100, **kwargs):
-        super(CQBoostTree, self).__init__(
-            random_state=random_state,
-            mu=mu,
-            epsilon=epsilon,
-            estimators_generator="Trees",
-            n_max_iterations=n_max_iterations
-        )
-        self.param_names = ["mu", "epsilon", "n_stumps", "random_state",
-                            "max_depth", "n_max_iterations"]
-        self.distribs = [CustomUniform(loc=0.5, state=1.0, multiplier="e-"),
-                         CustomRandint(low=1, high=15, multiplier="e-"),
-                         [n_stumps], [random_state], [max_depth],
-                         [n_max_iterations]]
-        self.classed_params = []
-        self.weird_strings = {}
-        self.n_stumps = n_stumps
-        self.max_depth = max_depth
-        if "nbCores" not in kwargs:
-            self.nbCores = 1
-        else:
-            self.nbCores = kwargs["nbCores"]
-
-    def canProbas(self):
-        """Used to know if the classifier can return label probabilities"""
-        return True
-
-    def getInterpret(self, directory, y_test):
-        np.savetxt(directory + "train_metrics.csv", self.train_metrics,
-                   delimiter=',')
-        np.savetxt(directory + "c_bounds.csv", self.c_bounds,
-                   delimiter=',')
-        np.savetxt(directory + "y_test_step.csv", self.step_decisions,
-                   delimiter=',')
-        step_metrics = []
-        for step_index in range(self.step_decisions.shape[1] - 1):
-            step_metrics.append(self.plotted_metric.score(y_test,
-                                                          self.step_decisions[:,
-                                                          step_index]))
-        step_metrics = np.array(step_metrics)
-        np.savetxt(directory + "step_test_metrics.csv", step_metrics,
-                   delimiter=',')
-        return getInterpretBase(self, directory, "CQBoost", self.weights_,
-                                y_test)
-
-
-# def formatCmdArgs(args):
-#     """Used to format kwargs for the parsed args"""
-#     kwargsDict = {"mu": args.CQBT_mu,
-#                   "epsilon": args.CQBT_epsilon,
-#                   "n_stumps": args.CQBT_trees,
-#                   "max_depth": args.CQBT_max_depth,
-#                   "n_max_iterations": args.CQBT_n_iter}
-#     return kwargsDict
-
-
-def paramsToSet(nIter, randomState):
-    """Used for weighted linear early fusion to generate random search sets"""
-    paramsSet = []
-    for _ in range(nIter):
-        paramsSet.append({"mu": 10 ** -randomState.uniform(0.5, 1.5),
-                          "epsilon": 10 ** -randomState.randint(1, 15)})
-    return paramsSet
diff --git a/multiview_platform/mono_multi_view_classifiers/monoview_classifiers/cq_boostv2.py b/multiview_platform/mono_multi_view_classifiers/monoview_classifiers/cq_boostv2.py
deleted file mode 100644
index 0ac2d774114ff8c90d1f61a9822126f3877cf64b..0000000000000000000000000000000000000000
--- a/multiview_platform/mono_multi_view_classifiers/monoview_classifiers/cq_boostv2.py
+++ /dev/null
@@ -1,233 +0,0 @@
-import numpy as np
-
-from ..monoview.additions.BoostUtils import getInterpretBase
-from ..monoview.additions.CQBoostUtils import ColumnGenerationClassifier
-from ..monoview.monoview_utils import CustomRandint, CustomUniform, \
-    BaseMonoviewClassifier
-
-
-class ColumnGenerationClassifierv2(ColumnGenerationClassifier):
-
-    def __init__(self, mu=0.01, epsilon=1e-06, random_state=None):
-        super(ColumnGenerationClassifierv2, self).__init__(mu=mu,
-                                                           epsilon=epsilon,
-                                                           random_state=random_state)
-
-    def initialize(self):
-        self.weights_ = []
-        self.edge_scores = []
-        self.alphas = []
-
-    def update_values(self, h_values=None, worst_h_index=None, alpha=None,
-                      w=None):
-        self.edge_scores.append(h_values[worst_h_index])
-        self.alphas.append(alpha)
-        self.weights_.append(w[-1])
-
-    def get_margins(self, w=None):
-        self.weights = np.array(self.weights_)
-        self.final_vote_weights = np.array(
-            [np.prod(1 - self.weights[t + 1:]) * self.weights_[t] if t <
-                                                                     self.weights.shape[
-                                                                         0] - 1 else
-             self.weights[t] for t in range(self.weights.shape[0])])
-        margins = np.squeeze(np.asarray(
-            np.matmul(self.classification_matrix[:, self.chosen_columns_],
-                      self.final_vote_weights)))
-        return margins
-
-    def compute_weights_(self, w=None):
-        self.weights_ = np.array(self.weights_)
-        self.final_vote_weights = np.array(
-            [np.prod(1 - self.weights_[t + 1:]) * self.weights_[t] if t <
-                                                                      self.weights_.shape[
-                                                                          0] - 1 else
-             self.weights_[t] for t in range(self.weights_.shape[0])])
-        self.weights_ = self.final_vote_weights
-
-    def get_matrix_to_optimize(self, y_kernel_matrix, w=None):
-        m = self.n_total_examples
-        if w is not None:
-            matrix_to_optimize = np.concatenate(
-                (np.matmul(self.matrix_to_optimize, w).reshape((m, 1)),
-                 y_kernel_matrix[:, self.chosen_columns_[-1]].reshape((m, 1))),
-                axis=1)
-        else:
-            matrix_to_optimize = y_kernel_matrix[:,
-                                 self.chosen_columns_[-1]].reshape((m, 1))
-        return matrix_to_optimize
-
-
-class CQBoostv2(ColumnGenerationClassifierv2, BaseMonoviewClassifier):
-
-    def __init__(self, random_state=None, mu=0.01, epsilon=1e-06, **kwargs):
-        super(CQBoostv2, self).__init__(
-            random_state=random_state,
-            mu=mu,
-            epsilon=epsilon
-        )
-        self.param_names = ["mu", "epsilon"]
-        self.distribs = [CustomUniform(loc=0.5, state=1.0, multiplier="e-"),
-                         CustomRandint(low=1, high=15, multiplier="e-")]
-        self.classed_params = []
-        self.weird_strings = {}
-
-    def canProbas(self):
-        """Used to know if the classifier can return label probabilities"""
-        return True
-
-    def getInterpret(self, directory, y_test):
-        return getInterpretBase(self, directory, "CQBoostv2", self.weights_, )
-
-    def get_name_for_fusion(self):
-        return "CQB2"
-
-
-# def formatCmdArgs(args):
-#     """Used to format kwargs for the parsed args"""
-#     kwargsDict = {"mu": args.CQB_mu,
-#                   "epsilon": args.CQB_epsilon}
-#     return kwargsDict
-
-
-def paramsToSet(nIter, randomState):
-    """Used for weighted linear early fusion to generate random search sets"""
-    paramsSet = []
-    for _ in range(nIter):
-        paramsSet.append({"mu": 10 ** -randomState.uniform(0.5, 1.5),
-                          "epsilon": 10 ** -randomState.randint(1, 15)})
-    return paramsSet
-
-# class CQBoostv2(CqBoostClassifierv2):
-#
-#     def __init__(self, random_state, **kwargs):
-#         super(CQBoostv2, self).__init__(
-#             mu=kwargs['mu'],
-#             epsilon=kwargs['epsilon'],
-#             n_max_iterations= kwargs['n_max_iterations'],
-#             )
-#
-#     def canProbas(self):
-#         """Used to know if the classifier can return label probabilities"""
-#         return False
-#
-#     def paramsToSrt(self, nIter=1):
-#         """Used for weighted linear early fusion to generate random search sets"""
-#         paramsSet = []
-#         for _ in range(nIter):
-#             paramsSet.append({"mu": 0.001,
-#                               "epsilon": 1e-08,
-#                               "n_max_iterations": None})
-#         return paramsSet
-#
-#     def getKWARGS(self, args):
-#         """Used to format kwargs for the parsed args"""
-#         kwargsDict = {}
-#         kwargsDict['mu'] = 0.001
-#         kwargsDict['epsilon'] = 1e-08
-#         kwargsDict['n_max_iterations'] = None
-#         return kwargsDict
-#
-#     def genPipeline(self):
-#         return Pipeline([('classifier', CqBoostClassifierv2())])
-#
-#     def genParamsDict(self, randomState):
-#         return {"classifier__mu": [0.001],
-#                 "classifier__epsilon": [1e-08],
-#                 "classifier__n_max_iterations": [None]}
-#
-#     def genBestParams(self, detector):
-#         return {"mu": detector.best_params_["classifier__mu"],
-#                 "epsilon": detector.best_params_["classifier__epsilon"],
-#                 "n_max_iterations": detector.best_params_["classifier__n_max_iterations"]}
-#
-#     def genParamsFromDetector(self, detector):
-#         nIter = len(detector.cv_results_['param_classifier__mu'])
-#         return [("mu", np.array([0.001 for _ in range(nIter)])),
-#                 ("epsilon", np.array(detector.cv_results_['param_classifier__epsilon'])),
-#                 ("n_max_iterations", np.array(detector.cv_results_['param_classifier__n_max_iterations']))]
-#
-#     def getConfig(self, config):
-#         if type(config) is not dict:  # Used in late fusion when config is a classifier
-#             return "\n\t\t- CQBoost with mu : " + str(config.mu) + ", epsilon : " + str(
-#                 config.epsilon + ", n_max_iterations : " + str(config.n_max_iterations))
-#         else:
-#             return "\n\t\t- CQBoost with mu : " + str(config["mu"]) + ", epsilon : " + str(
-#                    config["epsilon"] + ", n_max_iterations : " + str(config["n_max_iterations"]))
-#
-#
-#     def getInterpret(self, classifier, directory):
-#         interpretString = ""
-#         return interpretString
-#
-#
-# def canProbas():
-#     return False
-#
-#
-# def fit(DATASET, CLASS_LABELS, randomState, NB_CORES=1, **kwargs):
-#     """Used to fit the monoview classifier with the args stored in kwargs"""
-#     start = time.time()
-#     classifier = CqBoostClassifierv2(mu=kwargs['mu'],
-#                                    epsilon=kwargs['epsilon'],
-#                                    n_max_iterations=kwargs["n_max_iterations"],)
-#                                    # random_state=randomState)
-#     classifier.fit(DATASET, CLASS_LABELS)
-#     end = time.time()
-#     classifier.train_time =end-start
-#     return classifier
-#
-#
-# def paramsToSet(nIter, randomState):
-#     """Used for weighted linear early fusion to generate random search sets"""
-#     paramsSet = []
-#     for _ in range(nIter):
-#         paramsSet.append({"mu": randomState.uniform(1e-02, 10**(-0.5)),
-#                           "epsilon": 10**-randomState.randint(1, 15),
-#                           "n_max_iterations": None})
-#     return paramsSet
-#
-#
-# def getKWARGS(args):
-#     """Used to format kwargs for the parsed args"""
-#     kwargsDict = {}
-#     kwargsDict['mu'] = args.CQB2_mu
-#     kwargsDict['epsilon'] = args.CQB2_epsilon
-#     kwargsDict['n_max_iterations'] = None
-#     return kwargsDict
-#
-#
-# def genPipeline():
-#     return Pipeline([('classifier', CqBoostClassifierv2())])
-#
-#
-# def genParamsDict(randomState):
-#     return {"classifier__mu": CustomUniform(loc=.5, state=2, multiplier='e-'),
-#                 "classifier__epsilon": CustomRandint(low=1, high=15, multiplier='e-'),
-#                 "classifier__n_max_iterations": [None]}
-#
-#
-# def genBestParams(detector):
-#     return {"mu": detector.best_params_["classifier__mu"],
-#                 "epsilon": detector.best_params_["classifier__epsilon"],
-#                 "n_max_iterations": detector.best_params_["classifier__n_max_iterations"]}
-#
-#
-# def genParamsFromDetector(detector):
-#     nIter = len(detector.cv_results_['param_classifier__mu'])
-#     return [("mu", np.array([0.001 for _ in range(nIter)])),
-#             ("epsilon", np.array(detector.cv_results_['param_classifier__epsilon'])),
-#             ("n_max_iterations", np.array(detector.cv_results_['param_classifier__n_max_iterations']))]
-#
-#
-# def getConfig(config):
-#     if type(config) is not dict:  # Used in late fusion when config is a classifier
-#         return "\n\t\t- CQBoostv2 with mu : " + str(config.mu) + ", epsilon : " + str(
-#             config.epsilon) + ", n_max_iterations : " + str(config.n_max_iterations)
-#     else:
-#         return "\n\t\t- CQBoostv2 with mu : " + str(config["mu"]) + ", epsilon : " + str(
-#             config["epsilon"]) + ", n_max_iterations : " + str(config["n_max_iterations"])
-#
-#
-# def getInterpret(classifier, directory):
-#     return getInterpretBase(classifier, directory, "CQBoostv2", classifier.final_vote_weights)
diff --git a/multiview_platform/mono_multi_view_classifiers/monoview_classifiers/cq_boostv21.py b/multiview_platform/mono_multi_view_classifiers/monoview_classifiers/cq_boostv21.py
deleted file mode 100644
index 45ae008f03476c0d3342742938e12071cd0ae754..0000000000000000000000000000000000000000
--- a/multiview_platform/mono_multi_view_classifiers/monoview_classifiers/cq_boostv21.py
+++ /dev/null
@@ -1,327 +0,0 @@
-import logging
-import time
-from collections import defaultdict
-
-import numpy as np
-import numpy.ma as ma
-import scipy
-from sklearn.base import BaseEstimator, ClassifierMixin
-from sklearn.metrics import accuracy_score
-from sklearn.utils.validation import check_is_fitted
-
-from ..monoview.additions.BoostUtils import StumpsClassifiersGenerator, sign, \
-    getInterpretBase, BaseBoost
-from ..monoview.monoview_utils import CustomUniform, CustomRandint, \
-    BaseMonoviewClassifier
-
-
-class ColumnGenerationClassifierv21(BaseEstimator, ClassifierMixin, BaseBoost):
-    def __init__(self, epsilon=1e-06, n_max_iterations=None,
-                 estimators_generator=None, dual_constraint_rhs=0,
-                 save_iteration_as_hyperparameter_each=None, random_state=42):
-        super(ColumnGenerationClassifierv21, self).__init__()
-        self.epsilon = epsilon
-        self.n_max_iterations = n_max_iterations
-        self.estimators_generator = estimators_generator
-        self.dual_constraint_rhs = dual_constraint_rhs
-        self.save_iteration_as_hyperparameter_each = save_iteration_as_hyperparameter_each
-        self.random_state = random_state
-
-    def fit(self, X, y):
-        if scipy.sparse.issparse(X):
-            logging.info('Converting to dense matrix.')
-            X = np.array(X.todense())
-
-        if self.estimators_generator is None:
-            self.estimators_generator = StumpsClassifiersGenerator(
-                n_stumps_per_attribute=self.n_stumps, self_complemented=True)
-
-        y[y == 0] = -1
-
-        self.estimators_generator.fit(X, y)
-        self.classification_matrix = self._binary_classification_matrix(X)
-
-        self.weights_ = []
-        self.infos_per_iteration_ = defaultdict(list)
-
-        m, n = self.classification_matrix.shape
-        y_kernel_matrix = np.multiply(y.reshape((len(y), 1)),
-                                      self.classification_matrix)
-
-        # Initialization
-
-        w = None
-        self.collected_weight_vectors_ = {}
-        self.collected_dual_constraint_violations_ = {}
-
-        example_weights = self._initialize_alphas(m).reshape((m, 1))
-
-        self.chosen_columns_ = []
-        self.fobidden_columns = []
-        self.edge_scores = []
-        self.example_weights_ = [example_weights]
-        self.train_accuracies = []
-        self.previous_votes = []
-
-        self.n_total_hypotheses_ = n
-        self.n_total_examples = m
-        # print("\n \t\t Start fit\n")
-        for k in range(min(n,
-                           self.n_max_iterations if self.n_max_iterations is not None else np.inf)):
-            # Find worst weak hypothesis given alpha.
-            new_voter_index, criterion = self._find_new_voter(example_weights,
-                                                              y_kernel_matrix,
-                                                              "pseudo_h")
-
-            # Append the weak hypothesis.
-            self.chosen_columns_.append(new_voter_index)
-            self.fobidden_columns.append(new_voter_index)
-            new_voter_margin = y_kernel_matrix[:,
-                               self.chosen_columns_[-1]].reshape((m, 1))
-            self.edge_scores.append(criterion)
-
-            if w is None:
-                self.previous_vote = new_voter_margin
-                w = 1
-                self.weights_.append(w)
-                example_weights = self._update_example_weights(example_weights,
-                                                               y_kernel_matrix,
-                                                               m)
-                self.example_weights_.append(example_weights)
-                self.train_accuracies.append(
-                    accuracy_score(y, np.sign(self.previous_vote)))
-                continue
-
-            # ---- On resoud le probleme a deux votants analytiquement.
-            w = self._solve_two_weights_min_c(new_voter_margin, example_weights)
-            if w[0] == "break":
-                self.chosen_columns_.pop()
-                self.break_cause = w[1]
-                break
-            self.previous_vote = np.matmul(
-                np.concatenate((self.previous_vote, new_voter_margin), axis=1),
-                w).reshape((m, 1))
-
-            # We collect iteration information for later evaluation.
-            self.weights_.append(w[-1])
-
-            self.weights = np.array(self.weights_)
-            self.final_vote_weights = np.array(
-                [np.prod(1 - self.weights[t + 1:]) * self.weights[t] if t <
-                                                                        self.weights.shape[
-                                                                            0] - 1 else
-                 self.weights[t] for t in range(self.weights.shape[0])])
-            margins = np.squeeze(np.asarray(
-                np.matmul(self.classification_matrix[:, self.chosen_columns_],
-                          self.final_vote_weights)))
-            signs_array = np.array([int(x) for x in sign(margins)])
-            self.train_accuracies.append(accuracy_score(y, signs_array))
-
-            # ---- On change l'edge
-            example_weights = self._update_example_weights(example_weights,
-                                                           y_kernel_matrix, m)
-            self.example_weights_.append(example_weights)
-
-        self.nb_opposed_voters = self.check_opposed_voters()
-        self.estimators_generator.estimators_ = \
-        self.estimators_generator.estimators_[self.chosen_columns_]
-
-        y[y == -1] = 0
-
-        return self
-
-    def predict(self, X):
-        start = time.time()
-        check_is_fitted(self, 'weights_')
-
-        if scipy.sparse.issparse(X):
-            logging.warning('Converting sparse matrix to dense matrix.')
-            X = np.array(X.todense())
-        classification_matrix = self._binary_classification_matrix(X)
-        self.weights_ = np.array(self.weights_)
-        self.final_vote_weights = np.array([np.prod(1 - self.weights_[t + 1:]) *
-                                            self.weights_[t] if t <
-                                                                self.weights_.shape[
-                                                                    0] - 1 else
-                                            self.weights_[t] for t in
-                                            range(self.weights_.shape[0])])
-        margins = np.squeeze(np.asarray(
-            np.matmul(classification_matrix, self.final_vote_weights)))
-        signs_array = np.array([int(x) for x in sign(margins)])
-        signs_array[signs_array == -1] = 0
-        end = time.time()
-        self.predict_time = end - start
-        return signs_array
-
-    def _find_new_voter(self, example_weights, y_kernel_matrix,
-                        type="pseudo_h"):
-        if type == "pseudo_h":
-            pseudo_h_values = ma.array(
-                np.squeeze(np.array(example_weights.T.dot(y_kernel_matrix).T)),
-                fill_value=-np.inf)
-            pseudo_h_values[self.fobidden_columns] = ma.masked
-            worst_h_index = ma.argmax(pseudo_h_values)
-            return worst_h_index, pseudo_h_values[worst_h_index]
-        elif type == "random":
-            new_index = self.random_state.choice(
-                np.arange(self.n_total_hypotheses_))
-            while new_index in self.fobidden_columns:
-                new_index = self.random_state.choice(
-                    np.arange(self.n_total_hypotheses_))
-            return new_index, 100
-
-    def _update_example_weights(self, example_weights, y_kernel_matrix, m):
-        if len(self.weights_) == 1:
-            example_weights[self.previous_vote == -1] *= 2
-            example_weights[self.previous_vote == 1] /= 2
-            pass
-        else:
-            weights = np.array(self.weights_)
-            current_vote_weights = np.array(
-                [np.prod(1 - weights[t + 1:]) * weights[t] if t <
-                                                              weights.shape[
-                                                                  0] - 1 else
-                 weights[t] for t in range(weights.shape[0])]).reshape(
-                (weights.shape[0], 1))
-            weighted_margin = np.matmul(
-                y_kernel_matrix[:, self.chosen_columns_], current_vote_weights)
-            example_weights = np.multiply(example_weights,
-                                          np.exp((1 - np.sum(weighted_margin,
-                                                             axis=1) /
-                                                  np.sum(weighted_margin,
-                                                         axis=1))).reshape(
-                                              (m, 1)))
-        return example_weights
-
-    def _solve_two_weights_min_c(self, next_column, example_weights):
-        m = next_column.shape[0]
-        zero_diag = np.ones((m, m)) - np.identity(m)
-
-        weighted_previous_vote = self.previous_vote.reshape((m, 1))
-        weighted_next_column = next_column.reshape((m, 1))
-
-        mat_prev = np.repeat(weighted_previous_vote, m, axis=1) * zero_diag
-        mat_next = np.repeat(weighted_next_column, m, axis=1) * zero_diag
-
-        self.B2 = np.sum((weighted_previous_vote - weighted_next_column) ** 2)
-        self.B1 = np.sum(2 * weighted_next_column * (
-                    weighted_previous_vote - 2 * weighted_next_column * weighted_next_column))
-        self.B0 = np.sum(weighted_next_column * weighted_next_column)
-
-        self.A2 = self.B2 + np.sum(
-            (mat_prev - mat_next) * np.transpose(mat_prev - mat_next))
-        self.A1 = self.B1 + np.sum(
-            mat_prev * np.transpose(mat_next) - mat_next * np.transpose(
-                mat_prev) - 2 * mat_next * np.transpose(mat_next))
-        self.A0 = self.B0 + np.sum(mat_next * np.transpose(mat_next))
-
-        C2 = (self.A1 * self.B2 - self.A2 * self.B1)
-        C1 = 2 * (self.A0 * self.B2 - self.A2 * self.B0)
-        C0 = self.A0 * self.B1 - self.A1 * self.B0
-
-        if C2 == 0:
-            if C1 == 0:
-                return np.array([0.5, 0.5])
-            elif abs(C1) > 0:
-                return np.array([0., 1.])
-            else:
-                return ['break', "the derivate was constant."]
-        elif C2 == 0:
-            return ["break", "the derivate was affine."]
-
-        sols = np.roots(np.array([C2, C1, C0]))
-
-        is_acceptable, sol = self._analyze_solutions(sols)
-        if is_acceptable:
-            # print("cb", self._cborn(sol))
-            return np.array([sol, 1 - sol])
-        else:
-            return ["break", sol]
-
-    def _analyze_solutions(self, sols):
-        if sols.shape[0] == 1:
-            if self._cborn(sols[0]) < self._cborn(sols[0] + 1):
-                best_sol = sols[0]
-            else:
-                return False, " the only solution was a maximum."
-        elif sols.shape[0] == 2:
-            best_sol = self._best_sol(sols)
-        else:
-            return False, " no solution were found."
-
-        if 0 < best_sol < 1:
-            return True, self._best_sol(sols)
-
-        elif best_sol <= 0:
-            return False, " the minimum was below 0."
-        else:
-            return False, " the minimum was over 1."
-
-    def _cborn(self, sol):
-        return 1 - (self.A2 * sol ** 2 + self.A1 * sol + self.A0) / (
-                    self.B2 * sol ** 2 + self.B1 * sol + self.B0)
-
-    def _best_sol(self, sols):
-        values = np.array([self._cborn(sol) for sol in sols])
-        return sols[np.argmin(values)]
-
-    def _restricted_master_problem(self, y_kernel_matrix):
-        raise NotImplementedError("Restricted master problem not implemented.")
-
-
-class CqBoostClassifierv21(ColumnGenerationClassifierv21):
-    def __init__(self, mu=0.001, epsilon=1e-08, n_max_iterations=None,
-                 estimators_generator=None,
-                 save_iteration_as_hyperparameter_each=None, random_state=42):
-        super(CqBoostClassifierv21, self).__init__(epsilon, n_max_iterations,
-                                                   estimators_generator,
-                                                   dual_constraint_rhs=0,
-                                                   save_iteration_as_hyperparameter_each=save_iteration_as_hyperparameter_each,
-                                                   random_state=random_state)
-        self.train_time = 0
-        self.mu = mu
-
-    def _initialize_alphas(self, n_examples):
-        return 1.0 / n_examples * np.ones((n_examples,))
-
-
-class CQBoostv21(CqBoostClassifierv21, BaseMonoviewClassifier):
-
-    def __init__(self, random_state=None, mu=0.01, epsilon=1e-06, **kwargs):
-        super(CQBoostv21, self).__init__(
-            random_state=random_state,
-            mu=mu,
-            epsilon=epsilon
-        )
-        self.param_names = ["mu", "epsilon"]
-        self.distribs = [CustomUniform(loc=0.5, state=1.0, multiplier="e-"),
-                         CustomRandint(low=1, high=15, multiplier="e-")]
-        self.classed_params = []
-        self.weird_strings = {}
-
-    def canProbas(self):
-        """Used to know if the classifier can return label probabilities"""
-        return True
-
-    def getInterpret(self, directory, y_test):
-        return getInterpretBase(self, directory, "CQBoostv21", self.weights_,
-                                self.break_cause)
-
-    def get_name_for_fusion(self):
-        return "CQ21"
-
-
-# def formatCmdArgs(args):
-#     """Used to format kwargs for the parsed args"""
-#     kwargsDict = {"mu": args.CQB2_mu,
-#                   "epsilon": args.CQB2_epsilon}
-#     return kwargsDict
-
-
-def paramsToSet(nIter, randomState):
-    """Used for weighted linear early fusion to generate random search sets"""
-    paramsSet = []
-    for _ in range(nIter):
-        paramsSet.append({"mu": 10 ** -randomState.uniform(0.5, 1.5),
-                          "epsilon": 10 ** -randomState.randint(1, 15)})
-    return paramsSet
diff --git a/multiview_platform/mono_multi_view_classifiers/monoview_classifiers/decision_tree_pregen.py b/multiview_platform/mono_multi_view_classifiers/monoview_classifiers/decision_tree_pregen.py
index f02b476d244c04dc53ca56bd867213aa359e5697..124d7a9066f9d5ea480474134d10148f803e1120 100644
--- a/multiview_platform/mono_multi_view_classifiers/monoview_classifiers/decision_tree_pregen.py
+++ b/multiview_platform/mono_multi_view_classifiers/monoview_classifiers/decision_tree_pregen.py
@@ -11,6 +11,7 @@ from ..monoview.monoview_utils import CustomRandint, BaseMonoviewClassifier, \
 __author__ = "Baptiste Bauvin"
 __status__ = "Prototype"  # Production, Development, Prototype
 
+classifier_class_name = "DecisionTreePregen"
 
 class DecisionTreePregen(DecisionTreeClassifier, BaseMonoviewClassifier,
                          PregenClassifier):
diff --git a/multiview_platform/mono_multi_view_classifiers/monoview_classifiers/gradient_boosting.py b/multiview_platform/mono_multi_view_classifiers/monoview_classifiers/gradient_boosting.py
index 5327aae252b6f2fbcc09f21b98ee077997700046..7fd870f08577bfb4b13698284b0d8434c8ad205c 100644
--- a/multiview_platform/mono_multi_view_classifiers/monoview_classifiers/gradient_boosting.py
+++ b/multiview_platform/mono_multi_view_classifiers/monoview_classifiers/gradient_boosting.py
@@ -13,6 +13,8 @@ __author__ = "Baptiste Bauvin"
 __status__ = "Prototype"  # Production, Development, Prototype
 
 
+classifier_class_name = "GradientBoosting"
+
 class CustomDecisionTree(DecisionTreeClassifier):
     def predict(self, X, check_input=True):
         y_pred = super(CustomDecisionTree, self).predict(X,
diff --git a/multiview_platform/mono_multi_view_classifiers/monoview_classifiers/knn.py b/multiview_platform/mono_multi_view_classifiers/monoview_classifiers/knn.py
index 7e7fbb3d816efb5f45390667178bfdb60d286165..68ecfa692da5ac88051a8993df40dd0581887ac9 100644
--- a/multiview_platform/mono_multi_view_classifiers/monoview_classifiers/knn.py
+++ b/multiview_platform/mono_multi_view_classifiers/monoview_classifiers/knn.py
@@ -7,6 +7,8 @@ __author__ = "Baptiste Bauvin"
 __status__ = "Prototype"  # Production, Development, Prototype
 
 
+classifier_class_name = "KNN"
+
 class KNN(KNeighborsClassifier, BaseMonoviewClassifier):
 
     def __init__(self, random_state=None, n_neighbors=5,
diff --git a/multiview_platform/mono_multi_view_classifiers/monoview_classifiers/lasso.py b/multiview_platform/mono_multi_view_classifiers/monoview_classifiers/lasso.py
index 1e5c13e468e5525ac90343f2424c61fc794494b0..30af6f5b1839a68ab13bfe7dab37bda9eb3db1d3 100644
--- a/multiview_platform/mono_multi_view_classifiers/monoview_classifiers/lasso.py
+++ b/multiview_platform/mono_multi_view_classifiers/monoview_classifiers/lasso.py
@@ -9,6 +9,8 @@ __author__ = "Baptiste Bauvin"
 __status__ = "Prototype"  # Production, Development, Prototype
 
 
+classifier_class_name = "Lasso"
+
 class Lasso(LassoSK, BaseMonoviewClassifier):
 
     def __init__(self, random_state=None, alpha=1.0,
diff --git a/multiview_platform/mono_multi_view_classifiers/monoview_classifiers/min_cq.py b/multiview_platform/mono_multi_view_classifiers/monoview_classifiers/min_cq.py
index d5be24075fa9f6dc7db56403a4e66c3c2a14147e..21345552b14dc0c2493e3d92d88a152b46de5a80 100644
--- a/multiview_platform/mono_multi_view_classifiers/monoview_classifiers/min_cq.py
+++ b/multiview_platform/mono_multi_view_classifiers/monoview_classifiers/min_cq.py
@@ -21,6 +21,8 @@ from sklearn.metrics.pairwise import rbf_kernel, linear_kernel, \
 from ..monoview.additions.BoostUtils import ConvexProgram as QP
 
 
+classifier_class_name = "MinCQ"
+
 # from majority_vote import MajorityVote
 # from voter import StumpsVotersGenerator, KernelVotersGenerator
 
diff --git a/multiview_platform/mono_multi_view_classifiers/monoview_classifiers/min_cq_graalpy.py b/multiview_platform/mono_multi_view_classifiers/monoview_classifiers/min_cq_graalpy.py
index 3ffa26d2f13f97c67311dfb25e21347b8ba74630..c53187463a1c3045300f7887739a870b194cb7cb 100644
--- a/multiview_platform/mono_multi_view_classifiers/monoview_classifiers/min_cq_graalpy.py
+++ b/multiview_platform/mono_multi_view_classifiers/monoview_classifiers/min_cq_graalpy.py
@@ -5,6 +5,8 @@ from ..monoview.additions.MinCQUtils import RegularizedBinaryMinCqClassifier
 from ..monoview.monoview_utils import BaseMonoviewClassifier, CustomUniform
 
 
+classifier_class_name = "MinCQGraalpy"
+
 class MinCQGraalpy(RegularizedBinaryMinCqClassifier, BaseMonoviewClassifier):
 
     def __init__(self, random_state=None, mu=0.01, self_complemented=True,
diff --git a/multiview_platform/mono_multi_view_classifiers/monoview_classifiers/min_cq_graalpy_tree.py b/multiview_platform/mono_multi_view_classifiers/monoview_classifiers/min_cq_graalpy_tree.py
index 096bc62ef181b596de6499f89d3876fc02ea74dd..0bc6b73d2f9561ad669b1207781cf613bc88d2b9 100644
--- a/multiview_platform/mono_multi_view_classifiers/monoview_classifiers/min_cq_graalpy_tree.py
+++ b/multiview_platform/mono_multi_view_classifiers/monoview_classifiers/min_cq_graalpy_tree.py
@@ -4,6 +4,7 @@ from ..monoview.additions.BoostUtils import TreeClassifiersGenerator
 from ..monoview.additions.MinCQUtils import RegularizedBinaryMinCqClassifier
 from ..monoview.monoview_utils import BaseMonoviewClassifier, CustomUniform
 
+classifier_class_name = "MinCQGraalpyTree"
 
 class MinCQGraalpyTree(RegularizedBinaryMinCqClassifier,
                        BaseMonoviewClassifier):
diff --git a/multiview_platform/mono_multi_view_classifiers/monoview_classifiers/qar_boost.py b/multiview_platform/mono_multi_view_classifiers/monoview_classifiers/qar_boost.py
deleted file mode 100644
index fe02c038307329280be0dc7ab13082a08c1850ba..0000000000000000000000000000000000000000
--- a/multiview_platform/mono_multi_view_classifiers/monoview_classifiers/qar_boost.py
+++ /dev/null
@@ -1,46 +0,0 @@
-from ..monoview.additions.CGDescUtils import ColumnGenerationClassifierQar
-from ..monoview.monoview_utils import BaseMonoviewClassifier
-
-
-class QarBoost(ColumnGenerationClassifierQar, BaseMonoviewClassifier):
-
-    def __init__(self, random_state=None, **kwargs):
-        super(QarBoost, self).__init__(n_max_iterations=500,
-                                       random_state=random_state,
-                                       self_complemented=True,
-                                       twice_the_same=True,
-                                       c_bound_choice=True,
-                                       random_start=False,
-                                       n_stumps=10,
-                                       use_r=True,
-                                       c_bound_sol=False
-                                       )
-        # n_stumps_per_attribute=10,
-        self.param_names = []
-        self.distribs = []
-        self.classed_params = []
-        self.weird_strings = {}
-
-    def canProbas(self):
-        """Used to know if the classifier can return label probabilities"""
-        return True
-
-    def getInterpret(self, directory, y_test):
-        return self.getInterpretQar(directory, y_test)
-
-    def get_name_for_fusion(self):
-        return "QB"
-
-
-# def formatCmdArgs(args):
-#     """Used to format kwargs for the parsed args"""
-#     kwargsDict = {}
-#     return kwargsDict
-
-
-def paramsToSet(nIter, randomState):
-    """Used for weighted linear early fusion to generate random search sets"""
-    paramsSet = []
-    for _ in range(nIter):
-        paramsSet.append({})
-    return paramsSet
diff --git a/multiview_platform/mono_multi_view_classifiers/monoview_classifiers/qar_boost_nc3.py b/multiview_platform/mono_multi_view_classifiers/monoview_classifiers/qar_boost_nc3.py
deleted file mode 100644
index 06a9c186f24a71a7c1113e9cda3a01a916896c97..0000000000000000000000000000000000000000
--- a/multiview_platform/mono_multi_view_classifiers/monoview_classifiers/qar_boost_nc3.py
+++ /dev/null
@@ -1,46 +0,0 @@
-from ..monoview.additions.CGDescUtils import ColumnGenerationClassifierQar
-from ..monoview.monoview_utils import BaseMonoviewClassifier
-
-
-class QarBoostNC3(ColumnGenerationClassifierQar, BaseMonoviewClassifier):
-
-    def __init__(self, random_state=None, **kwargs):
-        super(QarBoostNC3, self).__init__(n_max_iterations=300,
-                                          random_state=random_state,
-                                          self_complemented=True,
-                                          twice_the_same=False,
-                                          c_bound_choice=True,
-                                          random_start=False,
-                                          n_stumps=1,
-                                          use_r=True,
-                                          c_bound_sol=True
-                                          )
-        # n_stumps_per_attribute=1,
-        self.param_names = []
-        self.distribs = []
-        self.classed_params = []
-        self.weird_strings = {}
-
-    def canProbas(self):
-        """Used to know if the classifier can return label probabilities"""
-        return True
-
-    def getInterpret(self, directory, y_test):
-        return self.getInterpretQar(directory, y_test)
-
-    def get_name_for_fusion(self):
-        return "QBN3"
-
-
-# def formatCmdArgs(args):
-#     """Used to format kwargs for the parsed args"""
-#     kwargsDict = {}
-#     return kwargsDict
-
-
-def paramsToSet(nIter, randomState):
-    """Used for weighted linear early fusion to generate random search sets"""
-    paramsSet = []
-    for _ in range(nIter):
-        paramsSet.append({})
-    return paramsSet
diff --git a/multiview_platform/mono_multi_view_classifiers/monoview_classifiers/qar_boostv2.py b/multiview_platform/mono_multi_view_classifiers/monoview_classifiers/qar_boostv2.py
deleted file mode 100644
index c3936a3cbbd6344a6c76413d266c73f5732fa952..0000000000000000000000000000000000000000
--- a/multiview_platform/mono_multi_view_classifiers/monoview_classifiers/qar_boostv2.py
+++ /dev/null
@@ -1,46 +0,0 @@
-from ..monoview.additions.CGDescUtils import ColumnGenerationClassifierQar
-from ..monoview.monoview_utils import BaseMonoviewClassifier
-
-
-class QarBoostv2(ColumnGenerationClassifierQar, BaseMonoviewClassifier):
-
-    def __init__(self, random_state=None, **kwargs):
-        super(QarBoostv2, self).__init__(n_max_iterations=300,
-                                         random_state=random_state,
-                                         self_complemented=True,
-                                         twice_the_same=False,
-                                         c_bound_choice=True,
-                                         random_start=False,
-                                         n_stumps=1,
-                                         use_r=True,
-                                         c_bound_sol=False
-                                         )
-        # n_stumps_per_attribute = 1,
-        self.param_names = []
-        self.distribs = []
-        self.classed_params = []
-        self.weird_strings = {}
-
-    def canProbas(self):
-        """Used to know if the classifier can return label probabilities"""
-        return True
-
-    def getInterpret(self, directory, y_test):
-        return self.getInterpretQar(directory, y_test)
-
-    def get_name_for_fusion(self):
-        return "QBv2"
-
-
-# def formatCmdArgs(args):
-#     """Used to format kwargs for the parsed args"""
-#     kwargsDict = {}
-#     return kwargsDict
-
-
-def paramsToSet(nIter, randomState):
-    """Used for weighted linear early fusion to generate random search sets"""
-    paramsSet = []
-    for _ in range(nIter):
-        paramsSet.append({})
-    return paramsSet
diff --git a/multiview_platform/mono_multi_view_classifiers/monoview_classifiers/qar_boostv3.py b/multiview_platform/mono_multi_view_classifiers/monoview_classifiers/qar_boostv3.py
deleted file mode 100644
index 954ab0b5362715c53a241870416975fbf6d19b53..0000000000000000000000000000000000000000
--- a/multiview_platform/mono_multi_view_classifiers/monoview_classifiers/qar_boostv3.py
+++ /dev/null
@@ -1,49 +0,0 @@
-from ..monoview.additions.CGDescUtils import ColumnGenerationClassifierQar
-from ..monoview.monoview_utils import BaseMonoviewClassifier
-
-
-class QarBoostv3(ColumnGenerationClassifierQar, BaseMonoviewClassifier):
-
-    def __init__(self, random_state=None, **kwargs):
-        super(QarBoostv3, self).__init__(
-            random_state=random_state,
-            self_complemented=False,
-            twice_the_same=False,
-            c_bound_choice=True,
-            random_start=True,
-            n_stumps=1,
-            use_r=False
-        )
-        # old_fashioned=False
-        # previous_vote_weighted=False,
-        # two_wieghts_problem=False,
-        # divided_ponderation=True,
-        # n_stumps_per_attribute=1,
-        self.param_names = []
-        self.classed_params = []
-        self.distribs = []
-        self.weird_strings = {}
-
-    def canProbas(self):
-        """Used to know if the classifier can return label probabilities"""
-        return True
-
-    def getInterpret(self, directory, y_test):
-        return self.getInterpretQar(directory, y_test)
-
-    def get_name_for_fusion(self):
-        return "QBv3"
-
-
-# def formatCmdArgs(args):
-#     """Used to format kwargs for the parsed args"""
-#     kwargsDict = {}
-#     return kwargsDict
-
-
-def paramsToSet(nIter, randomState):
-    """Used for weighted linear early fusion to generate random search sets"""
-    paramsSet = []
-    for _ in range(nIter):
-        paramsSet.append({})
-    return paramsSet
diff --git a/multiview_platform/mono_multi_view_classifiers/monoview_classifiers/random_forest.py b/multiview_platform/mono_multi_view_classifiers/monoview_classifiers/random_forest.py
index 2a2f067f6bf9ef6b4c7f32291b69d3602177cdcb..6fe0dcadea76b63220d06eac9204fe1d83c02fb6 100644
--- a/multiview_platform/mono_multi_view_classifiers/monoview_classifiers/random_forest.py
+++ b/multiview_platform/mono_multi_view_classifiers/monoview_classifiers/random_forest.py
@@ -7,6 +7,8 @@ __author__ = "Baptiste Bauvin"
 __status__ = "Prototype"  # Production, Development, Prototype
 
 
+classifier_class_name = "RandomForest"
+
 class RandomForest(RandomForestClassifier, BaseMonoviewClassifier):
 
     def __init__(self, random_state=None, n_estimators=10,
diff --git a/multiview_platform/mono_multi_view_classifiers/monoview_classifiers/scm.py b/multiview_platform/mono_multi_view_classifiers/monoview_classifiers/scm.py
index 1265410bbcb9643af005bda1b8737a8b992ed553..f5c3df8f2a4bcccaf15ed0f0a63e00a859b368f2 100644
--- a/multiview_platform/mono_multi_view_classifiers/monoview_classifiers/scm.py
+++ b/multiview_platform/mono_multi_view_classifiers/monoview_classifiers/scm.py
@@ -41,6 +41,8 @@ __status__ = "Prototype"  # Production, Development, Prototype
 #     #     return {"Binary_attributes": self.clf.model_.rules}
 
 
+classifier_class_name = "SCM"
+
 class SCM(scm, BaseMonoviewClassifier):
 
     def __init__(self, random_state=None, model_type="conjunction",
diff --git a/multiview_platform/mono_multi_view_classifiers/monoview_classifiers/scm_pregen.py b/multiview_platform/mono_multi_view_classifiers/monoview_classifiers/scm_pregen.py
index 46fc905a283214f38695d8f8b290360b91b95e57..ced608365e87a9f0e80daf5f2a6db941b5062808 100644
--- a/multiview_platform/mono_multi_view_classifiers/monoview_classifiers/scm_pregen.py
+++ b/multiview_platform/mono_multi_view_classifiers/monoview_classifiers/scm_pregen.py
@@ -11,12 +11,13 @@ from ..monoview.monoview_utils import CustomRandint, CustomUniform, \
 __author__ = "Baptiste Bauvin"
 __status__ = "Prototype"  # Production, Development, Prototype
 
+classifier_class_name = "SCMPregen"
 
-class SCMPregen(scm, BaseMonoviewClassifier, PregenClassifier):
+class SCMPregen(BaseMonoviewClassifier, PregenClassifier, scm):
 
     def __init__(self, random_state=None, model_type="conjunction",
                  max_rules=10, p=0.1, n_stumps=10, self_complemented=True,
-                 **kwargs):
+                 estimators_generator="Stumps", max_depth=1, **kwargs):
         super(SCMPregen, self).__init__(
             random_state=random_state,
             model_type=model_type,
@@ -24,16 +25,25 @@ class SCMPregen(scm, BaseMonoviewClassifier, PregenClassifier):
             p=p
         )
         self.param_names = ["model_type", "max_rules", "p", "n_stumps",
-                            "random_state"]
+                            "random_state", "estimators_generator", "max_depth"]
         self.distribs = [["conjunction", "disjunction"],
                          CustomRandint(low=1, high=15),
                          CustomUniform(loc=0, state=1), [n_stumps],
-                         [random_state]]
+                         [random_state], ["Stumps", "Tree"],
+                         CustomRandint(low=1, high=5)]
         self.classed_params = []
         self.weird_strings = {}
         self.self_complemented = self_complemented
         self.n_stumps = n_stumps
-        self.estimators_generator = "Stumps"
+        self.estimators_generator = estimators_generator
+        self.max_depth=1
+
+    def get_params(self, deep=True):
+        params = super(SCMPregen, self).get_params(deep)
+        params["estimators_generator"] = self.estimators_generator
+        params["max_depth"] = self.max_depth
+        params["n_stumps"] = self.n_stumps
+        return params
 
     def fit(self, X, y, tiebreaker=None, iteration_callback=None, **fit_params):
         pregen_X, _ = self.pregen_voters(X, y)
@@ -72,11 +82,6 @@ class SCMPregen(scm, BaseMonoviewClassifier, PregenClassifier):
         os.remove(file_name)
         return self.classes_[self.model_.predict(place_holder)]
 
-    def get_params(self, deep=True):
-        return {"p": self.p, "model_type": self.model_type,
-                "max_rules": self.max_rules,
-                "random_state": self.random_state, "n_stumps": self.n_stumps}
-
     def canProbas(self):
         """Used to know if the classifier can return label probabilities"""
         return False
diff --git a/multiview_platform/mono_multi_view_classifiers/monoview_classifiers/scm_pregen_tree.py b/multiview_platform/mono_multi_view_classifiers/monoview_classifiers/scm_pregen_tree.py
deleted file mode 100644
index 1b85c4fc3009269f695d45ef438c5b55c1dcae3e..0000000000000000000000000000000000000000
--- a/multiview_platform/mono_multi_view_classifiers/monoview_classifiers/scm_pregen_tree.py
+++ /dev/null
@@ -1,109 +0,0 @@
-import os
-
-import numpy as np
-from pyscm.scm import SetCoveringMachineClassifier as scm
-
-from ..monoview.additions.PregenUtils import PregenClassifier
-from ..monoview.monoview_utils import CustomRandint, CustomUniform, \
-    BaseMonoviewClassifier
-
-# Author-Info
-__author__ = "Baptiste Bauvin"
-__status__ = "Prototype"  # Production, Development, Prototype
-
-
-class SCMPregenTree(scm, BaseMonoviewClassifier, PregenClassifier):
-
-    def __init__(self, random_state=None, model_type="conjunction",
-                 max_rules=10, p=0.1, n_stumps=10, self_complemented=True,
-                 max_depth=2, **kwargs):
-        super(SCMPregenTree, self).__init__(
-            random_state=random_state,
-            model_type=model_type,
-            max_rules=max_rules,
-            p=p
-        )
-        self.param_names = ["model_type", "max_rules", "p", "n_stumps",
-                            "random_state", "max_depth"]
-        self.distribs = [["conjunction", "disjunction"],
-                         CustomRandint(low=1, high=15),
-                         CustomUniform(loc=0, state=1), [n_stumps],
-                         [random_state], [max_depth]]
-        self.classed_params = []
-        self.weird_strings = {}
-        self.max_depth = max_depth
-        self.self_complemented = self_complemented
-        self.random_state = random_state
-        self.n_stumps = n_stumps
-        self.estimators_generator = "Stumps"
-
-    def fit(self, X, y, tiebreaker=None, iteration_callback=None, **fit_params):
-        pregen_X, _ = self.pregen_voters(X, y, generator="Trees")
-        list_files = os.listdir(".")
-        a = int(self.random_state.randint(0, 10000))
-        if "pregen_x" + str(a) + ".csv" in list_files:
-            a = int(np.random.randint(0, 10000))
-            file_name = "pregen_x" + str(a) + ".csv"
-            while file_name in list_files:
-                a = int(np.random.randint(0, 10000))
-                file_name = "pregen_x" + str(a) + ".csv"
-        else:
-            file_name = "pregen_x" + str(a) + ".csv"
-        np.savetxt(file_name, pregen_X, delimiter=',')
-        place_holder = np.genfromtxt(file_name, delimiter=',')
-        os.remove(file_name)
-        super(SCMPregenTree, self).fit(place_holder, y, tiebreaker=tiebreaker,
-                                       iteration_callback=iteration_callback,
-                                       **fit_params)
-        return self
-
-    def predict(self, X):
-        pregen_X, _ = self.pregen_voters(X, )
-        list_files = os.listdir(".")
-        a = int(self.random_state.randint(0, 10000))
-        if "pregen_x" + str(a) + ".csv" in list_files:
-            a = int(np.random.randint(0, 10000))
-            file_name = "pregen_x" + str(a) + ".csv"
-            while file_name in list_files:
-                a = int(np.random.randint(0, 10000))
-                file_name = "pregen_x" + str(a) + ".csv"
-        else:
-            file_name = "pregen_x" + str(a) + ".csv"
-        np.savetxt(file_name, pregen_X, delimiter=',')
-        place_holder = np.genfromtxt(file_name, delimiter=',')
-        os.remove(file_name)
-        return self.classes_[self.model_.predict(place_holder)]
-
-    def get_params(self, deep=True):
-        return {"p": self.p, "model_type": self.model_type,
-                "max_rules": self.max_rules,
-                "random_state": self.random_state, "n_stumps": self.n_stumps,
-                "max_depth": self.max_depth}
-
-    def canProbas(self):
-        """Used to know if the classifier can return label probabilities"""
-        return False
-
-    def getInterpret(self, directory, y_test):
-        interpretString = "Model used : " + str(self.model_)
-        return interpretString
-
-
-# def formatCmdArgs(args):
-#     """Used to format kwargs for the parsed args"""
-#     kwargsDict = {"model_type": args.SCPT_model_type,
-#                   "p": args.SCPT_p,
-#                   "max_rules": args.SCPT_max_rules,
-#                   "n_stumps": args.SCPT_trees,
-#                   "max_depth": args.SCPT_max_depth}
-#     return kwargsDict
-
-
-def paramsToSet(nIter, randomState):
-    paramsSet = []
-    for _ in range(nIter):
-        paramsSet.append(
-            {"model_type": randomState.choice(["conjunction", "disjunction"]),
-             "max_rules": randomState.randint(1, 15),
-             "p": randomState.random_sample()})
-    return paramsSet
diff --git a/multiview_platform/mono_multi_view_classifiers/monoview_classifiers/scm_sparsity.py b/multiview_platform/mono_multi_view_classifiers/monoview_classifiers/scm_sparsity.py
deleted file mode 100644
index 57feac12373a23d05019bf98f55699a1f9012571..0000000000000000000000000000000000000000
--- a/multiview_platform/mono_multi_view_classifiers/monoview_classifiers/scm_sparsity.py
+++ /dev/null
@@ -1,122 +0,0 @@
-import os
-import time
-
-import numpy as np
-from pyscm.scm import SetCoveringMachineClassifier as scm
-
-from ..metrics import zero_one_loss
-from ..monoview.additions.PregenUtils import PregenClassifier
-from ..monoview.monoview_utils import CustomRandint, CustomUniform, \
-    BaseMonoviewClassifier
-
-# Author-Info
-__author__ = "Baptiste Bauvin"
-__status__ = "Prototype"  # Production, Development, Prototype
-
-
-class SCMSparsity(BaseMonoviewClassifier, PregenClassifier):
-
-    def __init__(self, random_state=None, model_type="disjunction",
-                 max_rules=10, p=0.1, n_stumps=1, self_complemented=True,
-                 **kwargs):
-        self.scm_estimators = [scm(
-            random_state=random_state,
-            model_type=model_type,
-            max_rules=max_rule + 1,
-            p=p
-        ) for max_rule in range(max_rules)]
-        self.model_type = model_type
-        self.self_complemented = self_complemented
-        self.n_stumps = n_stumps
-        self.p = p
-        self.random_state = random_state
-        self.max_rules = max_rules
-        self.param_names = ["model_type", "max_rules", "p", "random_state",
-                            "n_stumps"]
-        self.distribs = [["conjunction", "disjunction"],
-                         CustomRandint(low=1, high=15),
-                         CustomUniform(loc=0, state=1), [random_state],
-                         [n_stumps]]
-        self.classed_params = []
-        self.weird_strings = {}
-
-    def get_params(self):
-        return {"model_type": self.model_type, "p": self.p,
-                "max_rules": self.max_rules, "random_state": self.random_state,
-                "n_stumps": self.n_stumps}
-
-    def fit(self, X, y, tiebreaker=None, iteration_callback=None, **fit_params):
-        pregen_X, _ = self.pregen_voters(X, y)
-        list_files = os.listdir(".")
-        a = int(self.random_state.randint(0, 10000))
-        if "pregen_x" + str(a) + ".csv" in list_files:
-            a = int(np.random.randint(0, 10000))
-            file_name = "pregen_x" + str(a) + ".csv"
-            while file_name in list_files:
-                a = int(np.random.randint(0, 10000))
-                file_name = "pregen_x" + str(a) + ".csv"
-        else:
-            file_name = "pregen_x" + str(a) + ".csv"
-        np.savetxt(file_name, pregen_X, delimiter=',')
-        place_holder = np.genfromtxt(file_name, delimiter=',')
-        os.remove(file_name)
-        for scm_estimator in self.scm_estimators:
-            beg = time.time()
-            scm_estimator.fit(place_holder, y, tiebreaker=None,
-                              iteration_callback=None, **fit_params)
-            end = time.time()
-        self.times = np.array([end - beg, 0])
-        self.train_metrics = [
-            zero_one_loss.score(y, scm_estimator.predict(place_holder)) for
-            scm_estimator in self.scm_estimators]
-        return self.scm_estimators[-1]
-
-    def predict(self, X):
-        pregen_X, _ = self.pregen_voters(X, )
-        list_files = os.listdir(".")
-        a = int(self.random_state.randint(0, 10000))
-        if "pregen_x" + str(a) + ".csv" in list_files:
-            a = int(np.random.randint(0, 10000))
-            file_name = "pregen_x" + str(a) + ".csv"
-            while file_name in list_files:
-                a = int(np.random.randint(0, 10000))
-                file_name = "pregen_x" + str(a) + ".csv"
-        else:
-            file_name = "pregen_x" + str(a) + ".csv"
-        np.savetxt(file_name, pregen_X, delimiter=',')
-        place_holder = np.genfromtxt(file_name, delimiter=',')
-        os.remove(file_name)
-        self.preds = [scm_estimator.predict(place_holder) for scm_estimator in
-                      self.scm_estimators]
-        return self.preds[-1]
-
-    def canProbas(self):
-        """Used to know if the classifier can return label probabilities"""
-        return True
-
-    def getInterpret(self, directory, y_test):
-        interpretString = ""
-        np.savetxt(directory + "test_metrics.csv", np.array(
-            [zero_one_loss.score(y_test, pred) for pred in self.preds]))
-        np.savetxt(directory + "times.csv", self.times)
-        np.savetxt(directory + "train_metrics.csv", self.train_metrics)
-        return interpretString
-
-#
-# def formatCmdArgs(args):
-#     """Used to format kwargs for the parsed args"""
-#     kwargsDict = {"model_type": args.SCS_model_type,
-#                   "p": args.SCS_p,
-#                   "max_rules": args.SCS_max_rules,
-#                   "n_stumps": args.SCS_stumps}
-#     return kwargsDict
-
-
-def paramsToSet(nIter, randomState):
-    paramsSet = []
-    for _ in range(nIter):
-        paramsSet.append(
-            {"model_type": randomState.choice(["conjunction", "disjunction"]),
-             "max_rules": randomState.randint(1, 15),
-             "p": randomState.random_sample()})
-    return paramsSet
diff --git a/multiview_platform/mono_multi_view_classifiers/monoview_classifiers/scm_sparsity_ttee.py b/multiview_platform/mono_multi_view_classifiers/monoview_classifiers/scm_sparsity_ttee.py
deleted file mode 100644
index 25c91fca68d909a6bc947c33e92cb9bf228dec5e..0000000000000000000000000000000000000000
--- a/multiview_platform/mono_multi_view_classifiers/monoview_classifiers/scm_sparsity_ttee.py
+++ /dev/null
@@ -1,122 +0,0 @@
-import os
-import time
-
-import numpy as np
-from pyscm.scm import SetCoveringMachineClassifier as scm
-
-from ..metrics import zero_one_loss
-from ..monoview.additions.PregenUtils import PregenClassifier
-from ..monoview.monoview_utils import CustomRandint, CustomUniform, \
-    BaseMonoviewClassifier
-
-# Author-Info
-__author__ = "Baptiste Bauvin"
-__status__ = "Prototype"  # Production, Development, Prototype
-
-
-class SCMSparsityTree(BaseMonoviewClassifier, PregenClassifier):
-
-    def __init__(self, random_state=None, model_type="conjunction",
-                 max_rules=10, p=0.1, n_stumps=1, max_depth=2, **kwargs):
-        self.scm_estimators = [scm(
-            random_state=random_state,
-            model_type=model_type,
-            max_rules=max_rule + 1,
-            p=p
-        ) for max_rule in range(max_rules)]
-        self.model_type = model_type
-        self.max_depth = max_depth
-        self.p = p
-        self.n_stumps = n_stumps
-        self.random_state = random_state
-        self.max_rules = max_rules
-        self.param_names = ["model_type", "max_rules", "p", "random_state",
-                            "max_depth"]
-        self.distribs = [["conjunction", "disjunction"],
-                         CustomRandint(low=1, high=15),
-                         CustomUniform(loc=0, state=1), [random_state],
-                         [max_depth]]
-        self.classed_params = []
-        self.weird_strings = {}
-
-    def get_params(self):
-        return {"model_type": self.model_type, "p": self.p,
-                "max_rules": self.max_rules, "random_state": self.random_state,
-                "max_depth": self.max_depth, "n_stumps": self.n_stumps}
-
-    def fit(self, X, y, tiebreaker=None, iteration_callback=None, **fit_params):
-        pregen_X, _ = self.pregen_voters(X, y, generator="Trees")
-        list_files = os.listdir(".")
-        a = int(self.random_state.randint(0, 10000))
-        if "pregen_x" + str(a) + ".csv" in list_files:
-            a = int(np.random.randint(0, 10000))
-            file_name = "pregen_x" + str(a) + ".csv"
-            while file_name in list_files:
-                a = int(np.random.randint(0, 10000))
-                file_name = "pregen_x" + str(a) + ".csv"
-        else:
-            file_name = "pregen_x" + str(a) + ".csv"
-        np.savetxt(file_name, pregen_X, delimiter=',')
-        place_holder = np.genfromtxt(file_name, delimiter=',')
-        os.remove(file_name)
-        for scm_estimator in self.scm_estimators:
-            beg = time.time()
-            scm_estimator.fit(place_holder, y, tiebreaker=None,
-                              iteration_callback=None, **fit_params)
-            end = time.time()
-        self.times = np.array([end - beg, 0])
-        self.train_metrics = [
-            zero_one_loss.score(y, scm_estimator.predict(place_holder)) for
-            scm_estimator in self.scm_estimators]
-        return self.scm_estimators[-1]
-
-    def predict(self, X):
-        pregen_X, _ = self.pregen_voters(X, generator="Trees")
-        list_files = os.listdir(".")
-        a = int(self.random_state.randint(0, 10000))
-        if "pregen_x" + str(a) + ".csv" in list_files:
-            a = int(np.random.randint(0, 10000))
-            file_name = "pregen_x" + str(a) + ".csv"
-            while file_name in list_files:
-                a = int(np.random.randint(0, 10000))
-                file_name = "pregen_x" + str(a) + ".csv"
-        else:
-            file_name = "pregen_x" + str(a) + ".csv"
-        np.savetxt(file_name, pregen_X, delimiter=',')
-        place_holder = np.genfromtxt(file_name, delimiter=',')
-        os.remove(file_name)
-        self.preds = [scm_estimator.predict(place_holder) for scm_estimator in
-                      self.scm_estimators]
-        return self.preds[-1]
-
-    def canProbas(self):
-        """Used to know if the classifier can return label probabilities"""
-        return True
-
-    def getInterpret(self, directory, y_test):
-        interpretString = ""
-        np.savetxt(directory + "test_metrics.csv", np.array(
-            [zero_one_loss.score(y_test, pred) for pred in self.preds]))
-        np.savetxt(directory + "times.csv", self.times)
-        np.savetxt(directory + "train_metrics.csv", self.train_metrics)
-        return interpretString
-
-
-# def formatCmdArgs(args):
-#     """Used to format kwargs for the parsed args"""
-#     kwargsDict = {"model_type": args.SCST_model_type,
-#                   "p": args.SCST_p,
-#                   "max_rules": args.SCST_max_rules,
-#                   "n_stumps": args.SCST_trees,
-#                   "max_depth": args.SCST_max_depth}
-#     return kwargsDict
-
-
-def paramsToSet(nIter, randomState):
-    paramsSet = []
-    for _ in range(nIter):
-        paramsSet.append(
-            {"model_type": randomState.choice(["conjunction", "disjunction"]),
-             "max_rules": randomState.randint(1, 15),
-             "p": randomState.random_sample()})
-    return paramsSet
diff --git a/multiview_platform/mono_multi_view_classifiers/monoview_classifiers/sgd.py b/multiview_platform/mono_multi_view_classifiers/monoview_classifiers/sgd.py
index d0546f409615ee9b6cd932509454b5643755bf9e..d43d372c0c0adca31236c56947af00c01ab6b42b 100644
--- a/multiview_platform/mono_multi_view_classifiers/monoview_classifiers/sgd.py
+++ b/multiview_platform/mono_multi_view_classifiers/monoview_classifiers/sgd.py
@@ -6,6 +6,7 @@ from ..monoview.monoview_utils import CustomUniform, BaseMonoviewClassifier
 __author__ = "Baptiste Bauvin"
 __status__ = "Prototype"  # Production, Development, Prototype
 
+classifier_class_name = "SGD"
 
 class SGD(SGDClassifier, BaseMonoviewClassifier):
 
diff --git a/multiview_platform/mono_multi_view_classifiers/monoview_classifiers/svm_linear.py b/multiview_platform/mono_multi_view_classifiers/monoview_classifiers/svm_linear.py
index 53ac5226e81914c207474b74bd9e7f0c4b31916d..87f4c4ed4511f41e160223178f023780fa87f6e9 100644
--- a/multiview_platform/mono_multi_view_classifiers/monoview_classifiers/svm_linear.py
+++ b/multiview_platform/mono_multi_view_classifiers/monoview_classifiers/svm_linear.py
@@ -6,6 +6,8 @@ __author__ = "Baptiste Bauvin"
 __status__ = "Prototype"  # Production, Development, Prototype
 
 
+classifier_class_name = "SVMLinear"
+
 class SVMLinear(SVCClassifier, BaseMonoviewClassifier):
 
     def __init__(self, random_state=None, C=1.0, **kwargs):
diff --git a/multiview_platform/mono_multi_view_classifiers/monoview_classifiers/svm_poly.py b/multiview_platform/mono_multi_view_classifiers/monoview_classifiers/svm_poly.py
index 52a24a29b53402b6495bb526f4294d70a35d982d..386868656e84286bb1979539f1c93e197a4f011a 100644
--- a/multiview_platform/mono_multi_view_classifiers/monoview_classifiers/svm_poly.py
+++ b/multiview_platform/mono_multi_view_classifiers/monoview_classifiers/svm_poly.py
@@ -6,6 +6,7 @@ from ..monoview.monoview_utils import CustomUniform, CustomRandint, \
 __author__ = "Baptiste Bauvin"
 __status__ = "Prototype"  # Production, Development, Prototype
 
+classifier_class_name = "SVMPoly"
 
 class SVMPoly(SVCClassifier, BaseMonoviewClassifier):
 
diff --git a/multiview_platform/mono_multi_view_classifiers/monoview_classifiers/svm_rbf.py b/multiview_platform/mono_multi_view_classifiers/monoview_classifiers/svm_rbf.py
index 9a5853cfa55a93bcac0f9782e4b3e43ec04bbce1..f2ac82543e90b47a2e9126116ef98846ef765740 100644
--- a/multiview_platform/mono_multi_view_classifiers/monoview_classifiers/svm_rbf.py
+++ b/multiview_platform/mono_multi_view_classifiers/monoview_classifiers/svm_rbf.py
@@ -6,6 +6,8 @@ __author__ = "Baptiste Bauvin"
 __status__ = "Prototype"  # Production, Development, Prototype
 
 
+classifier_class_name = "SVMRBF"
+
 class SVMRBF(SVCClassifier, BaseMonoviewClassifier):
 
     def __init__(self, random_state=None, C=1.0, **kwargs):
diff --git a/multiview_platform/mono_multi_view_classifiers/multiview/analyze_results.py b/multiview_platform/mono_multi_view_classifiers/multiview/analyze_results.py
index 0deca9b6c96419d815de93a6d585385fefee6e40..4f746c5f2c12c0018e7286b77770e1c54a0866d9 100644
--- a/multiview_platform/mono_multi_view_classifiers/multiview/analyze_results.py
+++ b/multiview_platform/mono_multi_view_classifiers/multiview/analyze_results.py
@@ -5,9 +5,9 @@ __author__ = "Baptiste Bauvin"
 __status__ = "Prototype"  # Production, Development, Prototype
 
 
-def printMetricScore(metricScores, metrics):
+def printMetricScore(metricScores, metric_list):
     metricScoreString = "\n\n"
-    for metric in metrics:
+    for metric in metric_list:
         metricModule = getattr(metrics, metric[0])
         if metric[1] is not None:
             metricKWARGS = dict((index, metricConfig) for index, metricConfig in
@@ -61,16 +61,16 @@ def execute(classifier, trainLabels,
             classificationKWARGS, classificationIndices,
             LABELS_DICTIONARY, views, nbCores, times,
             name, KFolds,
-            hyperParamSearch, nIter, metrics,
+            hyperParamSearch, nIter, metric_list,
             viewsIndices, randomState, labels, classifierModule):
     classifierNameString = classifierModule.genName(classificationKWARGS)
     CLASS_LABELS = labels
     learningIndices, validationIndices, testIndicesMulticlass = classificationIndices
 
-    metricModule = getattr(metrics, metrics[0][0])
-    if metrics[0][1] is not None:
+    metricModule = getattr(metrics, metric_list[0][0])
+    if metric_list[0][1] is not None:
         metricKWARGS = dict((index, metricConfig) for index, metricConfig in
-                            enumerate(metrics[0][1]))
+                            enumerate(metric_list[0][1]))
     else:
         metricKWARGS = {}
     scoreOnTrain = metricModule.score(CLASS_LABELS[learningIndices],
@@ -82,7 +82,7 @@ def execute(classifier, trainLabels,
     classifierConfiguration = classifier.getConfigString(classificationKWARGS)
 
     stringAnalysis = "\t\tResult for multiview classification with " + classifierNameString + \
-                     "\n\n" + metrics[0][0] + " :\n\t-On Train : " + str(
+                     "\n\n" + metric_list[0][0] + " :\n\t-On Train : " + str(
         scoreOnTrain) + "\n\t-On Test : " + str(
         scoreOnTest) + \
                      "\n\nDataset info :\n\t-Database name : " + name + "\n\t-Labels : " + \
@@ -92,9 +92,9 @@ def execute(classifier, trainLabels,
         KFolds.n_splits) + \
                      " folds\n\nClassification configuration : \n\t-Algorithm used : " + classifierNameString + " with : " + classifierConfiguration
 
-    metricsScores = getMetricsScores(metrics, trainLabels, testLabels,
+    metricsScores = getMetricsScores(metric_list, trainLabels, testLabels,
                                      validationIndices, learningIndices, labels)
-    stringAnalysis += printMetricScore(metricsScores, metrics)
+    stringAnalysis += printMetricScore(metricsScores, metric_list)
     stringAnalysis += "\n\n Interpretation : \n\n" + classifier.getSpecificAnalysis(
         classificationKWARGS)
     imagesAnalysis = {}
diff --git a/multiview_platform/mono_multi_view_classifiers/utils/get_multiview_db.py b/multiview_platform/mono_multi_view_classifiers/utils/get_multiview_db.py
index 5821bf0261b618ba2d945c3e20a20e70bce25202..271784d9f6c56faabd514da37210ac51819d962a 100644
--- a/multiview_platform/mono_multi_view_classifiers/utils/get_multiview_db.py
+++ b/multiview_platform/mono_multi_view_classifiers/utils/get_multiview_db.py
@@ -93,7 +93,7 @@ def makeMeNoisy(viewData, randomState, percentage=5):
 def getPlausibleDBhdf5(features, pathF, name, NB_CLASS=3, LABELS_NAME="",
                        randomState=None, full=True, add_noise=False,
                        noise_std=0.15, nbView=3,
-                   nbClass=2, datasetLength=100, randomStateInt=42, nbFeatures = 5):
+                   nbClass=2, datasetLength=100, randomStateInt=42, nbFeatures = 10):
     """Used to generate a plausible dataset to test the algorithms"""
 
     if not os.path.exists(os.path.dirname(pathF + "Plausible.hdf5")):
diff --git a/multiview_platform/mono_multi_view_classifiers/utils/multiview_result_analysis.py b/multiview_platform/mono_multi_view_classifiers/utils/multiview_result_analysis.py
index 5036232294ec6abac6551bd34356f2a8de16fe0c..43833e25ab266ec060dcbf24394c0717cf65abb8 100644
--- a/multiview_platform/mono_multi_view_classifiers/utils/multiview_result_analysis.py
+++ b/multiview_platform/mono_multi_view_classifiers/utils/multiview_result_analysis.py
@@ -32,14 +32,8 @@ def getTotalMetricScores(metric, trainLabels, testLabels, validationIndices,
                             enumerate(metric[1]))
     else:
         metricKWARGS = {}
-    try:
-        trainScore = metricModule.score(labels[learningIndices], trainLabels,
+    trainScore = metricModule.score(labels[learningIndices], trainLabels,
                                         **metricKWARGS)
-    except:
-        print(labels[learningIndices])
-        print(trainLabels)
-        import pdb;
-        pdb.set_trace()
     testScore = metricModule.score(labels[validationIndices], testLabels,
                                    **metricKWARGS)
     return [trainScore, testScore]