From 775329732b6810298ef6951f36bc3a2f43cd45bc Mon Sep 17 00:00:00 2001
From: Baptiste Bauvin <baptiste.bauvin@lis-lab.fr>
Date: Fri, 21 Feb 2020 16:07:12 +0100
Subject: [PATCH] Clean up classifiers: explicit parent-class calls, remove dead code

Replace the super(...) calls in the monoview and multiview classifiers with
explicit calls to the parent class, drop the commented-out canProbas and
formatCmdArgs helpers along with the unused paramsToSet functions, rename
getDBConfigString's classLabelsNames parameter to class_labels_names and
re-wrap long lines in analyze_result.py, accept a multiclass argument in
several get_interpretation methods, and delete the obsolete
monoview/exec_plot.py and monoview/run.py scripts.
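For readers unfamiliar with the pattern, here is a minimal sketch of the
explicit parent-call style this patch standardizes on. BaseMonoviewClassifier
below is a stand-in stub for illustration only, not the platform's real mixin:

    from sklearn.tree import DecisionTreeClassifier

    class BaseMonoviewClassifier:
        """Stand-in stub for the platform's mixin (illustration only)."""

    class DecisionTree(DecisionTreeClassifier, BaseMonoviewClassifier):
        def __init__(self, random_state=None, max_depth=None,
                     criterion='gini', splitter='best', **kwargs):
            # Name the sklearn parent explicitly instead of calling
            # super(DecisionTree, self).__init__(...): with two base classes
            # this keeps the target of the call unambiguous and independent
            # of the method resolution order.
            DecisionTreeClassifier.__init__(self,
                                            max_depth=max_depth,
                                            criterion=criterion,
                                            splitter=splitter,
                                            random_state=random_state)

    # Construction and fitting behave exactly as before, e.g.:
    # clf = DecisionTree(max_depth=3).fit(X, y)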
---
 .../monoview/analyze_result.py                | 21 +++--
 .../monoview/exec_plot.py                     | 89 -------------------
 .../monoview/run.py                           |  8 --
 .../monoview_classifiers/adaboost.py          | 25 +-----
 .../monoview_classifiers/decision_tree.py     | 23 +----
 .../monoview_classifiers/gradient_boosting.py | 25 +-----
 .../monoview_classifiers/knn.py               | 29 +-----
 .../monoview_classifiers/lasso.py             | 35 ++------
 .../monoview_classifiers/random_forest.py     | 30 +------
 .../monoview_classifiers/sgd.py               | 33 +------
 .../monoview_classifiers/svm_linear.py        | 14 +--
 .../monoview_classifiers/svm_poly.py          | 16 +---
 .../monoview_classifiers/svm_rbf.py           | 27 +-----
 .../additions/diversity_utils.py              |  2 +-
 .../additions/jumbo_fusion_utils.py           |  4 +-
 .../additions/late_fusion_utils.py            |  2 +-
 .../bayesian_inference_fusion.py              |  2 +-
 .../majority_voting_fusion.py                 |  2 +-
 .../multiview_classifiers/svm_jumbo_fusion.py |  2 +-
 .../weighted_linear_early_fusion.py           |  2 +-
 .../weighted_linear_late_fusion.py            |  2 +-
 21 files changed, 45 insertions(+), 348 deletions(-)
 delete mode 100644 multiview_platform/mono_multi_view_classifiers/monoview/exec_plot.py
 delete mode 100644 multiview_platform/mono_multi_view_classifiers/monoview/run.py

diff --git a/multiview_platform/mono_multi_view_classifiers/monoview/analyze_result.py b/multiview_platform/mono_multi_view_classifiers/monoview/analyze_result.py
index 77f81473..0f7c93d7 100644
--- a/multiview_platform/mono_multi_view_classifiers/monoview/analyze_result.py
+++ b/multiview_platform/mono_multi_view_classifiers/monoview/analyze_result.py
@@ -4,7 +4,7 @@ from .. import metrics
 
 
 def getDBConfigString(name, feat, classification_indices, shape,
-                      classLabelsNames, KFolds):
+                      class_labels_names, KFolds):
     """
     
     Parameters
@@ -13,7 +13,7 @@ def getDBConfigString(name, feat, classification_indices, shape,
     feat
     classification_indices
     shape
-    classLabelsNames
+    class_labels_names
     KFolds
 
     Returns
@@ -21,13 +21,14 @@ def getDBConfigString(name, feat, classification_indices, shape,
 
     """
     learningRate = float(len(classification_indices[0])) / (
-                len(classification_indices[0]) + len(classification_indices[1]))
+            len(classification_indices[0]) + len(classification_indices[1]))
     dbConfigString = "Database configuration : \n"
     dbConfigString += "\t- Database name : " + name + "\n"
     dbConfigString += "\t- View name : " + feat + "\t View shape : " + str(
         shape) + "\n"
     dbConfigString += "\t- Learning Rate : " + str(learningRate) + "\n"
-    dbConfigString += "\t- Labels used : " + ", ".join(classLabelsNames) + "\n"
+    dbConfigString += "\t- Labels used : " + ", ".join(
+        class_labels_names) + "\n"
     dbConfigString += "\t- Number of cross validation folds : " + str(
         KFolds.n_splits) + "\n\n"
     return dbConfigString
@@ -42,7 +43,8 @@ def getClassifierConfigString(gridSearch, nbCores, nIter, clKWARGS, classifier,
         classifierConfigString += "\t- Got configuration using randomized search with " + str(
             nIter) + " iterations \n"
     classifierConfigString += "\n\n"
-    classifierInterpretString = classifier.get_interpretation(output_file_name, y_test)
+    classifierInterpretString = classifier.get_interpretation(output_file_name,
+                                                              y_test)
     return classifierConfigString, classifierInterpretString
 
 
@@ -63,7 +65,8 @@ def getMetricScore(metric, y_train, y_train_pred, y_test, y_test_pred):
     return metricScoreString, [metricScoreTrain, metricScoreTest]
 
 
-def execute(name, learningRate, KFolds, nbCores, gridSearch, metrics_list, nIter,
+def execute(name, learningRate, KFolds, nbCores, gridSearch, metrics_list,
+            nIter,
             feat, CL_type, clKWARGS, classLabelsNames,
             shape, y_train, y_train_pred, y_test, y_test_pred, time,
             random_state, classifier, output_file_name):
@@ -72,13 +75,15 @@ def execute(name, learningRate, KFolds, nbCores, gridSearch, metrics_list, nIter
     trainScore = metricModule.score(y_train, y_train_pred)
     testScore = metricModule.score(y_test, y_test_pred)
     stringAnalysis = "Classification on " + name + " database for " + feat + " with " + CL_type + ".\n\n"
-    stringAnalysis += metrics_list[0][0] + " on train : " + str(trainScore) + "\n" + \
+    stringAnalysis += metrics_list[0][0] + " on train : " + str(
+        trainScore) + "\n" + \
                       metrics_list[0][0] + " on test : " + str(
         testScore) + "\n\n"
     stringAnalysis += getDBConfigString(name, feat, learningRate, shape,
                                         classLabelsNames, KFolds)
     classifierConfigString, classifierIntepretString = getClassifierConfigString(
-        gridSearch, nbCores, nIter, clKWARGS, classifier, output_file_name, y_test)
+        gridSearch, nbCores, nIter, clKWARGS, classifier, output_file_name,
+        y_test)
     stringAnalysis += classifierConfigString
     for metric in metrics_list:
         metricString, metricScore = getMetricScore(metric, y_train,
diff --git a/multiview_platform/mono_multi_view_classifiers/monoview/exec_plot.py b/multiview_platform/mono_multi_view_classifiers/monoview/exec_plot.py
deleted file mode 100644
index b09645cb..00000000
--- a/multiview_platform/mono_multi_view_classifiers/monoview/exec_plot.py
+++ /dev/null
@@ -1,89 +0,0 @@
-# #!/usr/bin/env python
-#
-# """ Script whichs helps to replot preds from Feature Parameter Optimisation """
-#
-# # Import built-in modules
-# import argparse  # for acommand line arguments
-# import datetime  # for TimeStamp in CSVFile
-# import os  # to geth path of the running script
-# import matplotlib
-#
-# # matplotlib.use('Agg')
-# # Import 3rd party modules
-# import pandas as pd  # for Series
-# import numpy as np  # for DataFrames
-#
-# # Import own modules
-# import ExportResults  # Functions to render results
-#
-# # Author-Info
-# __author__ = "Nikolas Huelsmann"
-# __status__ = "Prototype"  # Production, Development, Prototype
-# __date__ = 2016 - 03 - 25
-#
-# parser = argparse.ArgumentParser(
-#     description='This method can be used to replot preds from Feature Parameter Optimisation',
-#     formatter_class=argparse.ArgumentDefaultsHelpFormatter)
-# args = parser.parse_args()
-# args.name = "Caltech"
-# args.valueStart = 2
-# args.valueEnd = 25
-# args.nCalcs = 5
-# args.feature = "HSV"
-# args.param = "HSV_V_Bins"
-# args.show = False
-# df_feat_res = pd.DataFrame.from_csv(
-#     path="D:\\BitBucket\\multiview-machine-learning-omis\\results\\Hydra\\2016_03_23-FPO-Caltech-HSV-HSV_V_Bins.csv",
-#     sep=';')
-#
-# # Get data from result to show results in plot
-# # logging.debug("Start:\t Plot Result")
-# # Total time for feature extraction and classification
-# tot_time = df_feat_res.b_feat_extr_time.values + df_feat_res.e_cl_time.values
-# tot_time = np.asarray(tot_time)
-# # Time for feature extraction
-# feat_time = df_feat_res.b_feat_extr_time.values
-# feat_time = np.asarray(feat_time)
-# # Time for classification
-# cl_time = df_feat_res.e_cl_time.values
-# cl_time = np.asarray(cl_time)
-#
-# # Mean Score of all classes
-# score = df_feat_res.f_cl_score.values
-# score = np.asarray(score)
-#
-# # Range on X-Axis
-# if args.nCalcs > 1:
-#     step = float(args.valueEnd - args.valueStart) / float(args.nCalcs - 1)
-#     rangeX = np.around(np.array(range(0, args.nCalcs)) * step) + args.valueStart
-# else:
-#     rangeX = [args.valueStart]
-# rangeX = np.asarray(rangeX)
-#
-# # Description of Classification
-# cl_desc = df_feat_res.c_cl_desc.values
-#
-# # Description of Feature
-# feat_desc = df_feat_res.a_feat_desc.values
-#
-# dir = os.path.dirname(os.path.abspath(__file__)) + "/results-FeatParaOpt/"
-# # filename = datetime.datetime.now().strftime("%Y_%m_%d") + "-FPO-" + args.name + "-" + args.feature + "-" + args.param
-# # ExportResults.exportPandasToCSV(df_feat_res, directory, filename)
-#
-# # Store or Show plot
-# if args.show:
-#     store = False
-# else:
-#     store = True
-#
-# fileName = datetime.datetime.now().strftime("%Y_%m_%d") + "-FPO-" + args.name + "-" + args.feature + "-" + args.param
-# # Show results for Calculation
-# ExportResults.showScoreTime(dir, fileName + "-TotalTime", store, score, tot_time, rangeX, args.param, feat_desc,
-#                             cl_desc, 'results for Parameter Optimisation - DB:' + args.name + ' Feat:' + args.feature,
-#                             'Precision', 'Total Time (Feature Extraction+Classification)\n [s]')
-# ExportResults.showScoreTime(dir, fileName + "-FeatExtTime", store, score, feat_time, rangeX, args.param, feat_desc,
-#                             cl_desc, 'results for Parameter Optimisation - DB:' + args.name + ' Feat:' + args.feature,
-#                             'Precision', 'Feature Extraction Time\n [s]')
-# ExportResults.showScoreTime(dir, fileName + "-ClassTime", store, score, cl_time, rangeX, args.param, feat_desc, cl_desc,
-#                             'results for Parameter Optimisation - DB:' + args.name + ' Feat:' + args.feature,
-#                             'Precision', 'Classification Time\n [s]')
diff --git a/multiview_platform/mono_multi_view_classifiers/monoview/run.py b/multiview_platform/mono_multi_view_classifiers/monoview/run.py
deleted file mode 100644
index 106adb04..00000000
--- a/multiview_platform/mono_multi_view_classifiers/monoview/run.py
+++ /dev/null
@@ -1,8 +0,0 @@
-# coding=utf-8
-import os
-
-os.system(
-    'python exec_classif_mono_view.py -log --name MultiOmicDataset --type hdf5 --feat RNASeq --pathF /home/doob/Téléchargements/Data_multi_omics/ --CL_type DecisionTree --CL_CV 5 --CL_Cores 4 --CL_split 0.5')
-# /donnees/pj_bdd_bbauvin/Data_multi_omics/
-# MiRNA_  RNASeq  Clinic
-#
diff --git a/multiview_platform/mono_multi_view_classifiers/monoview_classifiers/adaboost.py b/multiview_platform/mono_multi_view_classifiers/monoview_classifiers/adaboost.py
index 9717adbf..0227e0e1 100644
--- a/multiview_platform/mono_multi_view_classifiers/monoview_classifiers/adaboost.py
+++ b/multiview_platform/mono_multi_view_classifiers/monoview_classifiers/adaboost.py
@@ -57,7 +57,7 @@ class Adaboost(AdaBoostClassifier, BaseMonoviewClassifier):
         if isinstance(base_estimator, str):
             if base_estimator == "DecisionTreeClassifier":
                 base_estimator = DecisionTreeClassifier()
-        super(Adaboost, self).__init__(
+        AdaBoostClassifier.__init__(self,
             random_state=random_state,
             n_estimators=n_estimators,
             base_estimator=base_estimator,
@@ -91,7 +91,7 @@ class Adaboost(AdaBoostClassifier, BaseMonoviewClassifier):
             Returns self.
         """
         begin = time.time()
-        super(Adaboost, self).fit(X, y, sample_weight=sample_weight)
+        AdaBoostClassifier.fit(self, X, y, sample_weight=sample_weight)
         end = time.time()
         self.train_time = end - begin
         self.train_shape = X.shape
@@ -101,16 +101,6 @@ class Adaboost(AdaBoostClassifier, BaseMonoviewClassifier):
                                  self.staged_predict(X)])
         return self
 
-    # def canProbas(self):
-    #     """
-    #     Used to know if the classifier can return label probabilities
-    #
-    #     Returns
-    #     -------
-    #     True
-    #     """
-    #     return True
-
     def predict(self, X):
         """
 
@@ -128,7 +118,7 @@ class Adaboost(AdaBoostClassifier, BaseMonoviewClassifier):
             The estimated labels.
         """
         begin = time.time()
-        pred = super(Adaboost, self).predict(X)
+        pred = AdaBoostClassifier.predict(self, X)
         end = time.time()
         self.pred_time = end - begin
         # TODO : mauvaise verif
@@ -158,12 +148,3 @@ class Adaboost(AdaBoostClassifier, BaseMonoviewClassifier):
                    np.array([self.train_time, self.pred_time]), delimiter=',')
         return interpretString
 
-
-
-def paramsToSet(nIter, random_state):
-    """Used for weighted linear early fusion to generate random search sets"""
-    paramsSet = []
-    for _ in range(nIter):
-        paramsSet.append({"n_estimators": random_state.randint(1, 500),
-                          "base_estimator": None})
-    return paramsSet
diff --git a/multiview_platform/mono_multi_view_classifiers/monoview_classifiers/decision_tree.py b/multiview_platform/mono_multi_view_classifiers/monoview_classifiers/decision_tree.py
index d4efc971..fabf2a72 100644
--- a/multiview_platform/mono_multi_view_classifiers/monoview_classifiers/decision_tree.py
+++ b/multiview_platform/mono_multi_view_classifiers/monoview_classifiers/decision_tree.py
@@ -13,7 +13,7 @@ class DecisionTree(DecisionTreeClassifier, BaseMonoviewClassifier):
 
     def __init__(self, random_state=None, max_depth=None,
                  criterion='gini', splitter='best', **kwargs):
-        super(DecisionTree, self).__init__(
+        DecisionTreeClassifier.__init__(self,
             max_depth=max_depth,
             criterion=criterion,
             splitter=splitter,
@@ -27,29 +27,8 @@ class DecisionTree(DecisionTreeClassifier, BaseMonoviewClassifier):
                          ["best", "random"], [random_state]]
         self.weird_strings = {}
 
-    # def canProbas(self):
-    #     """Used to know if the classifier can return label probabilities"""
-    #     return True
-
     def get_interpretation(self, directory, y_test):
         interpretString = "First featrue : \n\t{} <= {}\n".format(self.tree_.feature[0],
                                                                self.tree_.threshold[0])
         interpretString += self.get_feature_importance(directory)
         return interpretString
-
-
-# def formatCmdArgs(args):
-#     """Used to format kwargs for the parsed args"""
-#     kwargsDict = {"max_depth": args.DT_depth,
-#                   "criterion": args.DT_criterion,
-#                   "splitter": args.DT_splitter}
-#     return kwargsDict
-
-
-def paramsToSet(nIter, randomState):
-    paramsSet = []
-    for _ in range(nIter):
-        paramsSet.append({"max_depth": randomState.randint(1, 300),
-                          "criterion": randomState.choice(["gini", "entropy"]),
-                          "splitter": randomState.choice(["best", "random"])})
-    return paramsSet
diff --git a/multiview_platform/mono_multi_view_classifiers/monoview_classifiers/gradient_boosting.py b/multiview_platform/mono_multi_view_classifiers/monoview_classifiers/gradient_boosting.py
index a714b2af..4b06adee 100644
--- a/multiview_platform/mono_multi_view_classifiers/monoview_classifiers/gradient_boosting.py
+++ b/multiview_platform/mono_multi_view_classifiers/monoview_classifiers/gradient_boosting.py
@@ -16,7 +16,7 @@ classifier_class_name = "GradientBoosting"
 
 class CustomDecisionTree(DecisionTreeClassifier):
     def predict(self, X, check_input=True):
-        y_pred = super(CustomDecisionTree, self).predict(X,
+        y_pred = DecisionTreeClassifier.predict(self, X,
                                                          check_input=check_input)
         return y_pred.reshape((y_pred.shape[0], 1)).astype(float)
 
@@ -27,7 +27,7 @@ class GradientBoosting(GradientBoostingClassifier, BaseMonoviewClassifier):
                  n_estimators=100,
                  init=CustomDecisionTree(max_depth=1),
                  **kwargs):
-        super(GradientBoosting, self).__init__(
+        GradientBoostingClassifier.__init__(self,
             loss=loss,
             max_depth=max_depth,
             n_estimators=n_estimators,
@@ -44,7 +44,7 @@ class GradientBoosting(GradientBoostingClassifier, BaseMonoviewClassifier):
 
     def fit(self, X, y, sample_weight=None, monitor=None):
         begin = time.time()
-        super(GradientBoosting, self).fit(X, y, sample_weight=sample_weight)
+        GradientBoostingClassifier.fit(self, X, y, sample_weight=sample_weight)
         end = time.time()
         self.train_time = end - begin
         self.train_shape = X.shape
@@ -60,7 +60,7 @@ class GradientBoosting(GradientBoostingClassifier, BaseMonoviewClassifier):
 
     def predict(self, X):
         begin = time.time()
-        pred = super(GradientBoosting, self).predict(X)
+        pred = GradientBoostingClassifier.predict(self, X)
         end = time.time()
         self.pred_time = end - begin
         if X.shape != self.train_shape:
@@ -68,10 +68,6 @@ class GradientBoosting(GradientBoostingClassifier, BaseMonoviewClassifier):
                 [step_pred for step_pred in self.staged_predict(X)])
         return pred
 
-    # def canProbas(self):
-    #     """Used to know if the classifier can return label probabilities"""
-    #     return False
-
     def get_interpretation(self, directory, y_test, multi_class=False):
         interpretString = ""
         if multi_class:
@@ -92,16 +88,3 @@ class GradientBoosting(GradientBoostingClassifier, BaseMonoviewClassifier):
             np.savetxt(directory + "times.csv",
                        np.array([self.train_time, self.pred_time]), delimiter=',')
             return interpretString
-
-
-# def formatCmdArgs(args):
-#     """Used to format kwargs for the parsed args"""
-#     kwargsDict = {"n_estimators": args.GB_n_est, }
-#     return kwargsDict
-
-
-def paramsToSet(nIter, randomState):
-    paramsSet = []
-    for _ in range(nIter):
-        paramsSet.append({"n_estimators": randomState.randint(50, 500), })
-    return paramsSet
diff --git a/multiview_platform/mono_multi_view_classifiers/monoview_classifiers/knn.py b/multiview_platform/mono_multi_view_classifiers/monoview_classifiers/knn.py
index 1d2076b1..9b164f99 100644
--- a/multiview_platform/mono_multi_view_classifiers/monoview_classifiers/knn.py
+++ b/multiview_platform/mono_multi_view_classifiers/monoview_classifiers/knn.py
@@ -27,7 +27,7 @@ class KNN(KNeighborsClassifier, BaseMonoviewClassifier):
     def __init__(self, random_state=None, n_neighbors=5,
                  weights='uniform', algorithm='auto', p=2, **kwargs):
 
-        super(KNN, self).__init__(
+        KNeighborsClassifier.__init__(self,
             n_neighbors=n_neighbors,
             weights=weights,
             algorithm=algorithm,
@@ -42,31 +42,6 @@ class KNN(KNeighborsClassifier, BaseMonoviewClassifier):
         self.weird_strings = {}
         self.random_state = random_state
 
-    # def canProbas(self):
-    #     """Used to know if the classifier can return label probabilities"""
-    #     return True
-
-    def get_interpretation(self, directory, y_test):
+    def get_interpretation(self, directory, y_test, multiclass=False):
         interpretString = ""
         return interpretString
-
-
-# def formatCmdArgs(args):
-#     """Used to format kwargs for the parsed args"""
-#     kwargsDict = {"n_neighbors": args.KNN_neigh,
-#                   "weights": args.KNN_weights,
-#                   "algorithm": args.KNN_algo,
-#                   "p": args.KNN_p}
-#     return kwargsDict
-
-
-def paramsToSet(nIter, random_state):
-    paramsSet = []
-    for _ in range(nIter):
-        paramsSet.append({"n_neighbors": random_state.randint(1, 20),
-                          "weights": random_state.choice(
-                              ["uniform", "distance"]),
-                          "algorithm": random_state.choice(
-                              ["auto", "ball_tree", "kd_tree", "brute"]),
-                          "p": random_state.choice([1, 2])})
-    return paramsSet
diff --git a/multiview_platform/mono_multi_view_classifiers/monoview_classifiers/lasso.py b/multiview_platform/mono_multi_view_classifiers/monoview_classifiers/lasso.py
index 14a20f3f..1c6de6a1 100644
--- a/multiview_platform/mono_multi_view_classifiers/monoview_classifiers/lasso.py
+++ b/multiview_platform/mono_multi_view_classifiers/monoview_classifiers/lasso.py
@@ -47,7 +47,7 @@ class Lasso(LassoSK, BaseMonoviewClassifier):
     """
     def __init__(self, random_state=None, alpha=1.0,
                  max_iter=10, warm_start=False, **kwargs):
-        super(Lasso, self).__init__(
+        LassoSK.__init__(self,
             alpha=alpha,
             max_iter=max_iter,
             warm_start=warm_start,
@@ -62,27 +62,17 @@ class Lasso(LassoSK, BaseMonoviewClassifier):
     def fit(self, X, y, check_input=True):
         neg_y = np.copy(y)
         neg_y[np.where(neg_y == 0)] = -1
-        super(Lasso, self).fit(X, neg_y)
+        LassoSK.fit(self, X, neg_y)
         # self.feature_importances_ = self.coef_/np.sum(self.coef_)
         return self
 
     def predict(self, X):
-        prediction = super(Lasso, self).predict(X)
+        prediction = LassoSK.predict(self, X)
         signed = np.sign(prediction)
         signed[np.where(signed == -1)] = 0
         return signed
 
-    # def canProbas(self):
-    #     """
-    #     Used to know if the classifier can return label probabilities
-    #
-    #     Returns
-    #     -------
-    #     False
-    #     """
-    #     return False
-
-    def get_interpretation(self, directory, y_test):
+    def get_interpretation(self, directory, y_test, multiclass=False):
         """
         return the interpreted string
 
@@ -97,19 +87,4 @@ class Lasso(LassoSK, BaseMonoviewClassifier):
         interpreted string, str interpret_string
         """
         interpret_string = ""
-        return interpret_string
-
-
-# def formatCmdArgs(args):
-#     """Used to format kwargs for the parsed args"""
-#     kwargsDict = {"alpha": args.LA_alpha,
-#                   "max_iter": args.LA_n_iter}
-#     return kwargsDict
-
-
-def paramsToSet(nIter, randomState):
-    paramsSet = []
-    for _ in range(nIter):
-        paramsSet.append({"max_iter": randomState.randint(1, 300),
-                          "alpha": randomState.uniform(0, 1.0), })
-    return paramsSet
+        return interpret_string
\ No newline at end of file
diff --git a/multiview_platform/mono_multi_view_classifiers/monoview_classifiers/random_forest.py b/multiview_platform/mono_multi_view_classifiers/monoview_classifiers/random_forest.py
index b35d0236..ec7fd6ba 100644
--- a/multiview_platform/mono_multi_view_classifiers/monoview_classifiers/random_forest.py
+++ b/multiview_platform/mono_multi_view_classifiers/monoview_classifiers/random_forest.py
@@ -50,7 +50,7 @@ class RandomForest(RandomForestClassifier, BaseMonoviewClassifier):
         criterion
         kwargs
         """
-        super(RandomForest, self).__init__(
+        RandomForestClassifier.__init__(self,
             n_estimators=n_estimators,
             max_depth=max_depth,
             criterion=criterion,
@@ -64,16 +64,7 @@ class RandomForest(RandomForestClassifier, BaseMonoviewClassifier):
                          ["gini", "entropy"], [random_state]]
         self.weird_strings = {}
 
-    # def canProbas(self):
-    #     """Used to know if the classifier can return label probabilities
-    #
-    #     Returns
-    #     -------
-    #     True
-    #     """
-    #     return True
-
-    def get_interpretation(self, directory, y_test):
+    def get_interpretation(self, directory, y_test, multiclass=False):
         """
 
         Parameters
@@ -88,20 +79,3 @@ class RandomForest(RandomForestClassifier, BaseMonoviewClassifier):
         interpret_string = ""
         interpret_string += self.get_feature_importance(directory)
         return interpret_string
-
-
-# def formatCmdArgs(args):
-#     """Used to format kwargs for the parsed args"""
-#     kwargsDict = {"n_estimators": args.RF_trees,
-#                   "max_depth": args.RF_max_depth,
-#                   "criterion": args.RF_criterion}
-#     return kwargsDict
-
-
-def paramsToSet(nIter, random_state):
-    paramsSet = []
-    for _ in range(nIter):
-        paramsSet.append({"n_estimators": random_state.randint(1, 300),
-                          "max_depth": random_state.randint(1, 300),
-                          "criterion": random_state.choice(["gini", "entropy"])})
-    return paramsSet
diff --git a/multiview_platform/mono_multi_view_classifiers/monoview_classifiers/sgd.py b/multiview_platform/mono_multi_view_classifiers/monoview_classifiers/sgd.py
index 18d0cbd6..34baf664 100644
--- a/multiview_platform/mono_multi_view_classifiers/monoview_classifiers/sgd.py
+++ b/multiview_platform/mono_multi_view_classifiers/monoview_classifiers/sgd.py
@@ -39,7 +39,7 @@ class SGD(SGDClassifier, BaseMonoviewClassifier):
     def __init__(self, random_state=None, loss='hinge',
                  penalty='l2', alpha=0.0001, max_iter=5, tol=None, **kwargs):
 
-        super(SGD, self).__init__(
+        SGDClassifier.__init__(self,
             loss=loss,
             penalty=penalty,
             alpha=alpha,
@@ -54,18 +54,7 @@ class SGD(SGDClassifier, BaseMonoviewClassifier):
                          CustomUniform(loc=0, state=1), [random_state]]
         self.weird_strings = {}
 
-    # def canProbas(self):
-    #     """
-    #     Used to know if the classifier can return label probabilities
-    #
-    #     Returns
-    #     -------
-    #     return True in all case
-    #     """
-    #
-    #     return True
-
-    def get_interpretation(self, directory, y_test):
+    def get_interpretation(self, directory, y_test, multiclass=False):
         """
 
         Parameters
@@ -82,21 +71,3 @@ class SGD(SGDClassifier, BaseMonoviewClassifier):
         import numpy as np
         # self.feature_importances_ = (self.coef_/np.sum(self.coef_)).reshape(self.coef_.shape[1])
         return interpret_string
-
-
-# def formatCmdArgs(args):
-#     """Used to format kwargs for the parsed args"""
-#     kwargsDict = {"loss": args.SGD_loss,
-#                   "penalty": args.SGD_penalty,
-#                   "alpha": args.SGD_alpha}
-#     return kwargsDict
-
-
-def paramsToSet(nIter, random_state):
-    paramsSet = []
-    for _ in range(nIter):
-        paramsSet.append({"loss": random_state.choice(['log', 'modified_huber']),
-                          "penalty": random_state.choice(
-                              ["l1", "l2", "elasticnet"]),
-                          "alpha": random_state.random_sample()})
-    return paramsSet
diff --git a/multiview_platform/mono_multi_view_classifiers/monoview_classifiers/svm_linear.py b/multiview_platform/mono_multi_view_classifiers/monoview_classifiers/svm_linear.py
index 23213032..d29b92f4 100644
--- a/multiview_platform/mono_multi_view_classifiers/monoview_classifiers/svm_linear.py
+++ b/multiview_platform/mono_multi_view_classifiers/monoview_classifiers/svm_linear.py
@@ -28,7 +28,7 @@ class SVMLinear(SVCClassifier, BaseMonoviewClassifier):
     """
     def __init__(self, random_state=None, C=1.0, **kwargs):
 
-        super(SVMLinear, self).__init__(
+        SVCClassifier.__init__(self,
             C=C,
             kernel='linear',
             random_state=random_state
@@ -41,15 +41,3 @@ class SVMLinear(SVCClassifier, BaseMonoviewClassifier):
         # self.feature_importances_ = (self.coef_/np.sum(self.coef_)).reshape((self.coef_.shape[1],))
         return interpret_string
 
-
-# def formatCmdArgs(args):
-#     """Used to format kwargs for the parsed args"""
-#     kwargsDict = {"C": args.SVML_C, }
-#     return kwargsDict
-
-
-def paramsToSet(nIter, randomState):
-    paramsSet = []
-    for _ in range(nIter):
-        paramsSet.append({"C": randomState.randint(1, 10000), })
-    return paramsSet
diff --git a/multiview_platform/mono_multi_view_classifiers/monoview_classifiers/svm_poly.py b/multiview_platform/mono_multi_view_classifiers/monoview_classifiers/svm_poly.py
index d60b3c55..351fc05a 100644
--- a/multiview_platform/mono_multi_view_classifiers/monoview_classifiers/svm_poly.py
+++ b/multiview_platform/mono_multi_view_classifiers/monoview_classifiers/svm_poly.py
@@ -37,7 +37,7 @@ class SVMPoly(SVCClassifier, BaseMonoviewClassifier):
     """
     def __init__(self, random_state=None, C=1.0, degree=3, **kwargs):
 
-        super(SVMPoly, self).__init__(
+        SVCClassifier.__init__(self,
             C=C,
             kernel='poly',
             degree=degree,
@@ -46,17 +46,3 @@ class SVMPoly(SVCClassifier, BaseMonoviewClassifier):
         self.param_names = ["C", "degree", "random_state"]
         self.distribs = [CustomUniform(loc=0, state=1),
                          CustomRandint(low=2, high=30), [random_state]]
-
-
-# def formatCmdArgs(args):
-#     """Used to format kwargs for the parsed args"""
-#     kwargsDict = {"C": args.SVMPoly_C, "degree": args.SVMPoly_deg}
-#     return kwargsDict
-
-
-def paramsToSet(nIter, randomState):
-    paramsSet = []
-    for _ in range(nIter):
-        paramsSet.append({"C": randomState.randint(1, 10000),
-                          "degree": randomState.randint(1, 30)})
-    return paramsSet
diff --git a/multiview_platform/mono_multi_view_classifiers/monoview_classifiers/svm_rbf.py b/multiview_platform/mono_multi_view_classifiers/monoview_classifiers/svm_rbf.py
index 42fc2ab7..9dbea0e0 100644
--- a/multiview_platform/mono_multi_view_classifiers/monoview_classifiers/svm_rbf.py
+++ b/multiview_platform/mono_multi_view_classifiers/monoview_classifiers/svm_rbf.py
@@ -8,6 +8,7 @@ __status__ = "Prototype"  # Production, Development, Prototype
 
 classifier_class_name = "SVMRBF"
 
+
 class SVMRBF(SVCClassifier, BaseMonoviewClassifier):
     """
     class SVMRBF for classifier SVCC
@@ -31,7 +32,7 @@ class SVMRBF(SVCClassifier, BaseMonoviewClassifier):
     """
     def __init__(self, random_state=None, C=1.0, **kwargs):
 
-        super(SVMRBF, self).__init__(
+        SVCClassifier.__init__(self,
             C=C,
             kernel='rbf',
             random_state=random_state
@@ -39,27 +40,3 @@ class SVMRBF(SVCClassifier, BaseMonoviewClassifier):
         self.param_names = ["C", "random_state"]
         self.distribs = [CustomUniform(loc=0, state=1), [random_state]]
 
-
-# def formatCmdArgs(args):
-#     """Used to format kwargs for the parsed args"""
-#     kwargsDict = {"C": args.SVMRBF_C}
-#     return kwargsDict
-
-
-def paramsToSet(nIter, randomState):
-    """
-
-    Parameters
-    ----------
-    nIter : int number of iterations
-
-    randomState :
-
-    Returns
-    -------
-    paramsSet list of parameters dictionary  with key "C"
-    """
-    paramsSet = []
-    for _ in range(nIter):
-        paramsSet.append({"C": randomState.randint(1, 10000), })
-    return paramsSet
diff --git a/multiview_platform/mono_multi_view_classifiers/multiview_classifiers/additions/diversity_utils.py b/multiview_platform/mono_multi_view_classifiers/multiview_classifiers/additions/diversity_utils.py
index bebc38ef..09b863bc 100644
--- a/multiview_platform/mono_multi_view_classifiers/multiview_classifiers/additions/diversity_utils.py
+++ b/multiview_platform/mono_multi_view_classifiers/multiview_classifiers/additions/diversity_utils.py
@@ -19,7 +19,7 @@ class DiversityFusionClassifier(BaseMultiviewClassifier,
     def __init__(self, random_state=None, classifier_names=None,
                  monoview_estimators=None, classifier_configs=None):
         """Used to init the instances"""
-        super(DiversityFusionClassifier, self).__init__(random_state)
+        BaseMultiviewClassifier.__init__(self, random_state)
         if classifier_names is None:
             classifier_names = get_available_monoview_classifiers()
         self.classifier_names = classifier_names
diff --git a/multiview_platform/mono_multi_view_classifiers/multiview_classifiers/additions/jumbo_fusion_utils.py b/multiview_platform/mono_multi_view_classifiers/multiview_classifiers/additions/jumbo_fusion_utils.py
index fd82ff69..1c28f931 100644
--- a/multiview_platform/mono_multi_view_classifiers/multiview_classifiers/additions/jumbo_fusion_utils.py
+++ b/multiview_platform/mono_multi_view_classifiers/multiview_classifiers/additions/jumbo_fusion_utils.py
@@ -9,7 +9,7 @@ class BaseJumboFusion(LateFusionClassifier):
     def __init__(self, random_state, classifiers_names=None,
                  classifier_configs=None,
                  nb_cores=1, weights=None, nb_monoview_per_view=1, rs=None):
-        super(BaseJumboFusion, self).__init__(random_state, classifiers_names=classifiers_names,
+        LateFusionClassifier.__init__(self, random_state, classifiers_names=classifiers_names,
                                              classifier_configs=classifier_configs,
                                              nb_cores=nb_cores, weights=weights,
                                               rs=rs)
@@ -19,7 +19,7 @@ class BaseJumboFusion(LateFusionClassifier):
 
     def set_params(self, nb_monoview_per_view=1, **params):
         self.nb_monoview_per_view = nb_monoview_per_view
-        super(BaseJumboFusion, self).set_params(**params)
+        LateFusionClassifier.set_params(self, **params)
 
     def predict(self, X, example_indices=None, view_indices=None):
         example_indices, view_indices = get_examples_views_indices(X, example_indices, view_indices)
diff --git a/multiview_platform/mono_multi_view_classifiers/multiview_classifiers/additions/late_fusion_utils.py b/multiview_platform/mono_multi_view_classifiers/multiview_classifiers/additions/late_fusion_utils.py
index f0a67a19..d8ebceff 100644
--- a/multiview_platform/mono_multi_view_classifiers/multiview_classifiers/additions/late_fusion_utils.py
+++ b/multiview_platform/mono_multi_view_classifiers/multiview_classifiers/additions/late_fusion_utils.py
@@ -81,7 +81,7 @@ class LateFusionClassifier(BaseMultiviewClassifier, BaseFusionClassifier):
     def __init__(self, random_state=None, classifiers_names=None,
                  classifier_configs=None, nb_cores=1, weights=None,
                  rs=None):
-        super(LateFusionClassifier, self).__init__(random_state)
+        BaseMultiviewClassifier.__init__(self, random_state)
         self.classifiers_names = classifiers_names
         self.classifier_configs = classifier_configs
         self.nb_cores = nb_cores
diff --git a/multiview_platform/mono_multi_view_classifiers/multiview_classifiers/bayesian_inference_fusion.py b/multiview_platform/mono_multi_view_classifiers/multiview_classifiers/bayesian_inference_fusion.py
index e1c05b26..2d9c903b 100644
--- a/multiview_platform/mono_multi_view_classifiers/multiview_classifiers/bayesian_inference_fusion.py
+++ b/multiview_platform/mono_multi_view_classifiers/multiview_classifiers/bayesian_inference_fusion.py
@@ -12,7 +12,7 @@ class BayesianInferenceClassifier(LateFusionClassifier):
                  classifier_configs=None, nb_cores=1, weights=None,
                  rs=None):
         self.need_probas=True
-        super(BayesianInferenceClassifier, self).__init__(random_state=random_state,
+        LateFusionClassifier.__init__(self, random_state=random_state,
                                              classifiers_names=classifiers_names,
                                              classifier_configs=classifier_configs,
                                              nb_cores=nb_cores,
diff --git a/multiview_platform/mono_multi_view_classifiers/multiview_classifiers/majority_voting_fusion.py b/multiview_platform/mono_multi_view_classifiers/multiview_classifiers/majority_voting_fusion.py
index 932fde9e..4047402a 100644
--- a/multiview_platform/mono_multi_view_classifiers/multiview_classifiers/majority_voting_fusion.py
+++ b/multiview_platform/mono_multi_view_classifiers/multiview_classifiers/majority_voting_fusion.py
@@ -12,7 +12,7 @@ class MajorityVoting(LateFusionClassifier):
     def __init__(self, random_state, classifiers_names=None,
                  classifier_configs=None, weights=None, nb_cores=1, rs=None):
         self.need_probas=False
-        super(MajorityVoting, self).__init__(random_state=random_state,
+        LateFusionClassifier.__init__(self, random_state=random_state,
                                       classifiers_names=classifiers_names,
                                       classifier_configs=classifier_configs,
                                       nb_cores=nb_cores,
diff --git a/multiview_platform/mono_multi_view_classifiers/multiview_classifiers/svm_jumbo_fusion.py b/multiview_platform/mono_multi_view_classifiers/multiview_classifiers/svm_jumbo_fusion.py
index bcdf1ef8..76637c59 100644
--- a/multiview_platform/mono_multi_view_classifiers/multiview_classifiers/svm_jumbo_fusion.py
+++ b/multiview_platform/mono_multi_view_classifiers/multiview_classifiers/svm_jumbo_fusion.py
@@ -11,7 +11,7 @@ class SVMJumboFusion(BaseJumboFusion):
                  classifier_configs=None, nb_cores=1, weights=None,
                  nb_monoview_per_view=1,  C=1.0, kernel="rbf", degree=2, rs=None):
         self.need_probas=False
-        super(SVMJumboFusion, self).__init__(random_state, classifiers_names=classifiers_names,
+        BaseJumboFusion.__init__(self, random_state, classifiers_names=classifiers_names,
                                              classifier_configs=classifier_configs,
                                              nb_cores=nb_cores, weights=weights,
                                              nb_monoview_per_view=nb_monoview_per_view,
diff --git a/multiview_platform/mono_multi_view_classifiers/multiview_classifiers/weighted_linear_early_fusion.py b/multiview_platform/mono_multi_view_classifiers/multiview_classifiers/weighted_linear_early_fusion.py
index 4f4eadce..de918400 100644
--- a/multiview_platform/mono_multi_view_classifiers/multiview_classifiers/weighted_linear_early_fusion.py
+++ b/multiview_platform/mono_multi_view_classifiers/multiview_classifiers/weighted_linear_early_fusion.py
@@ -31,7 +31,7 @@ class WeightedLinearEarlyFusion(BaseMultiviewClassifier, BaseFusionClassifier):
     def __init__(self, random_state=None, view_weights=None,
                  monoview_classifier_name="decision_tree",
                  monoview_classifier_config={}):
-        super(WeightedLinearEarlyFusion, self).__init__(random_state=random_state)
+        BaseMultiviewClassifier.__init__(self, random_state=random_state)
         self.view_weights = view_weights
         self.monoview_classifier_name = monoview_classifier_name
         self.short_name = "early fusion " + self.monoview_classifier_name
diff --git a/multiview_platform/mono_multi_view_classifiers/multiview_classifiers/weighted_linear_late_fusion.py b/multiview_platform/mono_multi_view_classifiers/multiview_classifiers/weighted_linear_late_fusion.py
index 91b69b43..7e5c01aa 100644
--- a/multiview_platform/mono_multi_view_classifiers/multiview_classifiers/weighted_linear_late_fusion.py
+++ b/multiview_platform/mono_multi_view_classifiers/multiview_classifiers/weighted_linear_late_fusion.py
@@ -10,7 +10,7 @@ class WeightedLinearLateFusion(LateFusionClassifier):
     def __init__(self, random_state, classifiers_names=None,
                  classifier_configs=None, weights=None, nb_cores=1, rs=None):
         self.need_probas=True
-        super(WeightedLinearLateFusion, self).__init__(random_state=random_state,
+        LateFusionClassifier.__init__(self, random_state=random_state,
                                       classifiers_names=classifiers_names,
                                       classifier_configs=classifier_configs,
                                       nb_cores=nb_cores,weights=weights, rs=rs)
-- 
GitLab