From 6d384da476d8de9d570d71fc5ecaf19007dca71b Mon Sep 17 00:00:00 2001
From: bbauvin <baptiste.bauvin@centrale-marseille.fr>
Date: Mon, 29 Aug 2016 10:08:30 -0400
Subject: [PATCH] Merged gridsearches and added custom metrics for grid search;
 still need to add them to the result analysis
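
Every module in Metrics now exposes a get_scorer() function next to
score(), the monoview gridSearch functions take a metric argument, and
ExecClassif / ExecClassifMonoView gain a --CL_metrics option. As a
minimal sketch (not part of the diff) of how a metric module is meant
to be used (the kwargs keys "0", "1", ... stand for the metric's
optional parameters; for f1_score: "0" sample_weight, "1" labels,
"2" pos_label, "3" average):

    import Metrics

    metricModule = getattr(Metrics, "f1_score")
    # build a scorer with average="micro"; usable as GridSearchCV(scoring=scorer)
    scorer = metricModule.get_scorer(**{"3": "micro"})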

---
 Code/MonoMutliViewClassifiers/ExecClassif.py  |  8 +-
 .../Metrics/accuracy_score.py                 |  9 ++
 .../Metrics/f1_score.py                       | 22 +++++
 .../Metrics/fbeta_score.py                    | 26 ++++++
 .../Metrics/hamming_loss.py                   |  9 ++
 .../Metrics/jaccard_similarity_score.py       |  9 ++
 .../Metrics/log_loss.py                       | 13 +++
 .../Metrics/matthews_corrcoef.py              |  7 +-
 .../Metrics/precision_score.py                | 22 +++++
 .../Metrics/recall_score.py                   | 22 +++++
 .../Metrics/roc_auc_score.py                  | 13 +++
 .../Metrics/zero_one_loss.py                  |  9 ++
 .../Monoview/ExecClassifMonoView.py           | 15 ++-
 .../MonoviewClassifiers/Adaboost.py           | 31 ++++---
 .../MonoviewClassifiers/DecisionTree.py       | 27 +++---
 .../MonoviewClassifiers/KNN.py                | 25 +++--
 .../MonoviewClassifiers/RandomForest.py       | 68 +++++++-------
 .../MonoviewClassifiers/SGD.py                | 29 +++---
 .../MonoviewClassifiers/SVMLinear.py          | 26 ++++--
 .../MonoviewClassifiers/SVMPoly.py            | 31 +++++--
 .../MonoviewClassifiers/SVMRBF.py             | 25 +++--
 .../Multiview/ExecMultiview.py                |  8 +-
 .../Multiview/Fusion/Fusion.py                | 26 +++++-
 .../Multiview/Fusion/Methods/EarlyFusion.py   | 92 +++++++++----------
 .../Mumbo/Classifiers/DecisionTree.py         |  2 +-
 .../Multiview/Mumbo/Classifiers/Kover.py      |  7 +-
 .../Multiview/Mumbo/Mumbo.py                  | 12 +--
 27 files changed, 411 insertions(+), 182 deletions(-)
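
Reviewer note (illustrative only, ignored when the patch is applied):
metrics are passed on the command line as a ':'-separated list of
Metrics module names, for example

    --CL_metrics f1_score:accuracy_score

ExecClassif.py splits this list and hands one metric name per view to
the monoview grid searches. The classifier gridSearch functions expect
metric=[metricName, metricKWARGS] and build a scorer from the named
module's get_scorer(); the GridSearchCV calls themselves still use
scoring='accuracy' for now.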

diff --git a/Code/MonoMutliViewClassifiers/ExecClassif.py b/Code/MonoMutliViewClassifiers/ExecClassif.py
index 4f52c31c..54b27ba1 100644
--- a/Code/MonoMutliViewClassifiers/ExecClassif.py
+++ b/Code/MonoMutliViewClassifiers/ExecClassif.py
@@ -58,6 +58,8 @@ groupClass.add_argument('--CL_algos_multiview', metavar='STRING', action='store'
                         help='Determine which multiview classifier to use, separate with : if multiple, if empty, considering all', default='')
 groupClass.add_argument('--CL_cores', metavar='INT', action='store', help='Number of cores, -1 for all', type=int,
                         default=1)
+groupClass.add_argument('--CL_metrics', metavar='STRING', action='store',
+                        help='Determine which metrics to use, separate with ":" if multiple; if empty, all metrics are considered', default='')
 
 groupRF = parser.add_argument_group('Random Forest arguments')
 groupRF.add_argument('--CL_RF_trees', metavar='STRING', action='store', help='GridSearch: Determine the trees',
@@ -198,9 +200,9 @@ if "Monoview" in args.CL_type.strip(":"):
 
 fusionClassifierConfig = "a"
 fusionMethodConfig = "a"
-mumboNB_ITER = 2
 mumboClassifierConfig = "a"
 mumboclassifierNames = "a"
+metrics = args.CL_metrics.split(":")
 
 RandomForestKWARGS = {"0":map(int, args.CL_RF_trees.split())}
 SVMLinearKWARGS = {"0":map(int, args.CL_SVML_C.split(":"))}
@@ -230,7 +232,7 @@ for viewIndex, viewArguments in enumerate(argumentDictionaries["Monoview"].value
     resultsMonoview += (Parallel(n_jobs=nbCores)(
         delayed(ExecMonoview)(DATASET.get("View"+str(viewIndex)), DATASET.get("labels").value, args.name,
                               args.CL_split, args.CL_nbFolds, 1, args.type, args.pathF, gridSearch=True,
-                              **arguments)
+                              metrics=metrics[viewIndex], **arguments)
         for arguments in viewArguments))
 
     accuracies = [result[1] for result in resultsMonoview[viewIndex]]
@@ -283,7 +285,7 @@ for viewIndex, viewArguments in enumerate(argumentDictionaries["Monoview"].value
 
 # resultsMultiview = Parallel(n_jobs=nbCores)(
 #     delayed(ExecMultiview)(DATASET, args.name, args.CL_split, args.CL_nbFolds, 1, args.type, args.pathF,
-#                            LABELS_DICTIONARY, gridSearch=True, **arguments)
+#                            LABELS_DICTIONARY, gridSearch=True, metrics=metrics, **arguments)
 #     for arguments in argumentDictionaries["Multiview"])
 resultsMultiview = []
 results = (resultsMonoview, resultsMultiview)
diff --git a/Code/MonoMutliViewClassifiers/Metrics/accuracy_score.py b/Code/MonoMutliViewClassifiers/Metrics/accuracy_score.py
index 08d5c30d..226326f9 100644
--- a/Code/MonoMutliViewClassifiers/Metrics/accuracy_score.py
+++ b/Code/MonoMutliViewClassifiers/Metrics/accuracy_score.py
@@ -1,4 +1,5 @@
 from sklearn.metrics import accuracy_score as metric
+from sklearn.metrics import make_scorer
 
 
 def score(y_true, y_pred, **kwargs):
@@ -8,3 +9,11 @@ def score(y_true, y_pred, **kwargs):
         sample_weight=None
     score = metric(y_true, y_pred, sample_weight=sample_weight)
     return score
+
+
+def get_scorer(**kwargs):
+    try:
+        sample_weight = kwargs["0"]
+    except:
+        sample_weight=None
+    return make_scorer(metric, greater_is_better=True, sample_weight=sample_weight)
\ No newline at end of file
diff --git a/Code/MonoMutliViewClassifiers/Metrics/f1_score.py b/Code/MonoMutliViewClassifiers/Metrics/f1_score.py
index 3dc8c3ae..d563d7dc 100644
--- a/Code/MonoMutliViewClassifiers/Metrics/f1_score.py
+++ b/Code/MonoMutliViewClassifiers/Metrics/f1_score.py
@@ -1,4 +1,5 @@
 from sklearn.metrics import f1_score as metric
+from sklearn.metrics import make_scorer
 
 
 def score(y_true, y_pred, **kwargs):
@@ -20,3 +21,24 @@ def score(y_true, y_pred, **kwargs):
         average = "binary"
     score = metric(y_true, y_pred, sample_weight=sample_weight, labels=labels, pos_label=pos_label, average=average)
     return score
+
+
+def get_scorer(**kwargs):
+    try:
+        sample_weight = kwargs["0"]
+    except:
+        sample_weight=None
+    try:
+        labels = kwargs["1"]
+    except:
+        labels=None
+    try:
+        pos_label = kwargs["2"]
+    except:
+        pos_label = 1
+    try:
+        average = kwargs["3"]
+    except:
+        average = "binary"
+    return make_scorer(metric, greater_is_better=True, sample_weight=sample_weight, labels=labels,
+                       pos_label=pos_label, average=average)
diff --git a/Code/MonoMutliViewClassifiers/Metrics/fbeta_score.py b/Code/MonoMutliViewClassifiers/Metrics/fbeta_score.py
index 74f1e241..73d834e2 100644
--- a/Code/MonoMutliViewClassifiers/Metrics/fbeta_score.py
+++ b/Code/MonoMutliViewClassifiers/Metrics/fbeta_score.py
@@ -1,4 +1,5 @@
 from sklearn.metrics import fbeta_score as metric
+from sklearn.metrics import make_scorer
 
 
 def score(y_true, y_pred, **kwargs):
@@ -24,3 +25,28 @@ def score(y_true, y_pred, **kwargs):
         average = "binary"
     score = metric(y_true, y_pred, beta, sample_weight=sample_weight, labels=labels, pos_label=pos_label, average=average)
     return score
+
+
+def get_scorer(**kwargs):
+    try:
+        sample_weight = kwargs["0"]
+    except:
+        sample_weight=None
+    try:
+        beta = kwargs["1"]
+    except:
+        beta=1.0
+    try:
+        labels = kwargs["2"]
+    except:
+        labels=None
+    try:
+        pos_label = kwargs["3"]
+    except:
+        pos_label = 1
+    try:
+        average = kwargs["4"]
+    except:
+        average = "binary"
+    return make_scorer(metric, greater_is_better=True, beta=beta, sample_weight=sample_weight, labels=labels,
+                       pos_label=pos_label, average=average)
diff --git a/Code/MonoMutliViewClassifiers/Metrics/hamming_loss.py b/Code/MonoMutliViewClassifiers/Metrics/hamming_loss.py
index fe35066d..8bc33bc2 100644
--- a/Code/MonoMutliViewClassifiers/Metrics/hamming_loss.py
+++ b/Code/MonoMutliViewClassifiers/Metrics/hamming_loss.py
@@ -1,4 +1,5 @@
 from sklearn.metrics import hamming_loss as metric
+from sklearn.metrics import make_scorer
 
 
 def score(y_true, y_pred, **kwargs):
@@ -8,3 +9,11 @@ def score(y_true, y_pred, **kwargs):
         classes=None
     score = metric(y_true, y_pred, classes=classes)
     return score
+
+
+def get_scorer(**kwargs):
+    try:
+        classes = kwargs["0"]
+    except:
+        classes=None
+    return make_scorer(metric, greater_is_better=False, classes=classes)
diff --git a/Code/MonoMutliViewClassifiers/Metrics/jaccard_similarity_score.py b/Code/MonoMutliViewClassifiers/Metrics/jaccard_similarity_score.py
index c6d134ad..18e0b484 100644
--- a/Code/MonoMutliViewClassifiers/Metrics/jaccard_similarity_score.py
+++ b/Code/MonoMutliViewClassifiers/Metrics/jaccard_similarity_score.py
@@ -1,4 +1,5 @@
 from sklearn.metrics import jaccard_similarity_score as metric
+from sklearn.metrics import make_scorer
 
 
 def score(y_true, y_pred, **kwargs):
@@ -8,3 +9,11 @@ def score(y_true, y_pred, **kwargs):
         sample_weight = None
     score = metric(y_true, y_pred, sample_weight=sample_weight)
     return score
+
+
+def get_scorer(**kwargs):
+    try:
+        sample_weight = kwargs["0"]
+    except:
+        sample_weight = None
+    return make_scorer(metric, greater_is_better=True, sample_weight=sample_weight)
diff --git a/Code/MonoMutliViewClassifiers/Metrics/log_loss.py b/Code/MonoMutliViewClassifiers/Metrics/log_loss.py
index a6dbf8a3..cb14bee9 100644
--- a/Code/MonoMutliViewClassifiers/Metrics/log_loss.py
+++ b/Code/MonoMutliViewClassifiers/Metrics/log_loss.py
@@ -1,4 +1,5 @@
 from sklearn.metrics import log_loss as metric
+from sklearn.metrics import make_scorer
 
 
 def score(y_true, y_pred, **kwargs):
@@ -12,3 +13,15 @@ def score(y_true, y_pred, **kwargs):
         eps = 1e-15
     score = metric(y_true, y_pred, sample_weight=sample_weight, eps=eps)
     return score
+
+
+def get_scorer(**kwargs):
+    try:
+        sample_weight = kwargs["0"]
+    except:
+        sample_weight = None
+    try:
+        eps = kwargs["1"]
+    except:
+        eps = 1e-15
+    return make_scorer(metric, greater_is_better=False, needs_proba=True, sample_weight=sample_weight, eps=eps)
diff --git a/Code/MonoMutliViewClassifiers/Metrics/matthews_corrcoef.py b/Code/MonoMutliViewClassifiers/Metrics/matthews_corrcoef.py
index b1ec857a..2a3c4447 100644
--- a/Code/MonoMutliViewClassifiers/Metrics/matthews_corrcoef.py
+++ b/Code/MonoMutliViewClassifiers/Metrics/matthews_corrcoef.py
@@ -1,6 +1,11 @@
 from sklearn.metrics import matthews_corrcoef as metric
+from sklearn.metrics import make_scorer
 
 
 def score(y_true, y_pred, **kwargs):
     score = metric(y_true, y_pred)
-    return score
\ No newline at end of file
+    return score
+
+
+def get_scorer(**kwargs):
+    return make_scorer(metric, greater_is_better=True)
\ No newline at end of file
diff --git a/Code/MonoMutliViewClassifiers/Metrics/precision_score.py b/Code/MonoMutliViewClassifiers/Metrics/precision_score.py
index 2a6cccad..9839c817 100644
--- a/Code/MonoMutliViewClassifiers/Metrics/precision_score.py
+++ b/Code/MonoMutliViewClassifiers/Metrics/precision_score.py
@@ -1,4 +1,5 @@
 from sklearn.metrics import precision_score as metric
+from sklearn.metrics import make_scorer
 
 
 def score(y_true, y_pred, **kwargs):
@@ -20,3 +21,24 @@ def score(y_true, y_pred, **kwargs):
         average = "binary"
     score = metric(y_true, y_pred, sample_weight=sample_weight, labels=labels, pos_label=pos_label, average=average)
     return score
+
+
+def get_scorer(**kwargs):
+    try:
+        sample_weight = kwargs["0"]
+    except:
+        sample_weight=None
+    try:
+        labels = kwargs["1"]
+    except:
+        labels=None
+    try:
+        pos_label = kwargs["2"]
+    except:
+        pos_label = 1
+    try:
+        average = kwargs["3"]
+    except:
+        average = "binary"
+    return make_scorer(metric, greater_is_better=True, sample_weight=sample_weight, labels=labels, pos_label=pos_label,
+                       average=average)
diff --git a/Code/MonoMutliViewClassifiers/Metrics/recall_score.py b/Code/MonoMutliViewClassifiers/Metrics/recall_score.py
index a4d10284..f4d38465 100644
--- a/Code/MonoMutliViewClassifiers/Metrics/recall_score.py
+++ b/Code/MonoMutliViewClassifiers/Metrics/recall_score.py
@@ -1,4 +1,5 @@
 from sklearn.metrics import recall_score as metric
+from sklearn.metrics import make_scorer
 
 
 def score(y_true, y_pred, **kwargs):
@@ -20,3 +21,24 @@ def score(y_true, y_pred, **kwargs):
         average = "binary"
     score = metric(y_true, y_pred, sample_weight=sample_weight, labels=labels, pos_label=pos_label, average=average)
     return score
+
+
+def get_scorer(**kwargs):
+    try:
+        sample_weight = kwargs["0"]
+    except:
+        sample_weight=None
+    try:
+        labels = kwargs["1"]
+    except:
+        labels=None
+    try:
+        pos_label = kwargs["2"]
+    except:
+        pos_label = 1
+    try:
+        average = kwargs["3"]
+    except:
+        average = "binary"
+    return make_scorer(metric, greater_is_better=True, sample_weight=sample_weight, labels=labels, pos_label=pos_label,
+                       average=average)
diff --git a/Code/MonoMutliViewClassifiers/Metrics/roc_auc_score.py b/Code/MonoMutliViewClassifiers/Metrics/roc_auc_score.py
index 5a9f7cfb..04fb96cb 100644
--- a/Code/MonoMutliViewClassifiers/Metrics/roc_auc_score.py
+++ b/Code/MonoMutliViewClassifiers/Metrics/roc_auc_score.py
@@ -1,4 +1,5 @@
 from sklearn.metrics import roc_auc_score as metric
+from sklearn.metrics import make_scorer
 
 
 def score(y_true, y_pred, **kwargs):
@@ -12,3 +13,15 @@ def score(y_true, y_pred, **kwargs):
         average = "binary"
     score = metric(y_true, y_pred, sample_weight=sample_weight, average=average)
     return score
+
+
+def get_scorer(**kwargs):
+    try:
+        sample_weight = kwargs["0"]
+    except:
+        sample_weight=None
+    try:
+        average = kwargs["1"]
+    except:
+        average = "macro"
+    return make_scorer(metric, greater_is_better=True, needs_threshold=True, sample_weight=sample_weight, average=average)
diff --git a/Code/MonoMutliViewClassifiers/Metrics/zero_one_loss.py b/Code/MonoMutliViewClassifiers/Metrics/zero_one_loss.py
index a741825b..833d2402 100644
--- a/Code/MonoMutliViewClassifiers/Metrics/zero_one_loss.py
+++ b/Code/MonoMutliViewClassifiers/Metrics/zero_one_loss.py
@@ -1,4 +1,5 @@
 from sklearn.metrics import zero_one_loss as metric
+from sklearn.metrics import make_scorer
 
 
 def score(y_true, y_pred, **kwargs):
@@ -8,3 +9,11 @@ def score(y_true, y_pred, **kwargs):
         sample_weight=None
     score = metric(y_true, y_pred, sample_weight=sample_weight)
     return score
+
+
+def get_scorer(**kwargs):
+    try:
+        sample_weight = kwargs["0"]
+    except:
+        sample_weight=None
+    return make_scorer(metric, greater_is_better=False, sample_weight=sample_weight)
diff --git a/Code/MonoMutliViewClassifiers/Monoview/ExecClassifMonoView.py b/Code/MonoMutliViewClassifiers/Monoview/ExecClassifMonoView.py
index dda9e5d7..9e2a187c 100644
--- a/Code/MonoMutliViewClassifiers/Monoview/ExecClassifMonoView.py
+++ b/Code/MonoMutliViewClassifiers/Monoview/ExecClassifMonoView.py
@@ -20,6 +20,7 @@ import h5py
 import ClassifMonoView	                # Functions for classification
 import ExportResults                    # Functions to render results
 import MonoviewClassifiers
+import Metrics
 
 # Author-Info
 __author__ 	= "Nikolas Huelsmann, Baptiste BAUVIN"
@@ -30,7 +31,8 @@ __date__	= 2016-03-25
 ### Argument Parser
 
 
-def ExecMonoview(X, Y, name, learningRate, nbFolds, nbCores, databaseType, path, gridSearch=True, **kwargs):
+def ExecMonoview(X, Y, name, learningRate, nbFolds, nbCores, databaseType, path, gridSearch=True,
+                 metrics="accuracy_score", **kwargs):
 
     t_start = time.time()
     directory = os.path.dirname(os.path.abspath(__file__)) + "/Results-ClassMonoView/"
@@ -41,6 +43,7 @@ def ExecMonoview(X, Y, name, learningRate, nbFolds, nbCores, databaseType, path,
     CL_type = kwargs["CL_type"]
     classifierKWARGS = kwargs[CL_type+"KWARGS"]
     X = X.value
+    metrics = [getattr(Metrics, metricName) for metricName in ([metrics] if isinstance(metrics, str) else metrics)]
 
     # Determine the Database to extract features
     logging.debug("### Main Programm for Classification MonoView")
@@ -61,9 +64,10 @@ def ExecMonoview(X, Y, name, learningRate, nbFolds, nbCores, databaseType, path,
 
 
     classifierModule = getattr(MonoviewClassifiers, CL_type)
-    classifierFunction = getattr(classifierModule, "fit_gridsearch")
+    classifierGridSearch = getattr(classifierModule, "gridSearch")
 
-    cl_desc, cl_res = classifierFunction(X_train, y_train, nbFolds=nbFolds, nbCores=nbCores,**classifierKWARGS)
+    cl_desc = classifierGridSearch(X_train, y_train, nbFolds=nbFolds, nbCores=nbCores, metrics=metrics)
+    cl_res = classifierModule.fit(X_train, y_train, NB_CORES=nbCores)
     t_end  = time.time() - t_start
 
     # Add result to Results DF
@@ -149,6 +153,8 @@ if __name__=='__main__':
     groupClass.add_argument('--CL_CV', metavar='INT', action='store', help='Number of k-folds for CV', type=int, default=10)
     groupClass.add_argument('--CL_Cores', metavar='INT', action='store', help='Number of cores, -1 for all', type=int, default=1)
     groupClass.add_argument('--CL_split', metavar='FLOAT', action='store', help='Split ratio for train and test', type=float, default=0.9)
+    groupClass.add_argument('--CL_metrics', metavar='STRING', action='store',
+                            help='Determine which metrics to use, separate with ":" if multiple; if empty, all metrics are considered', default='')
 
 
     groupClassifier = parser.add_argument_group('Classifier Config')
@@ -217,4 +223,5 @@ if __name__=='__main__':
 
     arguments = {args.CL_type+"KWARGS": classifierKWARGS, "feat":args.feat,"fileFeat": args.fileFeat,
                  "fileCL": args.fileCL, "fileCLD": args.fileCLD, "CL_type": args.CL_type}
-    ExecMonoview(X, Y, args.name, args.CL_split, args.CL_CV, args.CL_Cores, args.type, args.pathF, **arguments)
+    ExecMonoview(X, Y, args.name, args.CL_split, args.CL_CV, args.CL_Cores, args.type, args.pathF,
+                 metrics=args.CL_metrics, **arguments)
diff --git a/Code/MonoMutliViewClassifiers/MonoviewClassifiers/Adaboost.py b/Code/MonoMutliViewClassifiers/MonoviewClassifiers/Adaboost.py
index 6eba0119..df9269d6 100644
--- a/Code/MonoMutliViewClassifiers/MonoviewClassifiers/Adaboost.py
+++ b/Code/MonoMutliViewClassifiers/MonoviewClassifiers/Adaboost.py
@@ -5,6 +5,7 @@ from sklearn.tree import DecisionTreeClassifier
 from sklearn.utils.testing import all_estimators
 import inspect
 import numpy as np
+import Metrics
 
 
 def fit(DATASET, CLASS_LABELS, NB_CORES=1,**kwargs):
@@ -14,25 +15,29 @@ def fit(DATASET, CLASS_LABELS, NB_CORES=1,**kwargs):
     classifier.fit(DATASET, CLASS_LABELS)
     return "No desc", classifier
 
-
-def fit_gridsearch(X_train, y_train, nbFolds=4, nbCores=1, **kwargs):
-    pipeline = Pipeline([('classifier', AdaBoostClassifier())])
-    param= {"classifier__n_estimators": map(int, kwargs['0']),
-                "classifier__base_estimator": [DecisionTreeClassifier() for arg in kwargs["1"]]}
-    grid = RandomizedSearchCV(pipeline,param_distributions=param,refit=True,n_jobs=nbCores,scoring='accuracy',cv=nbFolds)
-    detector = grid.fit(X_train, y_train)
-    desc_estimators = [detector.best_params_["classifier__n_estimators"]]
-    description = "Classif_" + "RF" + "-" + "CV_" +  str(nbFolds) + "-" + "Trees_" + str(map(str,desc_estimators))
-    return description, detector
-
-
-def gridSearch(X_train, y_train, nbFolds=4, nbCores=1):
+#
+# def fit_gridsearch(X_train, y_train, nbFolds=4, nbCores=1, metric=["accuracy_score", None], **kwargs):
+#     pipeline = Pipeline([('classifier', AdaBoostClassifier())])
+#     metricModule = getattr(Metrics, metric[0])
+#     scorer = metricModule.get_scorer(dict((index, metricConfig) for index, metricConfig in enumerate(metric[1])))
+#     param= {"classifier__n_estimators": map(int, kwargs['0']),
+#                 "classifier__base_estimator": [DecisionTreeClassifier() for arg in kwargs["1"]]}
+#     grid = RandomizedSearchCV(pipeline,param_distributions=param,refit=True,n_jobs=nbCores,scoring=scorer,cv=nbFolds)
+#     detector = grid.fit(X_train, y_train)
+#     desc_estimators = [detector.best_params_["classifier__n_estimators"]]
+#     description = "Classif_" + "RF" + "-" + "CV_" +  str(nbFolds) + "-" + "Trees_" + str(map(str,desc_estimators))
+#     return description, detector
+
+
+def gridSearch(X_train, y_train, nbFolds=4, metric=["accuracy_score", None], nbCores=1):
     pipeline = Pipeline([('classifier', AdaBoostClassifier())])
     classifiers = [clf for name, clf in all_estimators(type_filter='classifier')
                    if 'sample_weight' in inspect.getargspec(clf().fit)[0]
                    and (name != "AdaBoostClassifier" and name !="GradientBoostingClassifier")]
     param= {"classifier__n_estimators": np.random.randint(1, 30, 10),
             "classifier__base_estimator": classifiers}
+    metricModule = getattr(Metrics, metric[0])
+    scorer = metricModule.get_scorer(**dict((str(index), metricConfig) for index, metricConfig in enumerate(metric[1] or [])))
     grid = RandomizedSearchCV(pipeline,param_distributions=param,refit=True,n_jobs=nbCores,scoring='accuracy',cv=nbFolds)
     detector = grid.fit(X_train, y_train)
     desc_estimators = [detector.best_params_["classifier__n_estimators"],
diff --git a/Code/MonoMutliViewClassifiers/MonoviewClassifiers/DecisionTree.py b/Code/MonoMutliViewClassifiers/MonoviewClassifiers/DecisionTree.py
index c9c86c35..ce7e739b 100644
--- a/Code/MonoMutliViewClassifiers/MonoviewClassifiers/DecisionTree.py
+++ b/Code/MonoMutliViewClassifiers/MonoviewClassifiers/DecisionTree.py
@@ -2,6 +2,7 @@ from sklearn.tree import DecisionTreeClassifier
 from sklearn.pipeline import Pipeline                   # Pipelining in classification
 from sklearn.grid_search import GridSearchCV
 import numpy as np
+import Metrics
 
 def fit(DATASET, CLASS_LABELS, NB_CORES=1, **kwargs):
     maxDepth = int(kwargs['0'])
@@ -10,22 +11,24 @@ def fit(DATASET, CLASS_LABELS, NB_CORES=1, **kwargs):
     return "No desc", classifier
 
 
-def fit_gridsearch(X_train, y_train, nbFolds=4, nbCores=1, **kwargs):
-    pipeline_DT = Pipeline([('classifier', DecisionTreeClassifier())])
-    param_DT = {"classifier__max_depth":map(int, kwargs['0'])}
-
-    grid_DT = GridSearchCV(pipeline_DT, param_grid=param_DT, refit=True, n_jobs=nbCores, scoring='accuracy',
-                           cv=nbFolds)
-    DT_detector = grid_DT.fit(X_train, y_train)
-    desc_params = [DT_detector.best_params_["classifier__max_depth"]]
-    description = "Classif_" + "DT" + "-" + "CV_" + str(nbFolds) + "-" + "-".join(map(str,desc_params))
-    return description, DT_detector
+# def fit_gridsearch(X_train, y_train, nbFolds=4, nbCores=1, metric=["accuracy_score", None], **kwargs):
+#     pipeline_DT = Pipeline([('classifier', DecisionTreeClassifier())])
+#     param_DT = {"classifier__max_depth":map(int, kwargs['0'])}
+#     metricModule = getattr(Metrics, metric[0])
+#     scorer = metricModule.get_scorer(dict((index, metricConfig) for index, metricConfig in enumerate(metric[1])))
+#     grid_DT = GridSearchCV(pipeline_DT, param_grid=param_DT, refit=True, n_jobs=nbCores, scoring='accuracy',
+#                            cv=nbFolds)
+#     DT_detector = grid_DT.fit(X_train, y_train)
+#     desc_params = [DT_detector.best_params_["classifier__max_depth"]]
+#     description = "Classif_" + "DT" + "-" + "CV_" + str(nbFolds) + "-" + "-".join(map(str,desc_params))
+#     return description, DT_detector
 
 
-def gridSearch(X_train, y_train, nbFolds=4, nbCores=1, **kwargs):
+def gridSearch(X_train, y_train, nbFolds=4, nbCores=1, metric=["accuracy_score", None], **kwargs):
     pipeline_DT = Pipeline([('classifier', DecisionTreeClassifier())])
     param_DT = {"classifier__max_depth":np.random.randint(1, 30, 10)}
-
+    metricModule = getattr(Metrics, metric[0])
+    scorer = metricModule.get_scorer(**dict((str(index), metricConfig) for index, metricConfig in enumerate(metric[1] or [])))
     grid_DT = GridSearchCV(pipeline_DT, param_grid=param_DT, refit=True, n_jobs=nbCores, scoring='accuracy',
                            cv=nbFolds)
     DT_detector = grid_DT.fit(X_train, y_train)
diff --git a/Code/MonoMutliViewClassifiers/MonoviewClassifiers/KNN.py b/Code/MonoMutliViewClassifiers/MonoviewClassifiers/KNN.py
index 60cc21c1..5e513325 100644
--- a/Code/MonoMutliViewClassifiers/MonoviewClassifiers/KNN.py
+++ b/Code/MonoMutliViewClassifiers/MonoviewClassifiers/KNN.py
@@ -2,6 +2,7 @@ from sklearn.neighbors import KNeighborsClassifier
 from sklearn.pipeline import Pipeline                   # Pipelining in classification
 from sklearn.grid_search import GridSearchCV
 import numpy as np
+import Metrics
 
 def fit(DATASET, CLASS_LABELS, NB_CORES=1,**kwargs):
     nNeighbors = int(kwargs['0'])
@@ -10,20 +11,24 @@ def fit(DATASET, CLASS_LABELS, NB_CORES=1,**kwargs):
     return "No desc", classifier
 
 
-def fit_gridsearch(X_train, y_train, nbFolds=4, nbCores=1, **kwargs):
-    pipeline_KNN = Pipeline([('classifier', KNeighborsClassifier())])
-    param_KNN = {"classifier__n_neighbors": map(int, kwargs['0'])}
-    grid_KNN = GridSearchCV(pipeline_KNN, param_grid=param_KNN, refit=True, n_jobs=nbCores, scoring='accuracy',
-                            cv=nbFolds)
-    KNN_detector = grid_KNN.fit(X_train, y_train)
-    desc_params = [KNN_detector.best_params_["classifier__n_neighbors"]]
-    description = "Classif_" + "Lasso" + "-" + "CV_" + str(nbFolds) + "-" + "-".join(map(str,desc_params))
-    return description, KNN_detector
+# def fit_gridsearch(X_train, y_train, nbFolds=4, nbCores=1, metric=["accuracy_score", None], **kwargs):
+#     pipeline_KNN = Pipeline([('classifier', KNeighborsClassifier())])
+#     param_KNN = {"classifier__n_neighbors": map(int, kwargs['0'])}
+#     metricModule = getattr(Metrics, metric[0])
+#     scorer = metricModule.get_scorer(dict((index, metricConfig) for index, metricConfig in enumerate(metric[1])))
+#     grid_KNN = GridSearchCV(pipeline_KNN, param_grid=param_KNN, refit=True, n_jobs=nbCores, scoring='accuracy',
+#                             cv=nbFolds)
+#     KNN_detector = grid_KNN.fit(X_train, y_train)
+#     desc_params = [KNN_detector.best_params_["classifier__n_neighbors"]]
+#     description = "Classif_" + "Lasso" + "-" + "CV_" + str(nbFolds) + "-" + "-".join(map(str,desc_params))
+#     return description, KNN_detector
 
 
-def gridSearch(X_train, y_train, nbFolds=4, nbCores=1, **kwargs):
+def gridSearch(X_train, y_train, nbFolds=4, nbCores=1, metric=["accuracy_score", None], **kwargs):
     pipeline_KNN = Pipeline([('classifier', KNeighborsClassifier())])
     param_KNN = {"classifier__n_neighbors": np.random.randint(1, 30, 10)}
+    metricModule = getattr(Metrics, metric[0])
+    scorer = metricModule.get_scorer(**dict((str(index), metricConfig) for index, metricConfig in enumerate(metric[1] or [])))
     grid_KNN = GridSearchCV(pipeline_KNN, param_grid=param_KNN, refit=True, n_jobs=nbCores, scoring='accuracy',
                             cv=nbFolds)
     KNN_detector = grid_KNN.fit(X_train, y_train)
diff --git a/Code/MonoMutliViewClassifiers/MonoviewClassifiers/RandomForest.py b/Code/MonoMutliViewClassifiers/MonoviewClassifiers/RandomForest.py
index 1b3a1f41..445fdfec 100644
--- a/Code/MonoMutliViewClassifiers/MonoviewClassifiers/RandomForest.py
+++ b/Code/MonoMutliViewClassifiers/MonoviewClassifiers/RandomForest.py
@@ -1,6 +1,7 @@
 from sklearn.ensemble import RandomForestClassifier
 from sklearn.pipeline import Pipeline
 from sklearn.grid_search import GridSearchCV
+import Metrics
 
 
 def fit(DATASET, CLASS_LABELS, NB_CORES=1,**kwargs):
@@ -11,41 +12,42 @@ def fit(DATASET, CLASS_LABELS, NB_CORES=1,**kwargs):
     return "No desc", classifier
 
 
-def fit_gridsearch(X_train, y_train, nbFolds=4, nbCores=1, **kwargs):
-
-    # PipeLine with RandomForest classifier
-    pipeline_rf = Pipeline([('classifier', RandomForestClassifier())])
-
-    # Parameters for GridSearch: Number of Trees
-    # can be extended with: oob_score, min_samples_leaf, max_features
-    param_rf = {"classifier__n_estimators": map(int, kwargs['0'])}
-
-    # pipeline: Gridsearch avec le pipeline comme estimator
-    # param: pour obtenir le meilleur model il va essayer tous les possiblites
-    # refit: pour utiliser le meilleur model apres girdsearch
-    # n_jobs: Nombre de CPU (Mon ordi a des problemes avec -1 (Bug Python 2.7 sur Windows))
-    # scoring: scoring...
-    # cv: Nombre de K-Folds pour CV
-    grid_rf = GridSearchCV(
-        pipeline_rf,
-        param_grid=param_rf,
-        refit=True,
-        n_jobs=nbCores,
-        scoring='accuracy',
-        cv=nbFolds,
-    )
-
-    rf_detector = grid_rf.fit(X_train, y_train)
-
-    desc_estimators = [rf_detector.best_params_["classifier__n_estimators"]]
-    description = "Classif_" + "RF" + "-" + "CV_" +  str(nbFolds) + "-" + "Trees_" + str(map(str,desc_estimators))
-    return description, rf_detector
-
-
-def gridSearch(X_train, y_train, nbFolds=4, nbCores=1, **kwargs):
+# def fit_gridsearch(X_train, y_train, nbFolds=4, nbCores=1, metric=["accuracy_score", None], **kwargs):
+#
+#     # PipeLine with RandomForest classifier
+#     pipeline_rf = Pipeline([('classifier', RandomForestClassifier())])
+#
+#     # Parameters for GridSearch: Number of Trees
+#     # can be extended with: oob_score, min_samples_leaf, max_features
+#     param_rf = {"classifier__n_estimators": map(int, kwargs['0'])}
+#
+#     # pipeline: GridSearch with the pipeline as estimator
+#     # param: to find the best model it will try every possibility
+#     # refit: to use the best model after the grid search
+#     # n_jobs: number of CPUs (my machine has trouble with -1 (Python 2.7 bug on Windows))
+#     # scoring: scoring...
+#     # cv: number of k-folds for CV
+#     grid_rf = GridSearchCV(
+#         pipeline_rf,
+#         param_grid=param_rf,
+#         refit=True,
+#         n_jobs=nbCores,
+#         scoring='accuracy',
+#         cv=nbFolds,
+#     )
+#
+#     rf_detector = grid_rf.fit(X_train, y_train)
+#
+#     desc_estimators = [rf_detector.best_params_["classifier__n_estimators"]]
+#     description = "Classif_" + "RF" + "-" + "CV_" +  str(nbFolds) + "-" + "Trees_" + str(map(str,desc_estimators))
+#     return description, rf_detector
+
+
+def gridSearch(X_train, y_train, nbFolds=4, nbCores=1, metric=["accuracy_score", None], **kwargs):
     pipeline_rf = Pipeline([('classifier', RandomForestClassifier())])
     param_rf = {"classifier__n_estimators": np.random.randint(1, 30, 10)}
-
+    metricModule = getattr(Metrics, metric[0])
+    scorer = metricModule.get_scorer(**dict((str(index), metricConfig) for index, metricConfig in enumerate(metric[1] or [])))
     grid_rf = GridSearchCV(pipeline_rf,param_grid=param_rf,refit=True,n_jobs=nbCores,scoring='accuracy',cv=nbFolds)
     rf_detector = grid_rf.fit(X_train, y_train)
 
diff --git a/Code/MonoMutliViewClassifiers/MonoviewClassifiers/SGD.py b/Code/MonoMutliViewClassifiers/MonoviewClassifiers/SGD.py
index 9ad1f292..15627703 100644
--- a/Code/MonoMutliViewClassifiers/MonoviewClassifiers/SGD.py
+++ b/Code/MonoMutliViewClassifiers/MonoviewClassifiers/SGD.py
@@ -2,6 +2,7 @@ from sklearn.linear_model import SGDClassifier
 from sklearn.pipeline import Pipeline                   # Pipelining in classification
 from sklearn.grid_search import GridSearchCV
 import numpy as np
+import Metrics
 
 
 def fit(DATASET, CLASS_LABELS, NB_CORES=1,**kwargs):
@@ -16,26 +17,30 @@ def fit(DATASET, CLASS_LABELS, NB_CORES=1,**kwargs):
     return "No desc", classifier
 
 
-def fit_gridsearch(X_train, y_train, nbFolds=4, nbCores=1, **kwargs):
-    pipeline_SGD = Pipeline([('classifier', SGDClassifier())])
-    param_SGD = {"classifier__loss": kwargs['1'], "classifier__penalty": kwargs['2'],
-                 "classifier__alpha": map(float, kwargs['0'])}
-    grid_SGD = GridSearchCV(pipeline_SGD, param_grid=param_SGD, refit=True, n_jobs=nbCores, scoring='accuracy',
-                            cv=nbFolds)
-    SGD_detector = grid_SGD.fit(X_train, y_train)
-    desc_params = [SGD_detector.best_params_["classifier__loss"], SGD_detector.best_params_["classifier__penalty"],
-                   SGD_detector.best_params_["classifier__alpha"]]
-    description = "Classif_" + "Lasso" + "-" + "CV_" + str(nbFolds) + "-" + "-".join(map(str,desc_params))
-    return description, SGD_detector
+# def fit_gridsearch(X_train, y_train, nbFolds=4, nbCores=1, metric=["accuracy_score", None], **kwargs):
+#     pipeline_SGD = Pipeline([('classifier', SGDClassifier())])
+#     param_SGD = {"classifier__loss": kwargs['1'], "classifier__penalty": kwargs['2'],
+#                  "classifier__alpha": map(float, kwargs['0'])}
+#     metricModule = getattr(Metrics, metric[0])
+#     scorer = metricModule.get_scorer(dict((index, metricConfig) for index, metricConfig in enumerate(metric[1])))
+#     grid_SGD = GridSearchCV(pipeline_SGD, param_grid=param_SGD, refit=True, n_jobs=nbCores, scoring='accuracy',
+#                             cv=nbFolds)
+#     SGD_detector = grid_SGD.fit(X_train, y_train)
+#     desc_params = [SGD_detector.best_params_["classifier__loss"], SGD_detector.best_params_["classifier__penalty"],
+#                    SGD_detector.best_params_["classifier__alpha"]]
+#     description = "Classif_" + "Lasso" + "-" + "CV_" + str(nbFolds) + "-" + "-".join(map(str,desc_params))
+#     return description, SGD_detector
 
 
-def gridSearch(X_train, y_train, nbFolds=4, nbCores=1, **kwargs):
+def gridSearch(X_train, y_train, nbFolds=4, nbCores=1, metric=["accuracy_score", None], **kwargs):
     pipeline_SGD = Pipeline([('classifier', SGDClassifier())])
     losses = ['hinge', 'log', 'modified_huber', 'squared_hinge', 'perceptron']
     penalties = ["l1", "l2", "elasticnet"]
     alphas = list(np.random.randint(1,10,10))+list(np.random.random_sample(10))
     param_SGD = {"classifier__loss": losses, "classifier__penalty": penalties,
                  "classifier__alpha": alphas}
+    metricModule = getattr(Metrics, metric[0])
+    scorer = metricModule.get_scorer(**dict((str(index), metricConfig) for index, metricConfig in enumerate(metric[1] or [])))
     grid_SGD = GridSearchCV(pipeline_SGD, param_grid=param_SGD, refit=True, n_jobs=nbCores, scoring='accuracy',
                             cv=nbFolds)
     SGD_detector = grid_SGD.fit(X_train, y_train)
diff --git a/Code/MonoMutliViewClassifiers/MonoviewClassifiers/SVMLinear.py b/Code/MonoMutliViewClassifiers/MonoviewClassifiers/SVMLinear.py
index 6f59cb29..43619432 100644
--- a/Code/MonoMutliViewClassifiers/MonoviewClassifiers/SVMLinear.py
+++ b/Code/MonoMutliViewClassifiers/MonoviewClassifiers/SVMLinear.py
@@ -2,6 +2,7 @@ from sklearn.svm import SVC
 from sklearn.pipeline import Pipeline                   # Pipelining in classification
 from sklearn.grid_search import GridSearchCV
 import numpy as np
+import Metrics
 
 
 def fit(DATASET, CLASS_LABELS, NB_CORES=1,**kwargs):
@@ -11,22 +12,27 @@ def fit(DATASET, CLASS_LABELS, NB_CORES=1,**kwargs):
     return "No desc", classifier
 
 
-def fit_gridsearch(X_train, y_train, nbFolds=4, nbCores=1, **kwargs):
-    pipeline_SVMLinear = Pipeline([('classifier', SVC(kernel="linear"))])
-    param_SVMLinear = {"classifier__C": map(int, kwargs['0'])}
-    grid_SVMLinear = GridSearchCV(pipeline_SVMLinear, param_grid=param_SVMLinear, refit=True, n_jobs=nbCores, scoring='accuracy',
-                                  cv=nbFolds)
-    SVMLinear_detector = grid_SVMLinear.fit(X_train, y_train)
-    desc_params = [SVMLinear_detector.best_params_["classifier__C"]]
-    description = "Classif_" + "SVC" + "-" + "CV_" + str(nbFolds) + "-" + "-".join(map(str,desc_params))
-    return description, SVMLinear_detector
+# def fit_gridsearch(X_train, y_train, nbFolds=4, nbCores=1, metric=["accuracy_score", None], **kwargs):
+#     pipeline_SVMLinear = Pipeline([('classifier', SVC(kernel="linear"))])
+#     param_SVMLinear = {"classifier__C": map(int, kwargs['0'])}
+#     metricModule = getattr(Metrics, metric[0])
+#     scorer = metricModule.get_scorer(dict((index, metricConfig) for index, metricConfig in enumerate(metric[1])))
+#     grid_SVMLinear = GridSearchCV(pipeline_SVMLinear, param_grid=param_SVMLinear, refit=True, n_jobs=nbCores, scoring='accuracy',
+#                                   cv=nbFolds)
+#     SVMLinear_detector = grid_SVMLinear.fit(X_train, y_train)
+#     desc_params = [SVMLinear_detector.best_params_["classifier__C"]]
+#     description = "Classif_" + "SVC" + "-" + "CV_" + str(nbFolds) + "-" + "-".join(map(str,desc_params))
+#     return description, SVMLinear_detector
 
 
-def gridSearch(X_train, y_train, nbFolds=4, nbCores=1, **kwargs):
+def gridSearch(X_train, y_train, nbFolds=4, nbCores=1, metric=["accuracy_score", None], **kwargs):
     pipeline_SVMLinear = Pipeline([('classifier', SVC(kernel="linear"))])
     param_SVMLinear = {"classifier__C":np.random.randint(1,2000,30)}
+    metricModule = getattr(Metrics, metric[0])
+    scorer = metricModule.get_scorer(**dict((str(index), metricConfig) for index, metricConfig in enumerate(metric[1] or [])))
     grid_SVMLinear = GridSearchCV(pipeline_SVMLinear, param_grid=param_SVMLinear, refit=True, n_jobs=nbCores, scoring='accuracy',
                                   cv=nbFolds)
+
     SVMLinear_detector = grid_SVMLinear.fit(X_train, y_train)
     desc_params = [SVMLinear_detector.best_params_["classifier__C"]]
     return desc_params
diff --git a/Code/MonoMutliViewClassifiers/MonoviewClassifiers/SVMPoly.py b/Code/MonoMutliViewClassifiers/MonoviewClassifiers/SVMPoly.py
index 1c7ee599..7db4dd56 100644
--- a/Code/MonoMutliViewClassifiers/MonoviewClassifiers/SVMPoly.py
+++ b/Code/MonoMutliViewClassifiers/MonoviewClassifiers/SVMPoly.py
@@ -2,6 +2,7 @@ from sklearn.svm import SVC
 from sklearn.pipeline import Pipeline                   # Pipelining in classification
 from sklearn.grid_search import GridSearchCV
 import numpy as np
+import Metrics
 
 
 def fit(DATASET, CLASS_LABELS, NB_CORES=1,**kwargs):
@@ -12,17 +13,29 @@ def fit(DATASET, CLASS_LABELS, NB_CORES=1,**kwargs):
     return "No desc", classifier
 
 
-def fit_gridsearch(X_train, y_train, nbFolds=4, nbCores=1, **kwargs):
-    pipeline_SVMPoly = Pipeline([('classifier', SVC(kernel="poly"))])
-    param_SVMPoly= {"classifier__C": np.random.randint(1,2000,30), "classifier__degree": np.random.randint(1,10,5)}
-    grid_SVMPoly = GridSearchCV(pipeline_SVMPoly, param_grid=param_SVMPoly, refit=True, n_jobs=nbCores, scoring='accuracy',
-                                  cv=nbFolds)
-    SVMPoly_detector = grid_SVMPoly.fit(X_train, y_train)
-    desc_params = [SVMPoly_detector.best_params_["classifier__C"], SVMPoly_detector.best_params_["classifier__degree"]]
+# def fit_gridsearch(X_train, y_train, nbFolds=4, nbCores=1, metric=["accuracy_score", None], **kwargs):
+#     pipeline_SVMPoly = Pipeline([('classifier', SVC(kernel="poly"))])
+#     param_SVMPoly= {"classifier__C": np.random.randint(1,2000,30), "classifier__degree": np.random.randint(1,10,5)}
+#     metricModule = getattr(Metrics, metric[0])
+#     scorer = metricModule.get_scorer(dict((index, metricConfig) for index, metricConfig in enumerate(metric[1])))
+#     grid_SVMPoly = GridSearchCV(pipeline_SVMPoly, param_grid=param_SVMPoly, refit=True, n_jobs=nbCores, scoring='accuracy',
+#                                   cv=nbFolds)
+#     SVMPoly_detector = grid_SVMPoly.fit(X_train, y_train)
+#     desc_params = [SVMPoly_detector.best_params_["classifier__C"], SVMPoly_detector.best_params_["classifier__degree"]]
+#     return desc_params
+
+
+def gridSearch(X_train, y_train, nbFolds=4, nbCores=1, metric=["accuracy_score", None], **kwargs):
+    pipeline_SVMPoly = Pipeline([('classifier', SVC(kernel="poly"))])
+    param_SVMPoly = {"classifier__C": np.random.randint(1,2000,30)}
+    metricModule = getattr(Metrics, metric[0])
+    scorer = metricModule.get_scorer(**dict((str(index), metricConfig) for index, metricConfig in enumerate(metric[1] or [])))
+    grid_SVMPoly = GridSearchCV(pipeline_SVMPoly, param_grid=param_SVMPoly, refit=True, n_jobs=nbCores, scoring='accuracy',
+                                cv=nbFolds)
+    SVMPoly_detector = grid_SVMPoly.fit(X_train, y_train)
+    desc_params = [SVMPoly_detector.best_params_["classifier__C"]]
     return desc_params
 
 
-
-
 def getConfig(config):
     return "\n\t\t- SVM with C : "+config[0]+", kernel : "+config[1]
\ No newline at end of file
diff --git a/Code/MonoMutliViewClassifiers/MonoviewClassifiers/SVMRBF.py b/Code/MonoMutliViewClassifiers/MonoviewClassifiers/SVMRBF.py
index ae527e05..7c2e9276 100644
--- a/Code/MonoMutliViewClassifiers/MonoviewClassifiers/SVMRBF.py
+++ b/Code/MonoMutliViewClassifiers/MonoviewClassifiers/SVMRBF.py
@@ -2,6 +2,7 @@ from sklearn.svm import SVC
 from sklearn.pipeline import Pipeline                   # Pipelining in classification
 from sklearn.grid_search import GridSearchCV
 import numpy as np
+import Metrics
 
 
 def fit(DATASET, CLASS_LABELS, NB_CORES=1,**kwargs):
@@ -11,20 +12,24 @@ def fit(DATASET, CLASS_LABELS, NB_CORES=1,**kwargs):
     return "No desc", classifier
 
 
-def fit_gridsearch(X_train, y_train, nbFolds=4, nbCores=1, **kwargs):
-    pipeline_SVMRBF = Pipeline([('classifier', SVC(kernel="rbf"))])
-    param_SVMRBF = {"classifier__C": map(int, kwargs['0'])}
-    grid_SVMRBF = GridSearchCV(pipeline_SVMRBF, param_grid=param_SVMRBF, refit=True, n_jobs=nbCores, scoring='accuracy',
-                               cv=nbFolds)
-    SVMRBF_detector = grid_SVMRBF.fit(X_train, y_train)
-    desc_params = [SVMRBF_detector.best_params_["classifier__C"]]
-    description = "Classif_" + "SVC" + "-" + "CV_" + str(nbFolds) + "-" + "-".join(map(str,desc_params))
-    return description, SVMRBF_detector
+# def fit_gridsearch(X_train, y_train, nbFolds=4, nbCores=1, metric=["accuracy_score", None], **kwargs):
+#     pipeline_SVMRBF = Pipeline([('classifier', SVC(kernel="rbf"))])
+#     param_SVMRBF = {"classifier__C": map(int, kwargs['0'])}
+#     metricModule = getattr(Metrics, metric[0])
+#     scorer = metricModule.get_scorer(dict((index, metricConfig) for index, metricConfig in enumerate(metric[1])))
+#     grid_SVMRBF = GridSearchCV(pipeline_SVMRBF, param_grid=param_SVMRBF, refit=True, n_jobs=nbCores, scoring='accuracy',
+#                                cv=nbFolds)
+#     SVMRBF_detector = grid_SVMRBF.fit(X_train, y_train)
+#     desc_params = [SVMRBF_detector.best_params_["classifier__C"]]
+#     description = "Classif_" + "SVC" + "-" + "CV_" + str(nbFolds) + "-" + "-".join(map(str,desc_params))
+#     return description, SVMRBF_detector
 
 
-def gridSearch(X_train, y_train, nbFolds=4, nbCores=1, **kwargs):
+def gridSearch(X_train, y_train, nbFolds=4, nbCores=1, metric=["accuracy_score", None], **kwargs):
     pipeline_SVMRBF = Pipeline([('classifier', SVC(kernel="rbf"))])
     param_SVMRBF = {"classifier__C": np.random.randint(1,2000,30)}
+    metricModule = getattr(Metrics, metric[0])
+    scorer = metricModule.get_scorer(**dict((str(index), metricConfig) for index, metricConfig in enumerate(metric[1] or [])))
     grid_SVMRBF = GridSearchCV(pipeline_SVMRBF, param_grid=param_SVMRBF, refit=True, n_jobs=nbCores, scoring='accuracy',
                                cv=nbFolds)
     SVMRBF_detector = grid_SVMRBF.fit(X_train, y_train)
diff --git a/Code/MonoMutliViewClassifiers/Multiview/ExecMultiview.py b/Code/MonoMutliViewClassifiers/Multiview/ExecMultiview.py
index 75dceac7..450624ec 100644
--- a/Code/MonoMutliViewClassifiers/Multiview/ExecMultiview.py
+++ b/Code/MonoMutliViewClassifiers/Multiview/ExecMultiview.py
@@ -16,12 +16,15 @@ import time
 
 
 
-def ExecMultiview(DATASET, name, learningRate, nbFolds, nbCores, databaseType, path, LABELS_DICTIONARY, gridSearch=False, **kwargs):
+def ExecMultiview(DATASET, name, learningRate, nbFolds, nbCores, databaseType, path, LABELS_DICTIONARY,
+                  gridSearch=False, metrics=None, **kwargs):
 
     datasetLength = DATASET.get("Metadata").attrs["datasetLength"]
     NB_VIEW = DATASET.get("Metadata").attrs["nbView"]
     views = [str(DATASET.get("View"+str(viewIndex)).attrs["name"]) for viewIndex in range(NB_VIEW)]
     NB_CLASS = DATASET.get("Metadata").attrs["nbClass"]
+    if not metrics:
+        metrics = ["accuracy_score" for view in range (NB_VIEW)]
 
     CL_type = kwargs["CL_type"]
     views = kwargs["views"]
@@ -78,7 +81,8 @@ def ExecMultiview(DATASET, name, learningRate, nbFolds, nbCores, databaseType, p
 
     if gridSearch:
         logging.info("Start:\t Gridsearching best settings for monoview classifiers")
-        bestSettings, fusionConfig = classifierGridSearch(DATASET, classificationKWARGS, learningIndices)
+        bestSettings, fusionConfig = classifierGridSearch(DATASET, classificationKWARGS, learningIndices,
+                                                          metrics=metrics)
         classificationKWARGS["classifiersConfigs"] = bestSettings
         try:
             classificationKWARGS["fusionMethodConfig"] = fusionConfig
diff --git a/Code/MonoMutliViewClassifiers/Multiview/Fusion/Fusion.py b/Code/MonoMutliViewClassifiers/Multiview/Fusion/Fusion.py
index 899350e0..73d82040 100644
--- a/Code/MonoMutliViewClassifiers/Multiview/Fusion/Fusion.py
+++ b/Code/MonoMutliViewClassifiers/Multiview/Fusion/Fusion.py
@@ -1,8 +1,22 @@
 from Methods import *
 import MonoviewClassifiers
+import numpy as np
 
 
-def gridSearch_hdf5(DATASET, classificationKWARGS):
+def makeMonoviewData_hdf5(DATASET, weights=None, usedIndices=None):
+    if not usedIndices:
+        usedIndices = range(DATASET.get("Metadata").attrs["datasetLength"])
+    NB_VIEW = DATASET.get("Metadata").attrs["nbView"]
+    if weights is None:
+        weights = np.array([1.0/NB_VIEW for i in range(NB_VIEW)])
+    if sum(weights)!=1:
+        weights = weights/sum(weights)
+    monoviewData = np.concatenate([weights[viewIndex]*DATASET.get("View"+str(viewIndex))[usedIndices, :]
+                                        for viewIndex in np.arange(NB_VIEW)], axis=1)
+    return monoviewData
+
+
+def gridSearch_hdf5(DATASET, classificationKWARGS, learningIndices, metrics=None):
     fusionTypeName = classificationKWARGS["fusionType"]
     fusionTypePackage = globals()[fusionTypeName+"Package"]
     fusionMethodModuleName = classificationKWARGS["fusionMethod"]
@@ -12,10 +26,14 @@ def gridSearch_hdf5(DATASET, classificationKWARGS):
     for classifierIndex, classifierName in enumerate(classifiersNames):
         classifierModule = getattr(MonoviewClassifiers, classifierName)
         classifierMethod = getattr(classifierModule, "gridSearch")
-        bestSettings.append(classifierMethod(DATASET.get("View"+str(classifierIndex))[...],
-                                             DATASET.get("labels")[...]))
+        if fusionTypeName == "LateFusion":
+            bestSettings.append(classifierMethod(DATASET.get("View"+str(classifierIndex))[learningIndices],
+                                                 DATASET.get("labels")[learningIndices], metrics=metrics[classifierIndex]))
+        else:
+            bestSettings.append(classifierMethod(makeMonoviewData_hdf5(DATASET, usedIndices=learningIndices),
+                                                 DATASET.get("labels")[learningIndices], metrics=metrics[classifierIndex]))
     classificationKWARGS["classifiersConfigs"] = bestSettings
-    fusionMethodConfig = fusionMethodModule.gridSearch(DATASET, classificationKWARGS)
+    fusionMethodConfig = fusionMethodModule.gridSearch(DATASET, classificationKWARGS, learningIndices)
     return bestSettings, fusionMethodConfig
 
 
diff --git a/Code/MonoMutliViewClassifiers/Multiview/Fusion/Methods/EarlyFusion.py b/Code/MonoMutliViewClassifiers/Multiview/Fusion/Methods/EarlyFusion.py
index 5c0875c3..627e1bb4 100644
--- a/Code/MonoMutliViewClassifiers/Multiview/Fusion/Methods/EarlyFusion.py
+++ b/Code/MonoMutliViewClassifiers/Multiview/Fusion/Methods/EarlyFusion.py
@@ -27,50 +27,50 @@ class EarlyFusionClassifier(object):
 
 
 
-class WeightedLinear(EarlyFusionClassifier):
-    def __init__(self, NB_CORES=1, **kwargs):
-        EarlyFusionClassifier.__init__(self, kwargs['classifiersNames'], kwargs['monoviewClassifiersConfigs'],
-                                      NB_CORES=NB_CORES)
-        self.weights = np.array(map(float, kwargs['fusionMethodConfig'][0]))
-
-    def fit_hdf5(self, DATASET, trainIndices=None):
-        if not trainIndices:
-            trainIndices = range(DATASET.get("Metadata").attrs["datasetLength"])
-        self.makeMonoviewData_hdf5(DATASET, weights=self.weights, usedIndices=trainIndices)
-        monoviewClassifierModule = getattr(MonoviewClassifiers, self.monoviewClassifierName)
-        desc, self.monoviewClassifier = monoviewClassifierModule.fit(self.monoviewData, DATASET.get("labels")[trainIndices],
-                                                               NB_CORES=self.nbCores,
-                                                               **dict((str(configIndex),config) for configIndex,config in
-                                                                      enumerate(self.monoviewClassifiersConfig)))
-
-    def predict_hdf5(self, DATASET, usedIndices=None):
-        if usedIndices == None:
-            usedIndices = range(DATASET.get("Metadata").attrs["datasetLength"])
-        if usedIndices:
-            self.makeMonoviewData_hdf5(DATASET, weights=self.weights, usedIndices=usedIndices)
-            predictedLabels = self.monoviewClassifier.predict(self.monoviewData)
-        else:
-            predictedLabels=[]
-        return predictedLabels
-
-    def predict_proba_hdf5(self, DATASET, usedIndices=None):
-        if usedIndices == None:
-            usedIndices = range(DATASET.get("Metadata").attrs["datasetLength"])
-        if usedIndices:
-            self.makeMonoviewData_hdf5(DATASET, weights=self.weights, usedIndices=usedIndices)
-            predictedLabels = self.monoviewClassifier.predict_proba(self.monoviewData)
-        else:
-            predictedLabels=[]
-        return predictedLabels
-
-    def getConfig(self, fusionMethodConfig ,monoviewClassifiersNames, monoviewClassifiersConfigs):
-        configString = "with weighted concatenation, using weights : "+", ".join(map(str, self.weights))+\
-                       " with monoview classifier : "
-        monoviewClassifierModule = getattr(MonoviewClassifiers, monoviewClassifiersNames[0])
-        configString += monoviewClassifierModule.getConfig(monoviewClassifiersConfigs[0])
-        return configString
-
-    def gridSearch(self, classificationKWARGS):
-
-        return
+# class WeightedLinear(EarlyFusionClassifier):
+#     def __init__(self, NB_CORES=1, **kwargs):
+#         EarlyFusionClassifier.__init__(self, kwargs['classifiersNames'], kwargs['monoviewClassifiersConfigs'],
+#                                       NB_CORES=NB_CORES)
+#         self.weights = np.array(map(float, kwargs['fusionMethodConfig'][0]))
+#
+#     def fit_hdf5(self, DATASET, trainIndices=None):
+#         if not trainIndices:
+#             trainIndices = range(DATASET.get("Metadata").attrs["datasetLength"])
+#         self.makeMonoviewData_hdf5(DATASET, weights=self.weights, usedIndices=trainIndices)
+#         monoviewClassifierModule = getattr(MonoviewClassifiers, self.monoviewClassifierName)
+#         desc, self.monoviewClassifier = monoviewClassifierModule.fit(self.monoviewData, DATASET.get("labels")[trainIndices],
+#                                                                NB_CORES=self.nbCores,
+#                                                                **dict((str(configIndex),config) for configIndex,config in
+#                                                                       enumerate(self.monoviewClassifiersConfig)))
+#
+#     def predict_hdf5(self, DATASET, usedIndices=None):
+#         if usedIndices == None:
+#             usedIndices = range(DATASET.get("Metadata").attrs["datasetLength"])
+#         if usedIndices:
+#             self.makeMonoviewData_hdf5(DATASET, weights=self.weights, usedIndices=usedIndices)
+#             predictedLabels = self.monoviewClassifier.predict(self.monoviewData)
+#         else:
+#             predictedLabels=[]
+#         return predictedLabels
+#
+#     def predict_proba_hdf5(self, DATASET, usedIndices=None):
+#         if usedIndices == None:
+#             usedIndices = range(DATASET.get("Metadata").attrs["datasetLength"])
+#         if usedIndices:
+#             self.makeMonoviewData_hdf5(DATASET, weights=self.weights, usedIndices=usedIndices)
+#             predictedLabels = self.monoviewClassifier.predict_proba(self.monoviewData)
+#         else:
+#             predictedLabels=[]
+#         return predictedLabels
+#
+#     def getConfig(self, fusionMethodConfig ,monoviewClassifiersNames, monoviewClassifiersConfigs):
+#         configString = "with weighted concatenation, using weights : "+", ".join(map(str, self.weights))+\
+#                        " with monoview classifier : "
+#         monoviewClassifierModule = getattr(MonoviewClassifiers, monoviewClassifiersNames[0])
+#         configString += monoviewClassifierModule.getConfig(monoviewClassifiersConfigs[0])
+#         return configString
+#
+#     def gridSearch(self, classificationKWARGS):
+#
+#         return
 
diff --git a/Code/MonoMutliViewClassifiers/Multiview/Mumbo/Classifiers/DecisionTree.py b/Code/MonoMutliViewClassifiers/Multiview/Mumbo/Classifiers/DecisionTree.py
index 5aa54619..97d57a57 100644
--- a/Code/MonoMutliViewClassifiers/Multiview/Mumbo/Classifiers/DecisionTree.py
+++ b/Code/MonoMutliViewClassifiers/Multiview/Mumbo/Classifiers/DecisionTree.py
@@ -31,7 +31,7 @@ def getConfig(classifierConfig):
     return 'with depth ' + str(depth) + ', ' + ' sub-sampled at ' + str(subSampling) + ' '
 
 
-def gridSearch(data, labels):
+def gridSearch(data, labels, metrics="accuracy_score"):
     minSubSampling = 1.0/(len(labels)/2)
     bestSettings = []
     bestResults = []
diff --git a/Code/MonoMutliViewClassifiers/Multiview/Mumbo/Classifiers/Kover.py b/Code/MonoMutliViewClassifiers/Multiview/Mumbo/Classifiers/Kover.py
index f0e3dc15..ae1123e8 100644
--- a/Code/MonoMutliViewClassifiers/Multiview/Mumbo/Classifiers/Kover.py
+++ b/Code/MonoMutliViewClassifiers/Multiview/Mumbo/Classifiers/Kover.py
@@ -6,9 +6,4 @@ from ModifiedMulticlass import OneVsRestClassifier
 
 # Add weights
 
-
-def Kover(data, labels, arg, weights,):
-    isBad = False
-    subSamplingRatio = arg[0]
-
-    return classifier, prediction, isBad
\ No newline at end of file
+pass
diff --git a/Code/MonoMutliViewClassifiers/Multiview/Mumbo/Mumbo.py b/Code/MonoMutliViewClassifiers/Multiview/Mumbo/Mumbo.py
index c4a02a28..19dfc884 100644
--- a/Code/MonoMutliViewClassifiers/Multiview/Mumbo/Mumbo.py
+++ b/Code/MonoMutliViewClassifiers/Multiview/Mumbo/Mumbo.py
@@ -21,6 +21,7 @@ def computeWeights(DATASET_LENGTH, iterIndex, viewIndice, CLASS_LABELS, costMatr
                         for exampleIndice in range(DATASET_LENGTH)])
     return weights
 
+
 def trainWeakClassifier(classifierName, monoviewDataset, CLASS_LABELS,
                         DATASET_LENGTH, viewIndice, classifier_config, iterIndex, costMatrices):
     weights = computeWeights(DATASET_LENGTH, iterIndex, viewIndice, CLASS_LABELS, costMatrices)
@@ -30,6 +31,7 @@ def trainWeakClassifier(classifierName, monoviewDataset, CLASS_LABELS,
     logging.debug("\t\t\tView " + str(viewIndice) + " : " + str(averageAccuracy))
     return classifier, classes, isBad, averageAccuracy
 
+
 def trainWeakClassifier_hdf5(classifierName, monoviewDataset, CLASS_LABELS, DATASET_LENGTH,
                              viewIndice, classifier_config, viewName, iterIndex, costMatrices):
     weights = computeWeights(DATASET_LENGTH, iterIndex, viewIndice, CLASS_LABELS, costMatrices)
@@ -39,22 +41,20 @@ def trainWeakClassifier_hdf5(classifierName, monoviewDataset, CLASS_LABELS, DATA
     logging.debug("\t\t\tView " + str(viewIndice) + " : " + str(averageAccuracy))
     return classifier, classes, isBad, averageAccuracy
 
-def gridSearch_hdf5(DATASET, classificationKWARGS):
+
+def gridSearch_hdf5(DATASET, classificationKWARGS, learningIndices, metrics=None):
     classifiersNames = classificationKWARGS["classifiersNames"]
     bestSettings = []
     for classifierIndex, classifierName in enumerate(classifiersNames):
         logging.debug("\tStart:\t Gridsearch for "+classifierName+" on "+DATASET.get("View"+str(classifierIndex)).attrs["name"])
         classifierModule = globals()[classifierName]  # Permet d'appeler une fonction avec une string
         classifierMethod = getattr(classifierModule, "gridSearch")
-        bestSettings.append(classifierMethod(DATASET.get("View"+str(classifierIndex))[...],
-                                             DATASET.get("labels")[...]))
+        bestSettings.append(classifierMethod(DATASET.get("View"+str(classifierIndex))[learningIndices],
+                                             DATASET.get("labels")[learningIndices], metrics=metrics[classifierIndex]))
         logging.debug("\tDone:\t Gridsearch for "+classifierName)
     return bestSettings, None
 
 
-
-
-
 class Mumbo:
 
     def __init__(self, NB_VIEW, DATASET_LENGTH, CLASS_LABELS, NB_CORES=1,**kwargs):
-- 
GitLab