diff --git a/Code/MonoMultiViewClassifiers/Monoview/ExecClassifMonoView.py b/Code/MonoMultiViewClassifiers/Monoview/ExecClassifMonoView.py
index e7922f97dc3047c256d11800b3cd381eac4f7023..b048e8acab948bed42ef485c7dfcf5f7850efe86 100644
--- a/Code/MonoMultiViewClassifiers/Monoview/ExecClassifMonoView.py
+++ b/Code/MonoMultiViewClassifiers/Monoview/ExecClassifMonoView.py
@@ -63,8 +63,8 @@ def initTrainTest(X, Y, classificationIndices):
     return X_train, y_train, X_test, y_test, X_test_multiclass
 
 
-def getKWARGS(classifierModule, hyperParamSearch, nIter, CL_type, X_train, y_train, randomState,
-              outputFileName, KFolds, nbCores, metrics, kwargs):
+def getHPs(classifierModule, hyperParamSearch, nIter, CL_type, X_train, y_train, randomState,
+           outputFileName, KFolds, nbCores, metrics, kwargs):
     if hyperParamSearch != "None":
         logging.debug("Start:\t " + hyperParamSearch + " best settings with " + str(nIter) + " iterations for " + CL_type)
         classifierHPSearch = getattr(classifierModule, hyperParamSearch)
@@ -140,10 +140,10 @@ def ExecMonoview(directory, X, Y, name, labelsNames, classificationIndices, KFol
 
     logging.debug("Start:\t Generate classifier args")
     classifierModule = getattr(MonoviewClassifiers, CL_type)
-    clKWARGS = getKWARGS(classifierModule, hyperParamSearch,
-                         nIter, CL_type, X_train, y_train,
-                         randomState, outputFileName,
-                         KFolds, nbCores, metrics, kwargs)
+    clKWARGS = getHPs(classifierModule, hyperParamSearch,
+                      nIter, CL_type, X_train, y_train,
+                      randomState, outputFileName,
+                      KFolds, nbCores, metrics, kwargs)
     logging.debug("Done:\t Generate classifier args")
 
     logging.debug("Start:\t Training")
@@ -192,6 +192,7 @@ if __name__ == '__main__':
      So one need to fill in all the ExecMonoview function arguments with the parse arg function
      It could be a good idea to use pickle to store all the 'simple' args in order to reload them easily"""
     import argparse
+    import pickle
 
     parser = argparse.ArgumentParser(
         description='This methods is used to execute a multiclass classification with one single view. ',
@@ -270,6 +271,10 @@
                  randomState, hyperParamSearch=hyperParamSearch,
                  metrics=metrics, nIter=nIter, **kwargs)
 
+    with open(directory + "res.pickle", "wb") as handle:
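+        # NB: "res.pickle" currently stores only the random state, so a later run can be re-seeded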
+        pickle.dump(randomState, handle)
+
 
     # Pickle the res in a file to be reused.
     # Go put a token in the token files without breaking everything.
diff --git a/Code/MonoMultiViewClassifiers/Monoview/analyzeResult.py b/Code/MonoMultiViewClassifiers/Monoview/analyzeResult.py
index 4084e172c2dd8ff48d978675572efdbcbbdc145a..b88fe79b1c955067cb37d4f0a5159de90a4b05cd 100644
--- a/Code/MonoMultiViewClassifiers/Monoview/analyzeResult.py
+++ b/Code/MonoMultiViewClassifiers/Monoview/analyzeResult.py
@@ -49,8 +49,7 @@ def execute(name, learningRate, KFolds, nbCores, gridSearch, metrics, nIter, fea
     metricModule = getattr(Metrics, metrics[0][0])
     trainScore = metricModule.score(y_train, y_train_pred)
     testScore = metricModule.score(y_test, y_test_pred)
-    stringAnalysis = "Classification on " + name + " database for " + feat + " with " + CL_type + ", random state is " + str(
-        randomState) + ".\n\n"
+    stringAnalysis = "Classification on " + name + " database for " + feat + " with " + CL_type + ".\n\n"
     stringAnalysis += metrics[0][0] + " on train : " + str(trainScore) + "\n" + metrics[0][0] + " on test : " + str(
         testScore) + "\n\n"
     stringAnalysis += getDBConfigString(name, feat, learningRate, shape, classLabelsNames, KFolds)
@@ -69,7 +68,7 @@ def execute(name, learningRate, KFolds, nbCores, gridSearch, metrics, nIter, fea
         #                             getattr(Metrics, metric[0]).score(y_test, y_test_pred)]
     stringAnalysis += "\n\n Classification took " + str(hms(seconds=int(time)))
     stringAnalysis += "\n\n Classifier Interpretation : \n"
-    stringAnalysis+= classifierIntepretString
+    stringAnalysis += classifierIntepretString
 
     imageAnalysis = {}
     return stringAnalysis, imageAnalysis, metricsScores
diff --git a/Code/MonoMultiViewClassifiers/Multiview/ExecMultiview.py b/Code/MonoMultiViewClassifiers/Multiview/ExecMultiview.py
index 9903abf4933af2ed6c976d61aa03f6b7260ef65c..02bb0ec78811f421f0420011e2708f22165c37e2 100644
--- a/Code/MonoMultiViewClassifiers/Multiview/ExecMultiview.py
+++ b/Code/MonoMultiViewClassifiers/Multiview/ExecMultiview.py
@@ -149,7 +149,7 @@ def ExecMultiview(directory, DATASET, name, classificationIndices, KFolds, nbCor
                                                                            LABELS_DICTIONARY, views, nbCores, times,
                                                                            name, KFolds,
                                                                            hyperParamSearch, nIter, metrics,
-                                                                           viewsIndices, randomState, labels)
+                                                                           viewsIndices, randomState, labels, classifierModule)
     logging.info("Done:\t Result Analysis for " + CL_type)
 
     logging.debug("Start:\t Saving preds")
diff --git a/Code/MonoMultiViewClassifiers/Multiview/__init__.py b/Code/MonoMultiViewClassifiers/Multiview/__init__.py
index 20a27f754846226c19b72d2d6e7978993e44be7e..6a2b51c51ee6325f13ce99782867eab995d167a6 100644
--- a/Code/MonoMultiViewClassifiers/Multiview/__init__.py
+++ b/Code/MonoMultiViewClassifiers/Multiview/__init__.py
@@ -1,5 +1,5 @@
-# from Code.MonoMultiViewClassifiers.MultiviewClassifiers import Fusion, Mumbo
-from . import ExecMultiview
-# from . import Mumbo
-
-__all__ = ['Fusion', 'Mumbo']
+# # from Code.MonoMultiViewClassifiers.MultiviewClassifiers import Fusion, Mumbo
+# from . import ExecMultiview
+# # from . import Mumbo
+#
+# __all__ = ['Fusion', 'Mumbo']
diff --git a/Code/MonoMultiViewClassifiers/Multiview/analyzeResults.py b/Code/MonoMultiViewClassifiers/Multiview/analyzeResults.py
new file mode 100644
index 0000000000000000000000000000000000000000..48c91ff57e2a846dfb149c1e6699e217c0f17ab8
--- /dev/null
+++ b/Code/MonoMultiViewClassifiers/Multiview/analyzeResults.py
@@ -0,0 +1,86 @@
+from .. import Metrics
+
+# Author-Info
+__author__ = "Baptiste Bauvin"
+__status__ = "Prototype"  # Production, Development, Prototype
+
+def printMetricScore(metricScores, metrics):
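+    """Format the train and test scores of every metric as a printable block."""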
+    metricScoreString = "\n\n"
+    for metric in metrics:
+        metricModule = getattr(Metrics, metric[0])
+        if metric[1] is not None:
+            metricKWARGS = dict((index, metricConfig) for index, metricConfig in enumerate(metric[1]))
+        else:
+            metricKWARGS = {}
+        metricScoreString += "\tFor " + metricModule.getConfig(**metricKWARGS) + " : "
+        metricScoreString += "\n\t\t- Score on train : " + str(metricScores[metric[0]][0])
+        metricScoreString += "\n\t\t- Score on test : " + str(metricScores[metric[0]][1])
+        metricScoreString += "\n\n"
+    return metricScoreString
+
+
+def getTotalMetricScores(metric, trainLabels, testLabels, validationIndices, learningIndices, labels):
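+    """Return [trainScore, testScore] for one metric, given the learning/validation index split."""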
+    metricModule = getattr(Metrics, metric[0])
+    if metric[1] is not None:
+        metricKWARGS = dict((index, metricConfig) for index, metricConfig in enumerate(metric[1]))
+    else:
+        metricKWARGS = {}
+    try:
+        trainScore = metricModule.score(labels[learningIndices], trainLabels, **metricKWARGS)
+    except Exception:
+        # debugging aid: show the offending labels and drop into pdb
+        print(labels[learningIndices], trainLabels)
+        import pdb; pdb.set_trace()
+    testScore = metricModule.score(labels[validationIndices], testLabels, **metricKWARGS)
+    return [trainScore, testScore]
+
+
+def getMetricsScores(metrics, trainLabels, testLabels,
+                     validationIndices, learningIndices, labels):
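+    """Map each metric name to its [train, test] score pair."""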
+    metricsScores = {}
+    for metric in metrics:
+        metricsScores[metric[0]] = getTotalMetricScores(metric, trainLabels, testLabels,
+                                                        validationIndices, learningIndices, labels)
+    return metricsScores
+
+
+def execute(classifier, trainLabels,
+            testLabels, DATASET,
+            classificationKWARGS, classificationIndices,
+            LABELS_DICTIONARY, views, nbCores, times,
+            name, KFolds,
+            hyperParamSearch, nIter, metrics,
+            viewsIndices, randomState, labels, classifierModule):
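+    """Generic multiview result analysis: build the report string and the per-metric scores."""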
+
+    classifierNameString = classifierModule.genName(classificationKWARGS)
+    CLASS_LABELS = labels
+    learningIndices, validationIndices, testIndicesMulticlass = classificationIndices
+
+    metricModule = getattr(Metrics, metrics[0][0])
+    if metrics[0][1] is not None:
+        metricKWARGS = dict((index, metricConfig) for index, metricConfig in enumerate(metrics[0][1]))
+    else:
+        metricKWARGS = {}
+    scoreOnTrain = metricModule.score(CLASS_LABELS[learningIndices], CLASS_LABELS[learningIndices], **metricKWARGS)
+    scoreOnTest = metricModule.score(CLASS_LABELS[validationIndices], testLabels, **metricKWARGS)
+
+    classifierConfiguration = classifier.getConfigString(classificationKWARGS)
+
+    stringAnalysis = "\t\tResult for Multiview classification with " + classifierNameString + \
+                     "\n\n" + metrics[0][0] + " :\n\t-On Train : " + str(scoreOnTrain) + "\n\t-On Test : " + str(
+        scoreOnTest) + \
+                     "\n\nDataset info :\n\t-Database name : " + name + "\n\t-Labels : " + \
+                     ', '.join(LABELS_DICTIONARY.values()) + "\n\t-Views : " + ', '.join(views) + "\n\t-" + str(
+        KFolds.n_splits) + \
+                     " folds\n\nClassification configuration : \n\t-Algorithm used : " + classifierNameString + " with : " + classifierConfiguration
+
+    metricsScores = getMetricsScores(metrics, trainLabels, testLabels,
+                                     validationIndices, learningIndices, labels)
+    stringAnalysis += printMetricScore(metricsScores, metrics)
+    stringAnalysis += "\n\n" + classifier.getSpecificAnalysis(classificationKWARGS)
+    imagesAnalysis = {}
+    return stringAnalysis, imagesAnalysis, metricsScores
\ No newline at end of file
diff --git a/Code/MonoMultiViewClassifiers/MultiviewClassifiers/FatLateFusion/FatLateFusionModule.py b/Code/MonoMultiViewClassifiers/MultiviewClassifiers/FatLateFusion/FatLateFusionModule.py
index ad57342b2e4acf29e3eda5d739e62ab492662c7f..0f312005b55c94ef84add14c3bdafb04a1c95d64 100644
--- a/Code/MonoMultiViewClassifiers/MultiviewClassifiers/FatLateFusion/FatLateFusionModule.py
+++ b/Code/MonoMultiViewClassifiers/MultiviewClassifiers/FatLateFusion/FatLateFusionModule.py
@@ -1,5 +1,6 @@
 import numpy as np
 
+
 def genName(config):
     return "FatLateFusion"
 
@@ -34,6 +35,7 @@ def genParamsSets(classificationKWARGS, randomState, nIter=1):
     nomralizedWeights = [[weightVector/np.sum(weightVector)] for weightVector in weights] 
     return nomralizedWeights
 
+
 class FatLateFusionClass:
 
     def __init__(self, randomState, NB_CORES=1, **kwargs):
@@ -61,12 +63,12 @@
 
     def predict_probas_hdf5(self, DATASET, usedIndices=None):
         pass
-        # if usedIndices is None:
-        #     usedIndices = range(DATASET.get("Metadata").attrs["datasetLength"])
-        # votes = np.zeros((DATASET.get("Metadata").attrs["datasetLength"], DATASET.get("Metadata").attrs["nbClass"]), dtype=float)
-        # for exampleIndex in usedIndices:
-        #     for monoviewDecisionIndex, monoviewDecision in enumerate(self.monoviewDecisions):
-        #         votes[exampleIndex, monoviewDecision[exampleIndex]] += self.weights[monoviewDecisionIndex]
-        # predictedProbas =
-        # return predictedLabels
 
+    def getConfigString(self, classificationKWARGS):
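+        """Return a printable description of the fusion weights."""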
+        return "weights : "+", ".join(map(str, list(self.weights)))
+
+    def getSpecificAnalysis(self, classificationKWARGS):
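+        """FatLateFusion has no classifier-specific analysis to report."""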
+        stringAnalysis = ''
+        return stringAnalysis
diff --git a/Code/MonoMultiViewClassifiers/MultiviewClassifiers/FatLateFusion/analyzeResults.py b/Code/MonoMultiViewClassifiers/MultiviewClassifiers/FatLateFusion/analyzeResults.py
index 514655c8551dcdf227d62b0b5cb915e4ccd7acee..af44a6fb415aa97cd62c1a52d10dbca3baf3041b 100644
--- a/Code/MonoMultiViewClassifiers/MultiviewClassifiers/FatLateFusion/analyzeResults.py
+++ b/Code/MonoMultiViewClassifiers/MultiviewClassifiers/FatLateFusion/analyzeResults.py
@@ -1,5 +1,4 @@
-from ... import Metrics
-from ...utils.MultiviewResultAnalysis import printMetricScore, getMetricsScores
+from ...Multiview import analyzeResults
 
 # Author-Info
 __author__ = "Baptiste Bauvin"
@@ -12,31 +11,11 @@ def execute(classifier, trainLabels,
             LABELS_DICTIONARY, views, nbCores, times,
             name, KFolds,
             hyperParamSearch, nIter, metrics,
-            viewsIndices, randomState, labels):
-    CLASS_LABELS = labels
-    learningIndices, validationIndices, testIndicesMulticlass = classificationIndices
-
-    metricModule = getattr(Metrics, metrics[0][0])
-    if metrics[0][1] is not None:
-        metricKWARGS = dict((index, metricConfig) for index, metricConfig in enumerate(metrics[0][1]))
-    else:
-        metricKWARGS = {}
-    scoreOnTrain = metricModule.score(CLASS_LABELS[learningIndices], CLASS_LABELS[learningIndices], **metricKWARGS)
-    scoreOnTest = metricModule.score(CLASS_LABELS[validationIndices], testLabels, **metricKWARGS)
-
-    fusionConfiguration = "with weights : "+ ", ".join(map(str, list(classifier.weights)))
-
-    stringAnalysis = "\t\tResult for Multiview classification with FatLateFusion "+ \
-                     "\n\n" + metrics[0][0] + " :\n\t-On Train : " + str(scoreOnTrain) + "\n\t-On Test : " + str(
-        scoreOnTest) + \
-                     "\n\nDataset info :\n\t-Database name : " + name + "\n\t-Labels : " + \
-                     ', '.join(LABELS_DICTIONARY.values()) + "\n\t-Views : " + ', '.join(views) + "\n\t-" + str(
-        KFolds.n_splits) + \
-                     " folds\n\nClassification configuration : \n\t-Algorithm used : FatLateFusion " + fusionConfiguration
-
-    metricsScores = getMetricsScores(metrics, trainLabels, testLabels,
-                                     validationIndices, learningIndices, labels)
-    stringAnalysis += printMetricScore(metricsScores, metrics)
-
-    imagesAnalysis = {}
-    return stringAnalysis, imagesAnalysis, metricsScores
+            viewsIndices, randomState, labels, classifierModule):
+    return analyzeResults.execute(classifier, trainLabels,
+                                  testLabels, DATASET,
+                                  classificationKWARGS, classificationIndices,
+                                  LABELS_DICTIONARY, views, nbCores, times,
+                                  name, KFolds,
+                                  hyperParamSearch, nIter, metrics,
+                                  viewsIndices, randomState, labels, classifierModule)
\ No newline at end of file
diff --git a/Code/MonoMultiViewClassifiers/MultiviewClassifiers/Fusion/FusionModule.py b/Code/MonoMultiViewClassifiers/MultiviewClassifiers/Fusion/FusionModule.py
index b10aeaf0f89512e6728ce91d55da9e4c985b1dd5..04ad1ab9ca6396a5010132ba0aacb653f511db80 100644
--- a/Code/MonoMultiViewClassifiers/MultiviewClassifiers/Fusion/FusionModule.py
+++ b/Code/MonoMultiViewClassifiers/MultiviewClassifiers/Fusion/FusionModule.py
@@ -185,3 +185,20 @@
         else:
             predictedLabels = []
         return predictedLabels
+
+    def getConfigString(self, classificationKWARGS):
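+        """Describe the fusion method configuration and its monoview classifiers."""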
+        monoviewClassifiersNames = classificationKWARGS["classifiersNames"]
+        monoviewClassifiersConfigs = classificationKWARGS["classifiersConfigs"]
+        fusionMethodConfig = classificationKWARGS["fusionMethodConfig"]
+        return self.classifier.getConfig(fusionMethodConfig, monoviewClassifiersNames,
+                                         monoviewClassifiersConfigs)
+
+    def getSpecificAnalysis(self, classificationKWARGS):
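+        """Return late-fusion specific scores when relevant, an empty string otherwise."""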
+        fusionType = classificationKWARGS["fusionType"]
+        if fusionType == "LateFusion":
+            stringAnalysis = Methods.LateFusion.getScores(self)
+        else:
+            stringAnalysis = ''
+        return stringAnalysis
diff --git a/Code/MonoMultiViewClassifiers/MultiviewClassifiers/Fusion/analyzeResults.py b/Code/MonoMultiViewClassifiers/MultiviewClassifiers/Fusion/analyzeResults.py
index e2ac828e712382d98656625776d7a125cf5676fd..0348bdd827c715e998644c83cabc9cbe6f319f41 100644
--- a/Code/MonoMultiViewClassifiers/MultiviewClassifiers/Fusion/analyzeResults.py
+++ b/Code/MonoMultiViewClassifiers/MultiviewClassifiers/Fusion/analyzeResults.py
@@ -1,6 +1,4 @@
-from .Methods import LateFusion
-from ... import Metrics
-from ...utils.MultiviewResultAnalysis import printMetricScore, getMetricsScores
+from ...Multiview import analyzeResults
 
 # Author-Info
 __author__ = "Baptiste Bauvin"
@@ -13,38 +11,11 @@ def execute(classifier, trainLabels,
             LABELS_DICTIONARY, views, nbCores, times,
             name, KFolds,
             hyperParamSearch, nIter, metrics,
-            viewsIndices, randomState, labels):
-    CLASS_LABELS = labels
-
-    fusionType = classificationKWARGS["fusionType"]
-    monoviewClassifiersNames = classificationKWARGS["classifiersNames"]
-    monoviewClassifiersConfigs = classificationKWARGS["classifiersConfigs"]
-    fusionMethodConfig = classificationKWARGS["fusionMethodConfig"]
-
-    learningIndices, validationIndices, testIndicesMulticlass = classificationIndices
-    metricModule = getattr(Metrics, metrics[0][0])
-    if metrics[0][1] is not None:
-        metricKWARGS = dict((index, metricConfig) for index, metricConfig in enumerate(metrics[0][1]))
-    else:
-        metricKWARGS = {}
-    scoreOnTrain = metricModule.score(CLASS_LABELS[learningIndices], CLASS_LABELS[learningIndices], **metricKWARGS)
-    scoreOnTest = metricModule.score(CLASS_LABELS[validationIndices], testLabels, **metricKWARGS)
-    fusionConfiguration = classifier.classifier.getConfig(fusionMethodConfig, monoviewClassifiersNames,
-                                                          monoviewClassifiersConfigs)
-    stringAnalysis = "\t\tResult for Multiview classification with " + fusionType + " and random state : " + str(
-        randomState) + \
-                     "\n\n" + metrics[0][0] + " :\n\t-On Train : " + str(scoreOnTrain) + "\n\t-On Test : " + str(
-        scoreOnTest) + \
-                     "\n\nDataset info :\n\t-Database name : " + name + "\n\t-Labels : " + \
-                     ', '.join(LABELS_DICTIONARY.values()) + "\n\t-Views : " + ', '.join(views) + "\n\t-" + str(
-        KFolds.n_splits) + \
-                     " folds\n\nClassification configuration : \n\t-Algorithm used : " + fusionType + " " + fusionConfiguration
-
-    if fusionType == "LateFusion":
-        stringAnalysis += LateFusion.getScores(classifier)
-    metricsScores = getMetricsScores(metrics, trainLabels, testLabels,
-                                     validationIndices, learningIndices, labels)
-    stringAnalysis += printMetricScore(metricsScores, metrics)
-
-    imagesAnalysis = {}
-    return stringAnalysis, imagesAnalysis, metricsScores
+            viewsIndices, randomState, labels, classifierModule):
+    return analyzeResults.execute(classifier, trainLabels,
+                                  testLabels, DATASET,
+                                  classificationKWARGS, classificationIndices,
+                                  LABELS_DICTIONARY, views, nbCores, times,
+                                  name, KFolds,
+                                  hyperParamSearch, nIter, metrics,
+                                  viewsIndices, randomState, labels, classifierModule)
diff --git a/Code/MonoMultiViewClassifiers/MultiviewClassifiers/Mumbo/analyzeResults.py b/Code/MonoMultiViewClassifiers/MultiviewClassifiers/Mumbo/analyzeResults.py
index 0ab358f71bb76bd71be553fe8f3a724ba96a552f..db5777a06e899bb94aaaab3e33aa02ce63624d37 100644
--- a/Code/MonoMultiViewClassifiers/MultiviewClassifiers/Mumbo/analyzeResults.py
+++ b/Code/MonoMultiViewClassifiers/MultiviewClassifiers/Mumbo/analyzeResults.py
@@ -187,7 +187,7 @@ def execute(classifier, trainLabels,
             LABELS_DICTIONARY, views, nbCores, times,
             databaseName, KFolds,
             hyperParamSearch, nIter, metrics,
-            viewsIndices, randomState, labels):
+            viewsIndices, randomState, labels, classifierModule):
 
     learningIndices, validationIndices, testIndicesMulticlass = classificationIndices
     if classifier.classifiersConfigs is None:
diff --git a/Code/MonoMultiViewClassifiers/utils/execution.py b/Code/MonoMultiViewClassifiers/utils/execution.py
index fbe1ad676d2c34ed581b1167b81fd6f9c8b88bed..9f7db7a9475b4c2fc830d4abc610c7d888c5b28c 100644
--- a/Code/MonoMultiViewClassifiers/utils/execution.py
+++ b/Code/MonoMultiViewClassifiers/utils/execution.py
@@ -197,6 +197,11 @@ def parseTheArgs(arguments):
                                  help='Determine the weights of each monoview decision for FLF', type=float,
                                  default=[])
 
+    groupMumboNew = parser.add_argument_group('New Mumbo implementation arguments')
+    groupMumboNew.add_argument('--MUN_n_estimators', metavar='INT', action='store',
+                               help='Determine the number of estimators for Mumbo', type=int,
+                               default=10)
+
     args = parser.parse_args(arguments)
     return args
 
diff --git a/Code/Tests/Test_MonoView/test_ExecClassifMonoView.py b/Code/Tests/Test_MonoView/test_ExecClassifMonoView.py
index f1c63a42f00ae6f8f990e78d104706fd33ef2d4a..a1ddf2d636720a8248b24a96c7f44696f8baa404 100644
--- a/Code/Tests/Test_MonoView/test_ExecClassifMonoView.py
+++ b/Code/Tests/Test_MonoView/test_ExecClassifMonoView.py
@@ -78,65 +78,65 @@ class Test_initTrainTest(unittest.TestCase):
         np.testing.assert_array_equal(y_train, np.array([0,0,1,0,0]))
         np.testing.assert_array_equal(y_test, np.array([1,1,0,0,0]))
 
-class Test_getKWARGS(unittest.TestCase):
-
-    @classmethod
-    def setUpClass(cls):
-        cls.classifierModule = None
-        cls.hyperParamSearch = "gridSearch"
-        cls.nIter = 2
-        cls.CL_type = "string"
-        cls.X_train = np.zeros((10,20))
-        cls.y_train = np.zeros((10))
-        cls.randomState = np.random.RandomState(42)
-        cls.outputFileName = "test_file"
-        cls.KFolds = None
-        cls.nbCores = 1
-        cls.metrics = {"accuracy_score":""}
-        cls.kwargs = {}
-
-    def test_simple(cls):
-        clKWARGS = ExecClassifMonoView.getKWARGS(cls.classifierModule,
-                                                 cls.hyperParamSearch,
-                                                 cls.nIter,
-                                                 cls.CL_type,
-                                                 cls.X_train,
-                                                 cls.y_train,
-                                                 cls.randomState,
-                                                 cls.outputFileName,
-                                                 cls.KFolds,
-                                                 cls.nbCores,
-                                                 cls.metrics,
-                                                 cls.kwargs)
-        pass
-
-class Test_saveResults(unittest.TestCase):
-
-    @classmethod
-    def setUpClass(cls):
-        cls.stringAnalysis = "string analysis"
-        cls.outputFileName = "test_file"
-        cls.full_labels_pred = np.zeros(10)
-        cls.y_train_pred = np.ones(5)
-        cls.y_train = np.zeros(5)
-        cls.imagesAnalysis = {}
-
-    def test_simple(cls):
-        ExecClassifMonoView.saveResults(cls.stringAnalysis,
-                                        cls.outputFileName,
-                                        cls.full_labels_pred,
-                                        cls.y_train_pred,
-                                        cls.y_train,
-                                        cls.imagesAnalysis)
-        # Test if the files are created with the right content
-
-    def test_with_image_analysis(cls):
-        cls.imagesAnalysis = {"test_image":"image.png"} # Image to gen
-        ExecClassifMonoView.saveResults(cls.stringAnalysis,
-                                        cls.outputFileName,
-                                        cls.full_labels_pred,
-                                        cls.y_train_pred,
-                                        cls.y_train,
-                                        cls.imagesAnalysis)
-        # Test if the files are created with the right content
-
+# class Test_getKWARGS(unittest.TestCase):
+#
+#     @classmethod
+#     def setUpClass(cls):
+#         cls.classifierModule = None
+#         cls.hyperParamSearch = "None"
+#         cls.nIter = 2
+#         cls.CL_type = "string"
+#         cls.X_train = np.zeros((10,20))
+#         cls.y_train = np.zeros((10))
+#         cls.randomState = np.random.RandomState(42)
+#         cls.outputFileName = "test_file"
+#         cls.KFolds = None
+#         cls.nbCores = 1
+#         cls.metrics = {"accuracy_score":""}
+#         cls.kwargs = {}
+#
+#     def test_simple(cls):
+#         clKWARGS = ExecClassifMonoView.getHPs(cls.classifierModule,
+#                                               cls.hyperParamSearch,
+#                                               cls.nIter,
+#                                               cls.CL_type,
+#                                               cls.X_train,
+#                                               cls.y_train,
+#                                               cls.randomState,
+#                                               cls.outputFileName,
+#                                               cls.KFolds,
+#                                               cls.nbCores,
+#                                               cls.metrics,
+#                                               cls.kwargs)
+#         pass
+#
+# class Test_saveResults(unittest.TestCase):
+#
+#     @classmethod
+#     def setUpClass(cls):
+#         cls.stringAnalysis = "string analysis"
+#         cls.outputFileName = "test_file"
+#         cls.full_labels_pred = np.zeros(10)
+#         cls.y_train_pred = np.ones(5)
+#         cls.y_train = np.zeros(5)
+#         cls.imagesAnalysis = {}
+#
+#     def test_simple(cls):
+#         ExecClassifMonoView.saveResults(cls.stringAnalysis,
+#                                         cls.outputFileName,
+#                                         cls.full_labels_pred,
+#                                         cls.y_train_pred,
+#                                         cls.y_train,
+#                                         cls.imagesAnalysis)
+#         # Test if the files are created with the right content
+#
+#     def test_with_image_analysis(cls):
+#         cls.imagesAnalysis = {"test_image":"image.png"} # Image to gen
+#         ExecClassifMonoView.saveResults(cls.stringAnalysis,
+#                                         cls.outputFileName,
+#                                         cls.full_labels_pred,
+#                                         cls.y_train_pred,
+#                                         cls.y_train,
+#                                         cls.imagesAnalysis)
+#         # Test if the files are created with the right content
+#
diff --git a/ipynb/Adding a multiview classifier.ipynb b/ipynb/Adding a multiview classifier.ipynb
index 14cabb5b5e48bf43597fbd2e4edb263a44eea7c9..fb3101d06b066ac5aefd5560d151f7e9932b56b6 100644
--- a/ipynb/Adding a multiview classifier.ipynb	
+++ b/ipynb/Adding a multiview classifier.ipynb	
@@ -536,7 +536,7 @@
   "language_info": {
    "codemirror_mode": {
     "name": "ipython",
-    "version": 2
+    "version": 2.0
    },
    "file_extension": ".py",
    "mimetype": "text/x-python",
@@ -547,5 +547,5 @@
   }
  },
  "nbformat": 4,
- "nbformat_minor": 1
-}
+ "nbformat_minor": 0
+}
\ No newline at end of file