diff --git a/Code/MonoMultiViewClassifiers/MonoviewClassifiers/SCM.py b/Code/MonoMultiViewClassifiers/MonoviewClassifiers/SCM.py
index cdb1afacd0c1e3650c726a10c86cc9ebb8f9472f..ef3d4db72064ef758eb3460f6a282e56b2329986 100644
--- a/Code/MonoMultiViewClassifiers/MonoviewClassifiers/SCM.py
+++ b/Code/MonoMultiViewClassifiers/MonoviewClassifiers/SCM.py
@@ -51,8 +51,6 @@ def canProbas():
     return False
 
 
-
-
 def fit(DATASET, CLASS_LABELS, randomState, NB_CORES=1, **kwargs):
     modelType = kwargs['0']
     maxRules = int(kwargs['1'])
diff --git a/Code/MonoMultiViewClassifiers/Multiview/ExecMultiview.py b/Code/MonoMultiViewClassifiers/Multiview/ExecMultiview.py
index cec5d36adb21ad09f1243695fcb292541bec9163..22a3a79f071adaebc9286becd7a9029bd23c6ebc 100644
--- a/Code/MonoMultiViewClassifiers/Multiview/ExecMultiview.py
+++ b/Code/MonoMultiViewClassifiers/Multiview/ExecMultiview.py
@@ -57,7 +57,7 @@ def saveResults(LABELS_DICTIONARY, stringAnalysis, views, classifierModule, clas
     outputTextFile.close()
 
     if imagesAnalysis is not None:
-        for imageName in imagesAnalysis:
+        for imageName in imagesAnalysis.keys():
             if os.path.isfile(outputFileName + imageName + ".png"):
                 for i in range(1, 20):
                     testFileName = outputFileName + imageName + "-" + str(i) + ".png"
diff --git a/Code/MonoMultiViewClassifiers/MultiviewClassifiers/FatLateFusion/FatLateFusionModule.py b/Code/MonoMultiViewClassifiers/MultiviewClassifiers/FatLateFusion/FatLateFusionModule.py
index 6909c099e849088181e3a3852abe5a96358b3190..14c8ff099e1732377ae63aeac37822fd27faeb6d 100644
--- a/Code/MonoMultiViewClassifiers/MultiviewClassifiers/FatLateFusion/FatLateFusionModule.py
+++ b/Code/MonoMultiViewClassifiers/MultiviewClassifiers/FatLateFusion/FatLateFusionModule.py
@@ -1,11 +1,9 @@
 import numpy as np
 
-
-
-
 def genName(config):
     return "FatLateFusion"
 
+
 def getBenchmark(benchmark, args=None):
     benchmark["Multiview"]["FatLateFusion"] = ["take_everything"]
     return benchmark
@@ -31,8 +29,8 @@ def getArgs(args, benchmark, views, viewsIndices, randomState, directory, result
 def genParamsSets(classificationKWARGS, randomState, nIter=1):
     """Used to generate parameters sets for the random hyper parameters optimization function"""
     nbMonoviewClassifiers = len(classificationKWARGS["monoviewDecisions"])
-    weights = [randomState.random_sample(nbMonoviewClassifiers) for _ in range(len(classificationKWARGS["monoviewDecisions"]))]
-    nomralizedWeights = [weights/np.sum(weights)]
+    weights = [randomState.random_sample(nbMonoviewClassifiers) for _ in range(nIter)]
+    nomralizedWeights = [[weightVector/np.sum(weightVector)] for weightVector in weights]
     return nomralizedWeights
 
 class FatLateFusionClass:
diff --git a/Code/MonoMultiViewClassifiers/MultiviewClassifiers/FatLateFusion/analyzeResults.py b/Code/MonoMultiViewClassifiers/MultiviewClassifiers/FatLateFusion/analyzeResults.py
index 36d5346c1cf621657f7f3ab4fd8bdfb85c7d1ebf..514655c8551dcdf227d62b0b5cb915e4ccd7acee 100644
--- a/Code/MonoMultiViewClassifiers/MultiviewClassifiers/FatLateFusion/analyzeResults.py
+++ b/Code/MonoMultiViewClassifiers/MultiviewClassifiers/FatLateFusion/analyzeResults.py
@@ -1,4 +1,5 @@
 from ... import Metrics
+from ...utils.MultiviewResultAnalysis import printMetricScore, getMetricsScores
 
 # Author-Info
 __author__ = "Baptiste Bauvin"
@@ -39,44 +40,3 @@ def execute(classifier, trainLabels,
 
     imagesAnalysis = {}
     return stringAnalysis, imagesAnalysis, metricsScores
-
-
-def printMetricScore(metricScores, metrics):
-    metricScoreString = "\n\n"
-    for metric in metrics:
-        metricModule = getattr(Metrics, metric[0])
-        if metric[1] is not None:
-            metricKWARGS = dict((index, metricConfig) for index, metricConfig in enumerate(metric[1]))
-        else:
-            metricKWARGS = {}
-        metricScoreString += "\tFor " + metricModule.getConfig(**metricKWARGS) + " : "
-        metricScoreString += "\n\t\t- Score on train : " + str(metricScores[metric[0]][0])
-        metricScoreString += "\n\t\t- Score on test : " + str(metricScores[metric[0]][1])
-        metricScoreString += "\n\n"
-    return metricScoreString
-
-
-def getTotalMetricScores(metric, trainLabels, testLabels, validationIndices, learningIndices, labels):
-    metricModule = getattr(Metrics, metric[0])
-    if metric[1] is not None:
-        metricKWARGS = dict((index, metricConfig) for index, metricConfig in enumerate(metric[1]))
-    else:
-        metricKWARGS = {}
-    try:
-        trainScore = metricModule.score(labels[learningIndices], trainLabels, **metricKWARGS)
-    except:
-        print(labels[learningIndices])
-        print(trainLabels)
-        import pdb;pdb.set_trace()
-    testScore = metricModule.score(labels[validationIndices], testLabels, **metricKWARGS)
-    return [trainScore, testScore]
-
-
-def getMetricsScores(metrics, trainLabels, testLabels,
-                     validationIndices, learningIndices, labels):
-    metricsScores = {}
-    for metric in metrics:
-        metricsScores[metric[0]] = getTotalMetricScores(metric, trainLabels, testLabels,
-                                                        validationIndices, learningIndices, labels)
-    return metricsScores
-
diff --git a/Code/MonoMultiViewClassifiers/MultiviewClassifiers/Fusion/analyzeResults.py b/Code/MonoMultiViewClassifiers/MultiviewClassifiers/Fusion/analyzeResults.py
index abc5ecb46ccbc647eca2cffa4b70443eb0e0a8f8..e2ac828e712382d98656625776d7a125cf5676fd 100644
--- a/Code/MonoMultiViewClassifiers/MultiviewClassifiers/Fusion/analyzeResults.py
+++ b/Code/MonoMultiViewClassifiers/MultiviewClassifiers/Fusion/analyzeResults.py
@@ -1,57 +1,12 @@
-from sklearn.metrics import precision_recall_fscore_support, accuracy_score, classification_report
-import numpy as np
-import matplotlib
-
-# matplotlib.use('Agg')
-import operator
 from .Methods import LateFusion
 from ... import Metrics
+from ...utils.MultiviewResultAnalysis import printMetricScore, getMetricsScores
 
 # Author-Info
 __author__ = "Baptiste Bauvin"
 __status__ = "Prototype"  # Production, Development, Prototype
 
 
-def printMetricScore(metricScores, metrics):
-    metricScoreString = "\n\n"
-    for metric in metrics:
-        metricModule = getattr(Metrics, metric[0])
-        if metric[1] is not None:
-            metricKWARGS = dict((index, metricConfig) for index, metricConfig in enumerate(metric[1]))
-        else:
-            metricKWARGS = {}
-        metricScoreString += "\tFor " + metricModule.getConfig(**metricKWARGS) + " : "
-        metricScoreString += "\n\t\t- Score on train : " + str(metricScores[metric[0]][0])
-        metricScoreString += "\n\t\t- Score on test : " + str(metricScores[metric[0]][1])
-        metricScoreString += "\n\n"
-    return metricScoreString
-
-
-def getTotalMetricScores(metric, trainLabels, testLabels, validationIndices, learningIndices, labels):
-    metricModule = getattr(Metrics, metric[0])
-    if metric[1] is not None:
-        metricKWARGS = dict((index, metricConfig) for index, metricConfig in enumerate(metric[1]))
-    else:
-        metricKWARGS = {}
-    try:
-        trainScore = metricModule.score(labels[learningIndices], trainLabels, **metricKWARGS)
-    except:
-        print(labels[learningIndices])
-        print(trainLabels)
-        import pdb;pdb.set_trace()
-    testScore = metricModule.score(labels[validationIndices], testLabels, **metricKWARGS)
-    return [trainScore, testScore]
-
-
-def getMetricsScores(metrics, trainLabels, testLabels,
-                     validationIndices, learningIndices, labels):
-    metricsScores = {}
-    for metric in metrics:
-        metricsScores[metric[0]] = getTotalMetricScores(metric, trainLabels, testLabels,
-                                                        validationIndices, learningIndices, labels)
-    return metricsScores
-
-
 def execute(classifier, trainLabels,
             testLabels, DATASET,
             classificationKWARGS, classificationIndices,
diff --git a/Code/MonoMultiViewClassifiers/MultiviewClassifiers/Mumbo/analyzeResults.py b/Code/MonoMultiViewClassifiers/MultiviewClassifiers/Mumbo/analyzeResults.py
index 15a2acd899043ea30766d9939940882b3465b4f8..0ab358f71bb76bd71be553fe8f3a724ba96a552f 100644
--- a/Code/MonoMultiViewClassifiers/MultiviewClassifiers/Mumbo/analyzeResults.py
+++ b/Code/MonoMultiViewClassifiers/MultiviewClassifiers/Mumbo/analyzeResults.py
@@ -7,6 +7,7 @@ import numpy as np
 from ... import Metrics
 from ...utils.Dataset import getV, getShape
 from . import Classifiers
+from ...utils.MultiviewResultAnalysis import printMetricScore, getMetricsScores
 
 # Author-Info
 __author__ = "Baptiste Bauvin"
@@ -109,9 +110,6 @@ def getAlgoConfig(classifier, classificationKWARGS, nbCores, viewNames, hyperPar
     algoString += "\n\n"
     algoString += "\n\nComputation time on " + str(nbCores) + " cores : \n\tDatabase extraction time : " + str(
         hms(seconds=int(extractionTime))) + "\n\t"
-    row_format = "{:>15}" * 3
-    algoString += row_format.format("", *['Learn', 'Prediction'])
-    algoString += '\n\t'
     algoString += "\n\tSo a total classification time of " + str(hms(seconds=int(classificationTime))) + ".\n\n"
     algoString += "\n\n"
     return algoString, classifierAnalysis
@@ -121,7 +119,7 @@ def getReport(classifier, CLASS_LABELS, classificationIndices, DATASET, trainLab
               testLabels, viewIndices, metric):
     learningIndices, validationIndices, multiviewTestIndices = classificationIndices
     nbView = len(viewIndices)
-    NB_CLASS = len(set(CLASS_LABELS))  # DATASET.get("Metadata").attrs["nbClass"]
+    NB_CLASS = len(set(CLASS_LABELS))
     metricModule = getattr(Metrics, metric[0])
     fakeViewsIndicesDict = dict(
         (viewIndex, fakeViewIndex) for viewIndex, fakeViewIndex in zip(viewIndices, range(nbView)))
@@ -178,43 +176,6 @@ def modifiedMean(surplusAccuracies):
     return meanAccuracies
 
 
-def printMetricScore(metricScores, metrics):
-    metricScoreString = "\n\n"
-    for metric in metrics:
-        metricModule = getattr(Metrics, metric[0])
-        if metric[1] is not None:
-            metricKWARGS = dict((index, metricConfig) for index, metricConfig in enumerate(metric[1]))
-        else:
-            metricKWARGS = {}
-        metricScoreString += "\tFor " + metricModule.getConfig(**metricKWARGS) + " : "
-        metricScoreString += "\n\t\t- Score on train : " + str(metricScores[metric[0]][0])
-        metricScoreString += "\n\t\t- Score on test : " + str(metricScores[metric[0]][1])
-        metricScoreString += "\n\n"
-    return metricScoreString
-
-
-def getTotalMetricScores(metric, trainLabels, testLabels,
-                         validationIndices, learningIndices, labels):
-    metricModule = getattr(Metrics, metric[0])
-    if metric[1] is not None:
-        metricKWARGS = dict((index, metricConfig) for index, metricConfig in enumerate(metric[1]))
-    else:
-        metricKWARGS = {}
-    validationIndices = validationIndices
-    trainScore = metricModule.score(labels[learningIndices], trainLabels, **metricKWARGS)
-    testScore = metricModule.score(labels[validationIndices], testLabels, **metricKWARGS)
-    return [trainScore, testScore]
-
-
-def getMetricsScores(metrics, trainLabels, testLabels,
-                     validationIndices, learningIndices, labels):
-    metricsScores = {}
-    for metric in metrics:
-        metricsScores[metric[0]] = getTotalMetricScores(metric, trainLabels, testLabels,
-                                                        validationIndices, learningIndices, labels)
-    return metricsScores
-
-
 def getMeanIterations(kFoldClassifierStats, foldIndex):
     iterations = np.array([kFoldClassifier[foldIndex].iterIndex + 1 for kFoldClassifier in kFoldClassifierStats])
     return np.mean(iterations)
@@ -227,6 +188,7 @@ def execute(classifier, trainLabels,
             databaseName, KFolds,
             hyperParamSearch, nIter, metrics,
             viewsIndices, randomState, labels):
+
     learningIndices, validationIndices, testIndicesMulticlass = classificationIndices
     if classifier.classifiersConfigs is None:
         metricsScores = getMetricsScores(metrics, trainLabels, testLabels,
diff --git a/Code/MonoMultiViewClassifiers/utils/MultiviewResultAnalysis.py b/Code/MonoMultiViewClassifiers/utils/MultiviewResultAnalysis.py
new file mode 100644
index 0000000000000000000000000000000000000000..eed3ef9b73e5877792dcd81c9be104a8e273e07b
--- /dev/null
+++ b/Code/MonoMultiViewClassifiers/utils/MultiviewResultAnalysis.py
@@ -0,0 +1,45 @@
+from .. import Metrics
+
+# Author-Info
+__author__ = "Baptiste Bauvin"
+__status__ = "Prototype"  # Production, Development, Prototype
+
+def printMetricScore(metricScores, metrics):
+    metricScoreString = "\n\n"
+    for metric in metrics:
+        metricModule = getattr(Metrics, metric[0])
+        if metric[1] is not None:
+            metricKWARGS = dict((index, metricConfig) for index, metricConfig in enumerate(metric[1]))
+        else:
+            metricKWARGS = {}
+        metricScoreString += "\tFor " + metricModule.getConfig(**metricKWARGS) + " : "
+        metricScoreString += "\n\t\t- Score on train : " + str(metricScores[metric[0]][0])
+        metricScoreString += "\n\t\t- Score on test : " + str(metricScores[metric[0]][1])
+        metricScoreString += "\n\n"
+    return metricScoreString
+
+
+def getTotalMetricScores(metric, trainLabels, testLabels, validationIndices, learningIndices, labels):
+    metricModule = getattr(Metrics, metric[0])
+    if metric[1] is not None:
+        metricKWARGS = dict((index, metricConfig) for index, metricConfig in enumerate(metric[1]))
+    else:
+        metricKWARGS = {}
+    try:
+        trainScore = metricModule.score(labels[learningIndices], trainLabels, **metricKWARGS)
+    except:
+        print(labels[learningIndices])
+        print(trainLabels)
+        import pdb;pdb.set_trace()
+    testScore = metricModule.score(labels[validationIndices], testLabels, **metricKWARGS)
+    return [trainScore, testScore]
+
+
+def getMetricsScores(metrics, trainLabels, testLabels,
+                     validationIndices, learningIndices, labels):
+    metricsScores = {}
+    for metric in metrics:
+        metricsScores[metric[0]] = getTotalMetricScores(metric, trainLabels, testLabels,
+                                                        validationIndices, learningIndices, labels)
+    return metricsScores
+
diff --git a/Code/Tests/Test_Metrics/test_accuracy_score.py b/Code/Tests/Test_Metrics/test_accuracy_score.py
index d370ce9122a5266ca3a5f072750ded76945d7ab6..b20f39441b8a23c7187b28d5efd9b7ecfc45319b 100644
--- a/Code/Tests/Test_Metrics/test_accuracy_score.py
+++ b/Code/Tests/Test_Metrics/test_accuracy_score.py
@@ -3,6 +3,9 @@ import os
 
 from ...MonoMultiViewClassifiers.Metrics import accuracy_score
 
+# Test that each metric exposes the expected functions and that they return the right output types for the right input types
+# Do the same for the different monoview classifiers and the different multiview classifiers
+
 
 class Test_accuracy_score(unittest.TestCase):
 
diff --git a/Code/Tests/Test_MonoView/test_ExecClassifMonoView.py b/Code/Tests/Test_MonoView/test_ExecClassifMonoView.py
index e9879ed75bc20bbbdf8a9240d7b5e1b3eb79fbac..ac3666be0ff590d76bf40c42de09dd9988fda2bc 100644
--- a/Code/Tests/Test_MonoView/test_ExecClassifMonoView.py
+++ b/Code/Tests/Test_MonoView/test_ExecClassifMonoView.py
@@ -76,4 +76,67 @@ class Test_initTrainTest(unittest.TestCase):
                                                         np.array([252,235,344,48,474]),
                                                         np.array([270,189,445,174,445])]))
         np.testing.assert_array_equal(y_train, np.array([0,0,1,0,0]))
-        np.testing.assert_array_equal(y_test, np.array([1,1,0,0,0]))
\ No newline at end of file
+        np.testing.assert_array_equal(y_test, np.array([1,1,0,0,0]))
+
+class Test_getKWARGS(unittest.TestCase):
+
+    @classmethod
+    def setUpClass(cls):
+        cls.classifierModule = None
+        cls.hyperParamSearch = None
+        cls.nIter = 2
+        cls.CL_type = "string"
+        cls.X_train = np.zeros((10,20))
+        cls.y_train = np.zeros((10))
+        cls.randomState = np.random.RandomState(42)
+        cls.outputFileName = "test_file"
+        cls.KFolds = None
+        cls.nbCores = 1
+        cls.metrics = {"accuracy_score":""}
+        cls.kwargs = {}
+
+    def test_simple(cls):
+        clKWARGS = ExecClassifMonoView.getKWARGS(cls.classifierModule,
+                                                 cls.hyperParamSearch,
+                                                 cls.nIter,
+                                                 cls.CL_type,
+                                                 cls.X_train,
+                                                 cls.y_train,
+                                                 cls.randomState,
+                                                 cls.outputFileName,
+                                                 cls.KFolds,
+                                                 cls.nbCores,
+                                                 cls.metrics,
+                                                 cls.kwargs)
+        # Only checks that getKWARGS runs without raising; its output is not asserted yet
+
+class Test_saveResults(unittest.TestCase):
+
+    @classmethod
+    def setUpClass(cls):
+        cls.stringAnalysis = "string analysis"
+        cls.outputFileName = "test_file"
+        cls.full_labels_pred = np.zeros(10)
+        cls.y_train_pred = np.ones(5)
+        cls.y_train = np.zeros(5)
+        cls.imagesAnalysis = {}
+
+    def test_simple(cls):
+        ExecClassifMonoView.saveResults(cls.stringAnalysis,
+                                        cls.outputFileName,
+                                        cls.full_labels_pred,
+                                        cls.y_train_pred,
+                                        cls.y_train,
+                                        cls.imagesAnalysis)
+        # Test if the files are created with the right content
+
+    def test_with_image_analysis(cls):
+        cls.imagesAnalysis = {"test_image":"image.png"} # Image to gen
+        ExecClassifMonoView.saveResults(cls.stringAnalysis,
+                                        cls.outputFileName,
+                                        cls.full_labels_pred,
+                                        cls.y_train_pred,
+                                        cls.y_train,
+                                        cls.imagesAnalysis)
+        # Test if the files are created with the right content
+
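
A minimal usage sketch of the consolidated helpers introduced in Code/MonoMultiViewClassifiers/utils/MultiviewResultAnalysis.py. The toy arrays and the absolute import path below are assumptions for illustration only; inside the package the callers use the relative imports shown in the hunks above, and the Metrics modules are expected to provide score() and getConfig() as they do elsewhere in the repository.

# Hypothetical usage sketch -- assumes the repository root is on PYTHONPATH so that
# Code is importable as a package; real callers import these helpers relatively.
import numpy as np
from Code.MonoMultiViewClassifiers.utils.MultiviewResultAnalysis import (
    printMetricScore, getMetricsScores)

labels = np.array([0, 1, 0, 1, 1, 0])        # ground-truth labels for all examples
learningIndices = np.array([0, 1, 2])        # train split indices
validationIndices = np.array([3, 4, 5])      # test split indices
trainLabels = np.array([0, 1, 0])            # predictions on the train split
testLabels = np.array([1, 1, 0])             # predictions on the test split
metrics = [["accuracy_score", None]]         # [metric name, optional config] pairs

# Compute {metric name: [train score, test score]} and render the text report.
metricsScores = getMetricsScores(metrics, trainLabels, testLabels,
                                 validationIndices, learningIndices, labels)
print(printMetricScore(metricsScores, metrics))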