diff --git a/.gitignore b/.gitignore
index 372dd2963751bb864d3af936476bf6ed0140e5f9..5c20b7643b981502540fe3064e1bd6021359c6e4 100644
--- a/.gitignore
+++ b/.gitignore
@@ -23,4 +23,6 @@ multiview_platform/examples/results/example_3/*
 multiview_platform/examples/results/example_4/*
 multiview_platform/examples/results/example_5/*
 multiview_platform/html_cov/
-multiview_platform/.coverage*
\ No newline at end of file
+multiview_platform/.coverage*
+.coverage*
+htmlcov/
\ No newline at end of file
diff --git a/multiview_platform/mono_multi_view_classifiers/result_analysis/error_analysis.py b/multiview_platform/mono_multi_view_classifiers/result_analysis/error_analysis.py
index f78955dcbe4b0155bdecfe07cbe2ff2e45e0a5b9..97aa6baa7ad3f1b6902c69eb4287a4005660f78e 100644
--- a/multiview_platform/mono_multi_view_classifiers/result_analysis/error_analysis.py
+++ b/multiview_platform/mono_multi_view_classifiers/result_analysis/error_analysis.py
@@ -47,7 +47,7 @@ def get_example_errors(groud_truth, results):
 
 
 def publish_example_errors(example_errors, directory, databaseName,
-                           labels_names, example_ids, labels):
+                           labels_names, example_ids, labels): # pragma: no cover
     logging.debug("Start:\t Label analysis figure generation")
 
     base_file_name = os.path.join(directory, databaseName + "-" )
@@ -70,7 +70,7 @@ def publish_example_errors(example_errors, directory, databaseName,
 
 def publish_all_example_errors(iter_results, directory,
                                stats_iter,
-                               example_ids, labels):
+                               example_ids, labels): # pragma: no cover
     logging.debug(
         "Start:\t Global label analysis figure generation")
 
@@ -132,11 +132,7 @@ def gen_error_data(example_errors):
     data_2d = np.zeros((nb_examples, nb_classifiers))
     for classifierIndex, (classifier_name, error_on_examples) in enumerate(
             example_errors.items()):
-        try:
-            data_2d[:, classifierIndex] = error_on_examples
-        except:
-            import pdb;
-            pdb.set_trace()
+        data_2d[:, classifierIndex] = error_on_examples
     error_on_examples = np.sum(data_2d, axis=1) / nb_classifiers
     return nb_classifiers, nb_examples, classifiers_names, data_2d, error_on_examples
 
@@ -267,7 +263,7 @@ def plot_errors_bar(error_on_examples, nb_examples, file_name,
 
 
 
-def iter_cmap(statsIter):
+def iter_cmap(statsIter): # pragma: no cover
     r"""Used to generate a colormap that will have a tick for each iteration : the whiter the better.
 
     Parameters
diff --git a/multiview_platform/mono_multi_view_classifiers/result_analysis/execution.py b/multiview_platform/mono_multi_view_classifiers/result_analysis/execution.py
index 88392cd673ef4a701b8e796f513d27f47db6001d..e620a9340b47b05760706f72cab16ae208eeb053 100644
--- a/multiview_platform/mono_multi_view_classifiers/result_analysis/execution.py
+++ b/multiview_platform/mono_multi_view_classifiers/result_analysis/execution.py
@@ -8,7 +8,7 @@ from .error_analysis import get_example_errors, publish_example_errors, publish_
 from .feature_importances import get_feature_importances, publish_feature_importances
 
 def analyze(results, stats_iter, benchmark_argument_dictionaries,
-                metrics, directory, example_ids, labels):
+                metrics, directory, example_ids, labels): # pragma: no cover
     """Used to analyze the results of the previous benchmarks"""
     data_base_name = benchmark_argument_dictionaries[0]["args"]["name"]
 
@@ -105,7 +105,7 @@ def analyze_iterations(results, benchmark_argument_dictionaries, stats_iter,
 
 
 def analyze_all(iter_results, stats_iter, directory, data_base_name,
-                example_ids, label_names):
+                example_ids, label_names): # pragma: no cover
     """Used to format the results in order to plot the mean results on
     the iterations"""
     metrics_analysis, class_metrics_analysis, error_analysis, feature_importances, \
diff --git a/multiview_platform/mono_multi_view_classifiers/result_analysis/feature_importances.py b/multiview_platform/mono_multi_view_classifiers/result_analysis/feature_importances.py
index c30a1c6322547e156c9d20c104b68efccc07b2e9..459f664fb6231161e0e75a10ed3009e0dd27950c 100644
--- a/multiview_platform/mono_multi_view_classifiers/result_analysis/feature_importances.py
+++ b/multiview_platform/mono_multi_view_classifiers/result_analysis/feature_importances.py
@@ -38,7 +38,7 @@ def get_feature_importances(result, feature_names=None):
     return feature_importances
 
 def publish_feature_importances(feature_importances, directory, database_name,
-                                feature_stds=None):
+                                feature_stds=None):  # pragma: no cover
     for view_name, feature_importance in feature_importances.items():
         if not os.path.exists(os.path.join(directory, "feature_importances")):
             os.mkdir(os.path.join(directory, "feature_importances"))
diff --git a/multiview_platform/mono_multi_view_classifiers/result_analysis/metric_analysis.py b/multiview_platform/mono_multi_view_classifiers/result_analysis/metric_analysis.py
index 94c50be925c2f60003e8b781c65d69031759329e..fff1e36511fd9ac1952ce6af6b5d7e801ff0728b 100644
--- a/multiview_platform/mono_multi_view_classifiers/result_analysis/metric_analysis.py
+++ b/multiview_platform/mono_multi_view_classifiers/result_analysis/metric_analysis.py
@@ -67,7 +67,7 @@ def get_metrics_scores(metrics, results, label_names):
 
 
 def publish_metrics_graphs(metrics_scores, directory, database_name,
-                           labels_names, class_metric_scores):
+                           labels_names, class_metric_scores):  # pragma: no cover
     r"""Used to sort the results (names and both scores) in descending test
     score order.
 
@@ -114,7 +114,7 @@ def publish_metrics_graphs(metrics_scores, directory, database_name,
 
 def publish_all_metrics_scores(iter_results, class_iter_results, directory,
                                data_base_name, stats_iter, label_names,
-                               min_size=10):
+                               min_size=10): # pragma: no cover
     results = []
     secure_file_path(os.path.join(directory, "a"))
 
diff --git a/multiview_platform/tests/test_result_analysis/test_duration_analysis.py b/multiview_platform/tests/test_result_analysis/test_duration_analysis.py
index e69de29bb2d1d6434b8b29ae775ad8c2e48c5391..efe6c68d792fe1d638821ea70f649a342f41a664 100644
--- a/multiview_platform/tests/test_result_analysis/test_duration_analysis.py
+++ b/multiview_platform/tests/test_result_analysis/test_duration_analysis.py
@@ -0,0 +1,42 @@
+import unittest
+import numpy as np
+import pandas as pd
+
+from multiview_platform.mono_multi_view_classifiers.result_analysis import duration_analysis
+
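+# Minimal fake result exposing only the durations and classifier name read by get_duration.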
+class FakeClassifierResult:
+
+    def __init__(self, i=0):
+        self.i=i
+        if i == 0:
+            self.hps_duration = 10
+            self.fit_duration = 12
+            self.pred_duration = 15
+        else:
+            self.hps_duration = 1
+            self.fit_duration = 2
+            self.pred_duration = 5
+
+
+    def get_classifier_name(self):
+        if self.i == 0:
+            return 'test1'
+        else:
+            return 'test2'
+
+
+
+class Test_get_duration(unittest.TestCase):
+
+    def test_simple(self):
+        results = [FakeClassifierResult(), FakeClassifierResult(i=1)]
+        durs = duration_analysis.get_duration(results)
+        pd.testing.assert_frame_equal(durs,
+                                      pd.DataFrame(index=['test1', 'test2'],
+                                                   columns=['hps', 'fit', 'pred'],
+                                                   data=np.array([[10, 12, 15],
+                                                                  [1, 2, 5]]),
+                                                   dtype=object))
+
+
diff --git a/multiview_platform/tests/test_result_analysis/test_execution.py b/multiview_platform/tests/test_result_analysis/test_execution.py
index cafd6c90e2c73402d1ad68c4121580ff1d8dd86e..f42f818c48a2e774c23a51b75542a5b9b1cd76f9 100644
--- a/multiview_platform/tests/test_result_analysis/test_execution.py
+++ b/multiview_platform/tests/test_result_analysis/test_execution.py
@@ -1,11 +1,27 @@
 import unittest
 import numpy as np
 import pandas as pd
+import os
 
 from multiview_platform.mono_multi_view_classifiers.monoview.monoview_utils import MonoviewResult
 from multiview_platform.mono_multi_view_classifiers.multiview.multiview_utils import MultiviewResult
 
-from multiview_platform.mono_multi_view_classifiers.result_analysis.execution import format_previous_results, get_arguments
+from multiview_platform.mono_multi_view_classifiers.result_analysis.execution import format_previous_results, get_arguments, analyze_iterations
+from multiview_platform.tests.utils import rm_tmp, tmp_path, test_dataset
+
+
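+# Minimal fake classifier result exposing only the attributes used by analyze_iterations in these tests.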
+class FakeClassifierResult:
+
+    def __init__(self, i=1):
+        self.classifier_name='test'+str(i)
+        self.full_labels_pred = np.array([0,1,1,2,1])
+        self.hps_duration=i
+        self.fit_duration=i
+        self.pred_duration=i
+
+    def get_classifier_name(self):
+        return self.classifier_name
 
 class Test_format_previous_results(unittest.TestCase):
 
@@ -82,3 +97,52 @@
     def test_benchmark_wanted(self):
         argument_dict = get_arguments(self.benchamrk_argument_dictionaries, "good_flag")
         self.assertTrue(argument_dict["valid"])
+
+
+class Test_analyze_iterations(unittest.TestCase):
+
+    @classmethod
+    def setUpClass(cls):
+        os.mkdir(tmp_path)
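+        # Two fake benchmark iterations, matched to the argument dictionaries below by their flags (0 and 1).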
+        cls.results = [
+            [0, [FakeClassifierResult(), FakeClassifierResult(i=2)], []],
+            [1, [FakeClassifierResult(), FakeClassifierResult(i=2)], []]]
+        cls.benchmark_argument_dictionaries = [
+            {"labels_dictionary": {0: "zero", 1: "one", 2: "two"},
+             "flag": 0, "directory": tmp_path,
+             "args": {"name": "test_dataset"}},
+            {"labels_dictionary": {0: "zero", 1: "one", 2: "two"},
+             "flag": 1, "directory": tmp_path,
+             "args": {"name": "test_dataset"}}]
+        cls.stats_iter = 2
+        cls.metrics = {}
+        cls.example_ids = ['ex1', 'ex5','ex4','ex3','ex2',]
+        cls.labels = np.array([0,1,2,1,1])
+
+
+    @classmethod
+    def tearDownClass(cls):
+        rm_tmp()
+
+    def test_simple(self):
+        analysis = analyze_iterations(self.results,
+                                      self.benchmark_argument_dictionaries,
+                                      self.stats_iter,
+                                      self.metrics,
+                                      self.example_ids,
+                                      self.labels)
+        res, iter_res, tracebacks, labels_names = analysis
+        self.assertEqual(labels_names, ['zero', 'one', 'two'])
+
+        self.assertEqual(iter_res['class_metrics_scores'], [{}, {}])
+
+        pd.testing.assert_frame_equal(iter_res['durations'][0],
+                                      pd.DataFrame(index=['test1', 'test2'], columns=['hps', 'fit', 'pred'],
+                                                   data=np.array([[1, 1, 1], [2, 2, 2]]), dtype=object))
+        np.testing.assert_array_equal(iter_res['example_errors'][0]['test1'], np.array([1, 1, 0, 0, 1]))
+        self.assertEqual(iter_res["feature_importances"], [{},{}])
+        np.testing.assert_array_equal(iter_res['labels'], np.array([0, 1, 2, 1, 1]))
+        self.assertEqual(iter_res['metrics_scores'], [{},{}])
+
+
diff --git a/multiview_platform/tests/test_result_analysis/test_feature_importances.py b/multiview_platform/tests/test_result_analysis/test_feature_importances.py
index e69de29bb2d1d6434b8b29ae775ad8c2e48c5391..2a69e88c2bcbb036611d4a3006a69ac144b22004 100644
--- a/multiview_platform/tests/test_result_analysis/test_feature_importances.py
+++ b/multiview_platform/tests/test_result_analysis/test_feature_importances.py
@@ -0,0 +1,37 @@
+import unittest
+import numpy as np
+import pandas as pd
+
+from multiview_platform.mono_multi_view_classifiers.result_analysis import feature_importances
+from multiview_platform.mono_multi_view_classifiers.monoview.monoview_utils import MonoviewResult
+
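+# Fake monoview classifier exposing only the feature_importances_ attribute.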
+class FakeClassifier:
+    def __init__(self, i=0):
+        self.feature_importances_ = [i, i+1]
+
+class FakeClassifierResult(MonoviewResult):
+
+    def __init__(self, i=0):
+        self.i=i
+        self.hps_duration = i*10
+        self.fit_duration = (i+2)*10
+        self.pred_duration = (i+5)*10
+        self.clf = FakeClassifier(i)
+        self.view_name = 'testview'+str(i)
+        self.classifier_name = "test"+str(i)
+
+    def get_classifier_name(self):
+        return self.classifier_name
+
+
+
+class Test_get_feature_importances(unittest.TestCase):
+
+    def test_simple(self):
+        results = [FakeClassifierResult(), FakeClassifierResult(i=1)]
+        feat_importance = feature_importances.get_feature_importances(results)
+        pd.testing.assert_frame_equal(feat_importance["testview1"],
+                                      pd.DataFrame(index=None, columns=['test1'],
+                                                   data=np.array([1,2]).reshape((2,1)),
+                                                   ))
\ No newline at end of file
diff --git a/multiview_platform/tests/test_result_analysis/test_metric_analysis.py b/multiview_platform/tests/test_result_analysis/test_metric_analysis.py
index 800b916e87cafe07b2fe66717f3778a9cfe6a4f3..a34f06a462784b1358b0af57c48df95da62fbd82 100644
--- a/multiview_platform/tests/test_result_analysis/test_metric_analysis.py
+++ b/multiview_platform/tests/test_result_analysis/test_metric_analysis.py
@@ -6,7 +6,7 @@ import os
 from multiview_platform.mono_multi_view_classifiers.monoview.monoview_utils import MonoviewResult
 from multiview_platform.mono_multi_view_classifiers.multiview.multiview_utils import MultiviewResult
 
-from multiview_platform.mono_multi_view_classifiers.result_analysis.metric_analysis import get_metrics_scores, init_plot
+from multiview_platform.mono_multi_view_classifiers.result_analysis.metric_analysis import get_metrics_scores, init_plot, get_fig_size, sort_by_test_score
 
 class Test_get_metrics_scores(unittest.TestCase):
 
@@ -150,4 +150,31 @@ class Test_init_plot(unittest.TestCase):
         np.testing.assert_array_equal(classifier_names, np.array(["dt-1", "mv"]))
         self.assertEqual(nb_results, 2)
         self.assertEqual(results, [["dt-1", "acc", data[1,0], 0.0, data[1,0]],
-                                   ["mv", "acc", data[1,1], 0.0, data[1,1]]])
\ No newline at end of file
+                                   ["mv", "acc", data[1,1], 0.0, data[1,1]]])
+
+
+class Test_small_func(unittest.TestCase):
+
+    def test_fig_size(self):
+        kw, width = get_fig_size(5)
+        self.assertEqual(kw, {"figsize":(15,5)})
+        self.assertEqual(width, 0.35)
+        kw, width = get_fig_size(100)
+        self.assertEqual(kw, {"figsize": (100, 100/3)})
+        self.assertEqual(width, 0.35)
+
+    def test_sort_by_test_scores(self):
+        train_scores = np.array([1,2,3,4])
+        test_scores = np.array([4, 3, 2, 1])
+        train_STDs = np.array([1, 2, 3, 4])
+        test_STDs = np.array([1, 2, 3, 4])
+        names = np.array(['1', '2', '3', '4'])
+        sorted_names, sorted_train_scores, \
+        sorted_test_scores, sorted_train_STDs, \
+        sorted_test_STDs = sort_by_test_score(train_scores, test_scores,
+                                              names, train_STDs, test_STDs)
+        np.testing.assert_array_equal(sorted_names, np.array(['4', '3', '2', '1']))
+        np.testing.assert_array_equal(sorted_test_scores, [1, 2, 3, 4])
+        np.testing.assert_array_equal(sorted_test_STDs, [4, 3, 2, 1])
+        np.testing.assert_array_equal(sorted_train_scores, [4, 3, 2, 1])
+        np.testing.assert_array_equal(sorted_train_STDs, [4, 3, 2, 1])
\ No newline at end of file
diff --git a/multiview_platform/tests/test_result_analysis/test_tracebacks_analysis.py b/multiview_platform/tests/test_result_analysis/test_tracebacks_analysis.py
index e69de29bb2d1d6434b8b29ae775ad8c2e48c5391..61296f85e01400c823ecf7bc384a8f2751d20f4a 100644
--- a/multiview_platform/tests/test_result_analysis/test_tracebacks_analysis.py
+++ b/multiview_platform/tests/test_result_analysis/test_tracebacks_analysis.py
@@ -0,0 +1,48 @@
+import unittest
+import numpy as np
+import pandas as pd
+import os
+
+from multiview_platform.mono_multi_view_classifiers.result_analysis import tracebacks_analysis
+from multiview_platform.tests.utils import tmp_path, rm_tmp
+
+class FakeClassifierResult:
+
+    def __init__(self, i=0):
+        self.i=i
+        if i == 0:
+            self.hps_duration = 10
+            self.fit_duration = 12
+            self.pred_duration = 15
+        else:
+            self.hps_duration = 1
+            self.fit_duration = 2
+            self.pred_duration = 5
+
+
+    def get_classifier_name(self):
+        if self.i == 0:
+            return 'test1'
+        else:
+            return 'test2'
+
+
+
+class Test_funcs(unittest.TestCase):
+
+    @classmethod
+    def setUpClass(cls):
+        os.mkdir(tmp_path)
+        cls.res_file = open(os.path.join(tmp_path,"tmp.txt"), "w")
+
+    @classmethod
+    def tearDownClass(cls):
+        rm_tmp()
+
+    def test_save_dict_to_text(self):
+        keys = tracebacks_analysis.save_dict_to_text({"a":"i", "b":"j"}, self.res_file)
+        self.res_file.close()
+        self.assertEqual(list(keys),["a", "b"])
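+        # The report should list the failed algorithm names, then each name followed by its traceback.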
+        with open(os.path.join(tmp_path,"tmp.txt"), 'r') as res_file:
+            self.assertEqual(res_file.read(), 'Failed algorithms : \n\ta,\n\tb.\n\n\na\n\ni\n\n\nb\n\nj\n\n\n')