diff --git a/config_files/config_test.yml b/config_files/config_test.yml
index 8476b0e445d290f1f25b10898e39a842b9014d9e..8135f0e500b8135f75f35fd0371b817374b22a02 100644
--- a/config_files/config_test.yml
+++ b/config_files/config_test.yml
@@ -22,8 +22,8 @@ Classification:
   nb_folds: 2
   nb_class: 2
   classes:
-  type: ["multiview"]
-  algos_monoview: ["adaboost",]
+  type: ["multiview", "monoview"]
+  algos_monoview: ["adaboost", "decision_tree"]
   algos_multiview: ["svm_jumbo_fusion"]
   stats_iter: 1
   metrics: ["accuracy_score", "f1_score"]
diff --git a/multiview_platform/mono_multi_view_classifiers/exec_classif.py b/multiview_platform/mono_multi_view_classifiers/exec_classif.py
index 68e4de7caf89e9bcc4bfefba2e9abc1f36246a0c..5206ed13db65d43c40f27dc7cc13d8b5f53b3a90 100644
--- a/multiview_platform/mono_multi_view_classifiers/exec_classif.py
+++ b/multiview_platform/mono_multi_view_classifiers/exec_classif.py
@@ -4,6 +4,7 @@ import math
 import os
 import pkgutil
 import time
+import traceback
 
 import matplotlib
 import itertools
@@ -528,114 +529,114 @@ def benchmark_init(directory, classification_indices, labels, labels_dictionary,
     return results_monoview, labels_names
 
 
-def exec_one_benchmark(core_index=-1, labels_dictionary=None, directory=None,
-                     classification_indices=None, args=None,
-                     k_folds=None, random_state=None, hyper_param_search=None,
-                     metrics=None, argument_dictionaries=None,
-                     benchmark=None, views=None, views_indices=None, flag=None,
-                     labels=None,
-                     exec_monoview_multicore=exec_monoview_multicore,
-                     exec_multiview_multicore=exec_multiview_multicore,):
-    """Used to run a benchmark using one core. ExecMonoview_multicore, initMultiviewArguments and
-     exec_multiview_multicore args are only used for tests"""
-
-    results_monoview, labels_names = benchmark_init(directory,
-                                                    classification_indices, labels,
-                                                    labels_dictionary, k_folds)
-
-    logging.debug("Start:\t monoview benchmark")
-    results_monoview += [
-        exec_monoview_multicore(directory, args["Base"]["name"], labels_names,
-                               classification_indices, k_folds,
-                               core_index, args["Base"]["type"], args["Base"]["pathf"], random_state,
-                               labels,
-                               hyper_param_search=hyper_param_search,
-                               metrics=metrics,
-                               n_iter=args["Classification"]["hps_iter"], **argument)
-        for argument in argument_dictionaries["Monoview"]]
-    logging.debug("Done:\t monoview benchmark")
-
-
-    logging.debug("Start:\t multiview benchmark")
-    results_multiview = [
-        exec_multiview_multicore(directory, core_index, args["Base"]["name"],
-                                classification_indices, k_folds, args["Base"]["type"],
-                                args["Base"]["pathf"], labels_dictionary, random_state,
-                                labels, hyper_param_search=hyper_param_search,
-                                metrics=metrics, n_iter=args["Classification"]["hps_iter"],
-                                **arguments)
-        for arguments in argument_dictionaries["multiview"]]
-    logging.debug("Done:\t multiview benchmark")
-
-    return [flag, results_monoview + results_multiview]
-
-
-def exec_one_benchmark_multicore(nb_cores=-1, labels_dictionary=None,
-                                 directory=None, classification_indices=None,
-                                 args=None,
-                                 k_folds=None, random_state=None,
-                                 hyper_param_search=None, metrics=None,
-                                 argument_dictionaries=None,
-                                 benchmark=None, views=None, views_indices=None,
-                                 flag=None, labels=None,
-                                 exec_monoview_multicore=exec_monoview_multicore,
-                                 exec_multiview_multicore=exec_multiview_multicore,):
-    """Used to run a benchmark using multiple cores. ExecMonoview_multicore, initMultiviewArguments and
-     exec_multiview_multicore args are only used for tests"""
-
-    results_monoview, labels_names = benchmark_init(directory,
-                                                    classification_indices, labels,
-                                                    labels_dictionary, k_folds)
-
-    logging.debug("Start:\t monoview benchmark")
-    nb_experiments = len(argument_dictionaries["monoview"])
-    nb_multicore_to_do = int(math.ceil(float(nb_experiments) / nb_cores))
-    for step_index in range(nb_multicore_to_do):
-        results_monoview += (Parallel(n_jobs=nb_cores)(
-            delayed(exec_monoview_multicore)(directory, args["Base"]["name"], labels_names,
-                                            classification_indices, k_folds,
-                                            core_index, args["Base"]["type"], args["Base"]["pathf"],
-                                            random_state, labels,
-                                            hyper_param_search=hyper_param_search,
-                                            metrics=metrics,
-                                            n_iter=args["Classification"]["hps_iter"],
-                                            **argument_dictionaries["monoview"][
-                                            core_index + step_index * nb_cores])
-            for core_index in
-            range(min(nb_cores, nb_experiments - step_index * nb_cores))))
-    logging.debug("Done:\t monoview benchmark")
-
-    logging.debug("Start:\t multiview arguments initialization")
-    # argument_dictionaries = initMultiviewArguments(args, benchmark, views,
-    #                                               views_indices,
-    #                                               argument_dictionaries,
-    #                                               random_state, directory,
-    #                                               resultsMonoview,
-    #                                               classification_indices)
-    logging.debug("Done:\t multiview arguments initialization")
-
-    logging.debug("Start:\t multiview benchmark")
-    results_multiview = []
-    nb_experiments = len(argument_dictionaries["multiview"])
-    nb_multicore_to_do = int(math.ceil(float(nb_experiments) / nb_cores))
-    for step_index in range(nb_multicore_to_do):
-        results_multiview += Parallel(n_jobs=nb_cores)(
-            delayed(exec_multiview_multicore)(directory, core_index, args["Base"]["name"],
-                                              classification_indices, k_folds,
-                                              args["Base"]["type"], args["Base"]["pathf"],
-                                              labels_dictionary, random_state,
-                                              labels,
-                                              hyper_param_search=hyper_param_search,
-                                              metrics=metrics,
-                                              n_iter=args["Classification"]["hps_iter"],
-                                              **
-                                             argument_dictionaries["multiview"][
-                                                 step_index * nb_cores + core_index])
-            for core_index in
-            range(min(nb_cores, nb_experiments - step_index * nb_cores)))
-    logging.debug("Done:\t multiview benchmark")
-
-    return [flag, results_monoview + results_multiview]
+# def exec_one_benchmark(core_index=-1, labels_dictionary=None, directory=None,
+#                      classification_indices=None, args=None,
+#                      k_folds=None, random_state=None, hyper_param_search=None,
+#                      metrics=None, argument_dictionaries=None,
+#                      benchmark=None, views=None, views_indices=None, flag=None,
+#                      labels=None,
+#                      exec_monoview_multicore=exec_monoview_multicore,
+#                      exec_multiview_multicore=exec_multiview_multicore,):
+#     """Used to run a benchmark using one core. ExecMonoview_multicore, initMultiviewArguments and
+#      exec_multiview_multicore args are only used for tests"""
+#
+#     results_monoview, labels_names = benchmark_init(directory,
+#                                                     classification_indices, labels,
+#                                                     labels_dictionary, k_folds)
+#
+#     logging.debug("Start:\t monoview benchmark")
+#     results_monoview += [
+#         exec_monoview_multicore(directory, args["Base"]["name"], labels_names,
+#                                classification_indices, k_folds,
+#                                core_index, args["Base"]["type"], args["Base"]["pathf"], random_state,
+#                                labels,
+#                                hyper_param_search=hyper_param_search,
+#                                metrics=metrics,
+#                                n_iter=args["Classification"]["hps_iter"], **argument)
+#         for argument in argument_dictionaries["Monoview"]]
+#     logging.debug("Done:\t monoview benchmark")
+#
+#
+#     logging.debug("Start:\t multiview benchmark")
+#     results_multiview = [
+#         exec_multiview_multicore(directory, core_index, args["Base"]["name"],
+#                                 classification_indices, k_folds, args["Base"]["type"],
+#                                 args["Base"]["pathf"], labels_dictionary, random_state,
+#                                 labels, hyper_param_search=hyper_param_search,
+#                                 metrics=metrics, n_iter=args["Classification"]["hps_iter"],
+#                                 **arguments)
+#         for arguments in argument_dictionaries["multiview"]]
+#     logging.debug("Done:\t multiview benchmark")
+#
+#     return [flag, results_monoview + results_multiview]
+#
+#
+# def exec_one_benchmark_multicore(nb_cores=-1, labels_dictionary=None,
+#                                  directory=None, classification_indices=None,
+#                                  args=None,
+#                                  k_folds=None, random_state=None,
+#                                  hyper_param_search=None, metrics=None,
+#                                  argument_dictionaries=None,
+#                                  benchmark=None, views=None, views_indices=None,
+#                                  flag=None, labels=None,
+#                                  exec_monoview_multicore=exec_monoview_multicore,
+#                                  exec_multiview_multicore=exec_multiview_multicore,):
+#     """Used to run a benchmark using multiple cores. ExecMonoview_multicore, initMultiviewArguments and
+#      exec_multiview_multicore args are only used for tests"""
+#
+#     results_monoview, labels_names = benchmark_init(directory,
+#                                                     classification_indices, labels,
+#                                                     labels_dictionary, k_folds)
+#
+#     logging.debug("Start:\t monoview benchmark")
+#     nb_experiments = len(argument_dictionaries["monoview"])
+#     nb_multicore_to_do = int(math.ceil(float(nb_experiments) / nb_cores))
+#     for step_index in range(nb_multicore_to_do):
+#         results_monoview += (Parallel(n_jobs=nb_cores)(
+#             delayed(exec_monoview_multicore)(directory, args["Base"]["name"], labels_names,
+#                                             classification_indices, k_folds,
+#                                             core_index, args["Base"]["type"], args["Base"]["pathf"],
+#                                             random_state, labels,
+#                                             hyper_param_search=hyper_param_search,
+#                                             metrics=metrics,
+#                                             n_iter=args["Classification"]["hps_iter"],
+#                                             **argument_dictionaries["monoview"][
+#                                             core_index + step_index * nb_cores])
+#             for core_index in
+#             range(min(nb_cores, nb_experiments - step_index * nb_cores))))
+#     logging.debug("Done:\t monoview benchmark")
+#
+#     logging.debug("Start:\t multiview arguments initialization")
+#     # argument_dictionaries = initMultiviewArguments(args, benchmark, views,
+#     #                                               views_indices,
+#     #                                               argument_dictionaries,
+#     #                                               random_state, directory,
+#     #                                               resultsMonoview,
+#     #                                               classification_indices)
+#     logging.debug("Done:\t multiview arguments initialization")
+#
+#     logging.debug("Start:\t multiview benchmark")
+#     results_multiview = []
+#     nb_experiments = len(argument_dictionaries["multiview"])
+#     nb_multicore_to_do = int(math.ceil(float(nb_experiments) / nb_cores))
+#     for step_index in range(nb_multicore_to_do):
+#         results_multiview += Parallel(n_jobs=nb_cores)(
+#             delayed(exec_multiview_multicore)(directory, core_index, args["Base"]["name"],
+#                                               classification_indices, k_folds,
+#                                               args["Base"]["type"], args["Base"]["pathf"],
+#                                               labels_dictionary, random_state,
+#                                               labels,
+#                                               hyper_param_search=hyper_param_search,
+#                                               metrics=metrics,
+#                                               n_iter=args["Classification"]["hps_iter"],
+#                                               **
+#                                              argument_dictionaries["multiview"][
+#                                                  step_index * nb_cores + core_index])
+#             for core_index in
+#             range(min(nb_cores, nb_experiments - step_index * nb_cores)))
+#     logging.debug("Done:\t multiview benchmark")
+#
+#     return [flag, results_monoview + results_multiview]
 
 
 def exec_one_benchmark_mono_core(dataset_var=None, labels_dictionary=None,
@@ -651,15 +652,20 @@ def exec_one_benchmark_mono_core(dataset_var=None, labels_dictionary=None,
                                                  labels_dictionary, k_folds)
     logging.getLogger('matplotlib.font_manager').disabled = True
     logging.debug("Start:\t monoview benchmark")
+    traceback_outputs = {}
     for arguments in argument_dictionaries["monoview"]:
-        X = dataset_var.get_v(arguments["view_index"])
-        Y = labels
-        results_monoview += [
-            exec_monoview(directory, X, Y, args["Base"]["name"], labels_names,
-                          classification_indices, k_folds,
-                          1, args["Base"]["type"], args["Base"]["pathf"], random_state,
-                          hyper_param_search=hyper_param_search, metrics=metrics,
-                          n_iter=args["Classification"]["hps_iter"], **arguments)]
+        try:
+            X = dataset_var.get_v(arguments["view_index"])
+            Y = labels
+            results_monoview += [
+                exec_monoview(directory, X, Y, args["Base"]["name"], labels_names,
+                              classification_indices, k_folds,
+                              1, args["Base"]["type"], args["Base"]["pathf"], random_state,
+                              hyper_param_search=hyper_param_search, metrics=metrics,
+                              n_iter=args["Classification"]["hps_iter"], **arguments)]
+        except Exception:
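+            # Keep the benchmark running and store the failure under "<classifier_name>-<view_name>".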
+            traceback_outputs[arguments["classifier_name"]+"-"+arguments["view_name"]] = traceback.format_exc()
+
     logging.debug("Done:\t monoview benchmark")
 
     logging.debug("Start:\t multiview arguments initialization")
@@ -675,15 +681,18 @@ def exec_one_benchmark_mono_core(dataset_var=None, labels_dictionary=None,
     logging.debug("Start:\t multiview benchmark")
     results_multiview = []
     for arguments in argument_dictionaries["multiview"]:
-        results_multiview += [
-            exec_multiview(directory, dataset_var, args["Base"]["name"], classification_indices,
-                          k_folds, 1, args["Base"]["type"],
-                          args["Base"]["pathf"], labels_dictionary, random_state, labels,
-                          hyper_param_search=hyper_param_search,
-                          metrics=metrics, n_iter=args["Classification"]["hps_iter"], **arguments)]
+        try:
+            results_multiview += [
+                exec_multiview(directory, dataset_var, args["Base"]["name"], classification_indices,
+                              k_folds, 1, args["Base"]["type"],
+                              args["Base"]["pathf"], labels_dictionary, random_state, labels,
+                              hyper_param_search=hyper_param_search,
+                              metrics=metrics, n_iter=args["Classification"]["hps_iter"], **arguments)]
+        except Exception:
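+            # Multiview failures are keyed by the classifier name only.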
+            traceback_outputs[arguments["classifier_name"]] = traceback.format_exc()
     logging.debug("Done:\t multiview benchmark")
 
-    return [flag, results_monoview + results_multiview]
+    return [flag, results_monoview + results_multiview, traceback_outputs]
 
 
 def exec_benchmark(nb_cores, stats_iter, nb_multiclass,
@@ -691,8 +700,8 @@ def exec_benchmark(nb_cores, stats_iter, nb_multiclass,
                    directories,
                    directory, multi_class_labels, metrics, labels_dictionary,
                    nb_labels, dataset_var,
-                   exec_one_benchmark=exec_one_benchmark,
-                   exec_one_benchmark_multicore=exec_one_benchmark_multicore,
+                   # exec_one_benchmark=exec_one_benchmark,
+                   # exec_one_benchmark_multicore=exec_one_benchmark_multicore,
                    exec_one_benchmark_mono_core=exec_one_benchmark_mono_core,
                    get_results=get_results, delete=delete_HDF5):
     r"""Used to execute the needed benchmark(s) on multicore or mono-core functions.
@@ -737,24 +746,24 @@ def exec_benchmark(nb_cores, stats_iter, nb_multiclass,
     """
     logging.debug("Start:\t Executing all the needed biclass benchmarks")
     results = []
-    if nb_cores > 1:
-        if stats_iter > 1 or nb_multiclass > 1:
-            nb_exps_to_do = len(benchmark_arguments_dictionaries)
-            nb_multicore_to_do = range(int(math.ceil(float(nb_exps_to_do) / nb_cores)))
-            for step_index in nb_multicore_to_do:
-                results += (Parallel(n_jobs=nb_cores)(delayed(exec_one_benchmark)
-                                                     (core_index=core_index,
-                                                      **
-                                                      benchmark_arguments_dictionaries[
-                                                          core_index + step_index * nb_cores])
-                                                     for core_index in range(
-                    min(nb_cores, nb_exps_to_do - step_index * nb_cores))))
-        else:
-            results += [exec_one_benchmark_multicore(nb_cores=nb_cores, **
-            benchmark_arguments_dictionaries[0])]
-    else:
-        for arguments in benchmark_arguments_dictionaries:
-            results += [exec_one_benchmark_mono_core(dataset_var=dataset_var, **arguments)]
+    # if nb_cores > 1:
+    #     if stats_iter > 1 or nb_multiclass > 1:
+    #         nb_exps_to_do = len(benchmark_arguments_dictionaries)
+    #         nb_multicore_to_do = range(int(math.ceil(float(nb_exps_to_do) / nb_cores)))
+    #         for step_index in nb_multicore_to_do:
+    #             results += (Parallel(n_jobs=nb_cores)(delayed(exec_one_benchmark)
+    #                                                  (core_index=core_index,
+    #                                                   **
+    #                                                   benchmark_arguments_dictionaries[
+    #                                                       core_index + step_index * nb_cores])
+    #                                                  for core_index in range(
+    #                 min(nb_cores, nb_exps_to_do - step_index * nb_cores))))
+    #     else:
+    #         results += [exec_one_benchmark_multicore(nb_cores=nb_cores, **
+    #         benchmark_arguments_dictionaries[0])]
+    # else:
+    for arguments in benchmark_arguments_dictionaries:
+        results += [exec_one_benchmark_mono_core(dataset_var=dataset_var, **arguments)]
     logging.debug("Done:\t Executing all the needed biclass benchmarks")
 
     # Do everything with flagging
diff --git a/multiview_platform/mono_multi_view_classifiers/monoview/exec_classif_mono_view.py b/multiview_platform/mono_multi_view_classifiers/monoview/exec_classif_mono_view.py
index e2f18a0af4c2396246fe7bee3dfc7f388b5fbabb..cb360ad64eeb81bafb3eed86b76bfce1a8ab141b 100644
--- a/multiview_platform/mono_multi_view_classifiers/monoview/exec_classif_mono_view.py
+++ b/multiview_platform/mono_multi_view_classifiers/monoview/exec_classif_mono_view.py
@@ -52,7 +52,7 @@ def exec_monoview(directory, X, Y, name, labels_names, classificationIndices,
     logging.debug("Start:\t Loading data")
     kwargs, \
     t_start, \
-    feat, \
+    view_name, \
     classifier_name, \
     X, \
     learningRate, \
@@ -62,8 +62,8 @@ def exec_monoview(directory, X, Y, name, labels_names, classificationIndices,
     logging.debug("Done:\t Loading data")
 
     logging.debug(
-        "Info:\t Classification - Database:" + str(name) + " Feature:" + str(
-            feat) + " train ratio:"
+        "Info:\t Classification - Database:" + str(name) + " View:" + str(
+            view_name) + " train ratio:"
         + str(learningRate) + ", CrossValidation k-folds: " + str(
             KFolds.n_splits) + ", cores:"
         + str(nbCores) + ", algorithm : " + classifier_name)
@@ -120,7 +120,7 @@ def exec_monoview(directory, X, Y, name, labels_names, classificationIndices,
     stringAnalysis, \
     imagesAnalysis, \
     metricsScores = execute(name, classificationIndices, KFolds, nbCores,
-                            hyper_parameter_search, metrics, n_iter, feat, classifier_name,
+                            hyper_parameter_search, metrics, n_iter, view_name, classifier_name,
                             clKWARGS, labels_names, X.shape,
                             y_train, y_train_pred, y_test, y_test_pred, t_end,
                             randomState, classifier, outputFileName)
@@ -135,10 +135,10 @@ def exec_monoview(directory, X, Y, name, labels_names, classificationIndices,
     viewIndex = args["view_index"]
     if testFoldsPreds is None:
         testFoldsPreds = y_train_pred
-    return monoview_utils.MonoviewResult(viewIndex, classifier_name, feat, metricsScores,
+    return monoview_utils.MonoviewResult(viewIndex, classifier_name, view_name, metricsScores,
                                          full_pred, clKWARGS,
                                          y_test_multiclass_pred, testFoldsPreds, classifier, X_train.shape[1])
-    # return viewIndex, [CL_type, feat, metricsScores, full_labels_pred, clKWARGS, y_test_multiclass_pred, testFoldsPreds]
+    # return viewIndex, [CL_type, view_name, metricsScores, full_labels_pred, clKWARGS, y_test_multiclass_pred, testFoldsPreds]
 
 
 def initConstants(args, X, classificationIndices, labels_names,
diff --git a/multiview_platform/mono_multi_view_classifiers/result_analysis.py b/multiview_platform/mono_multi_view_classifiers/result_analysis.py
index 5cb0318d6d1ac691799645da1c9157b2e6d2f43f..606f6a8196de3067f0547d490bbde9a38eabe448 100644
--- a/multiview_platform/mono_multi_view_classifiers/result_analysis.py
+++ b/multiview_platform/mono_multi_view_classifiers/result_analysis.py
@@ -3,6 +3,7 @@ import errno
 import logging
 import os
 import time
+import yaml
 
 import matplotlib as mpl
 from matplotlib.patches import Patch
@@ -14,12 +15,23 @@ import plotly
 
 # Import own Modules
 from .monoview.monoview_utils import MonoviewResult
+from . import metrics
 from .multiview.multiview_utils import MultiviewResult
 
 # Author-Info
 __author__ = "Baptiste Bauvin"
 __status__ = "Prototype"  # Production, Development, Prototype
 
+def save_dict_to_text(dictionary, output_file):
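+    """Write a {name: traceback} dictionary to an already-open text file and return the failed names."""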
+    # TODO: a smarter way must exist
+    output_file.write("Failed algorithms:\n\t" + ",\n\t".join(dictionary.keys()) + ".\n\n\n")
+    for key, value in dictionary.items():
+        output_file.write(key)
+        output_file.write("\n\n")
+        output_file.write(value)
+        output_file.write("\n\n\n")
+    return dictionary.keys()
+
 
 def plot_results_noise(directory, noise_results, metric_to_plot, name, width=0.1):
     avail_colors = ["tab:blue", "tab:orange", "tab:brown", "tab:gray",
@@ -647,6 +659,20 @@ def get_feature_importances(result, feature_names=None):
     return feature_importances
 
 
+def publish_tracebacks(directory, database_name, labels_names, tracebacks, flag):
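+    """Save the tracebacks of one biclass iteration to a time-stamped text file and
+    return the names of the failed algorithms, suffixed with the iteration flag."""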
+    if tracebacks:
+        with open(os.path.join(directory, time.strftime(
+                "%Y_%m_%d-%H_%M_%S") + "-" + database_name + "-" + "_vs_".join(
+                labels_names) + "-iter"+str(flag[0])+"-"+str(flag[1][0])+"vs"+
+                str(flag[1][1])+"-tracebacks.txt"), "w") as traceback_file:
+            failed_list = save_dict_to_text(tracebacks, traceback_file)
+        flagged_list = [_ + "-iter"+str(flag[0])+"-"+str(flag[1][0])+"vs"+
+                        str(flag[1][1]) for _ in failed_list]
+    else:
+        flagged_list = []
+    return flagged_list
+
+
 def analyze_biclass(results, benchmark_argument_dictionaries, stats_iter, metrics, example_ids):
     r"""Used to extract and format the results of the different biclass experimentations performed.
 
@@ -675,8 +701,9 @@ def analyze_biclass(results, benchmark_argument_dictionaries, stats_iter, metric
     """
     logging.debug("Srart:\t Analzing all biclass resuls")
     biclass_results = {}
+    flagged_tracebacks_list = []
 
-    for flag, result in results:
+    for flag, result, tracebacks in results:
         iteridex, [classifierPositive, classifierNegative] = flag
 
         arguments = get_arguments(benchmark_argument_dictionaries, flag)
@@ -696,6 +723,10 @@ def analyze_biclass(results, benchmark_argument_dictionaries, stats_iter, metric
         publishExampleErrors(example_errors, directory, database_name,
                              labels_names, example_ids, arguments["labels"])
         publish_feature_importances(feature_importances, directory, database_name, labels_names)
+
+        flagged_tracebacks_list += publish_tracebacks(directory, database_name, labels_names, tracebacks, flag)
+
+
         if not str(classifierPositive) + str(classifierNegative) in biclass_results:
             biclass_results[str(classifierPositive) + str(classifierNegative)] = {}
             biclass_results[str(classifierPositive) + str(classifierNegative)][
@@ -709,16 +740,17 @@ def analyze_biclass(results, benchmark_argument_dictionaries, stats_iter, metric
         biclass_results[str(classifierPositive) + str(classifierNegative)]["feature_importances"][iteridex] = feature_importances
 
     logging.debug("Done:\t Analzing all biclass resuls")
-    return results, biclass_results
 
+    return results, biclass_results, flagged_tracebacks_list
 
-def gen_metrics_scores_multiclass(results, true_labels, metrics,
+
+def gen_metrics_scores_multiclass(results, true_labels, metrics_list,
                                   arguments_dictionaries):
     """Used to add all the metrics scores to the multiclass result structure  for each clf and each iteration"""
 
     logging.debug("Start:\t Getting multiclass scores for each metric")
 
-    for metric in metrics:
+    for metric in metrics_list:
         metric_module = getattr(metrics, metric[0])
         for iter_index, iter_results in enumerate(results):
 
@@ -823,7 +855,7 @@ def analyzeMulticlass(results, stats_iter, benchmark_argument_dictionaries,
     """Used to transform one versus one results in multiclass results and to publish it"""
     multiclass_results = [{} for _ in range(stats_iter)]
 
-    for flag, result in results:
+    for flag, result, tracebacks in results:
         iter_index = flag[0]
         classifierPositive = flag[1][0]
         classifierNegative = flag[1][1]
@@ -872,6 +904,7 @@ def analyzeMulticlass(results, stats_iter, benchmark_argument_dictionaries,
     publishMulticlassExmapleErrors(multiclass_results, directories,
                                    benchmark_argument_dictionaries[0][
                                        "args"].name, example_ids)
+
     return results, multiclass_results
 
 
@@ -1160,6 +1193,12 @@ def analyze_iter_multiclass(multiclass_results, directory, stats_iter, metrics,
     return results
 
 
+def save_failed(failed_list, directory):
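+    """List the names of all failed algorithms in failed_algorithms.txt at the root of the result directory."""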
+    with open(os.path.join(directory, "failed_algorithms.txt"), "w") as failed_file:
+        failed_file.write("The following algorithms sent an error, the tracebacks are stored in the coressponding directory :\n")
+        failed_file.write(", \n".join(failed_list)+".")
+
+
 def get_results(results, stats_iter, nb_multiclass, benchmark_argument_dictionaries,
                multiclass_labels, metrics,
                classification_indices, directories, directory, labels_dictionary,
@@ -1167,8 +1206,12 @@ def get_results(results, stats_iter, nb_multiclass, benchmark_argument_dictionar
 
     """Used to analyze the results of the previous benchmarks"""
     data_base_name = benchmark_argument_dictionaries[0]["args"]["Base"]["name"]
-    results_means_std, biclass_results = analyze_biclass(results, benchmark_argument_dictionaries,
+    results_means_std, biclass_results, flagged_failed = analyze_biclass(results, benchmark_argument_dictionaries,
                                          stats_iter, metrics, example_ids)
+    if flagged_failed:
+        save_failed(flagged_failed, directory)
 
     if nb_multiclass > 1:
         results_means_std, multiclass_results = analyzeMulticlass(results, stats_iter,
diff --git a/multiview_platform/tests/test_ExecClassif.py b/multiview_platform/tests/test_ExecClassif.py
index ad86757828f53a732ded8785cbf0f199bbbdbc9d..62d3b1cd55721f3819f5474a4becb056c42fc7b9 100644
--- a/multiview_platform/tests/test_ExecClassif.py
+++ b/multiview_platform/tests/test_ExecClassif.py
@@ -244,8 +244,8 @@ class Test_execBenchmark(unittest.TestCase):
         res = exec_classif.exec_benchmark(1, 2, 3, cls.argument_dictionaries,
                                          [[[1, 2], [3, 4, 5]]], 5, 6, 7, 8, 9,
                                          10, cls.Dataset,
-                                         exec_one_benchmark=fakeBenchmarkExec,
-                                         exec_one_benchmark_multicore=fakeBenchmarkExec_mutlicore,
+                                         # exec_one_benchmark=fakeBenchmarkExec,
+                                         # exec_one_benchmark_multicore=fakeBenchmarkExec_mutlicore,
                                          exec_one_benchmark_mono_core=fakeBenchmarkExec_monocore,
                                          get_results=fakegetResults,
                                          delete=fakeDelete)
@@ -257,8 +257,8 @@ class Test_execBenchmark(unittest.TestCase):
         res = exec_classif.exec_benchmark(2, 1, 2, cls.argument_dictionaries,
                                          [[[1, 2], [3, 4, 5]]], 5, 6, 7, 8, 9,
                                          10, cls.Dataset,
-                                         exec_one_benchmark=fakeBenchmarkExec,
-                                         exec_one_benchmark_multicore=fakeBenchmarkExec_mutlicore,
+                                         # exec_one_benchmark=fakeBenchmarkExec,
+                                         # exec_one_benchmark_multicore=fakeBenchmarkExec_mutlicore,
                                          exec_one_benchmark_mono_core=fakeBenchmarkExec_monocore,
                                          get_results=fakegetResults,
                                          delete=fakeDelete)
@@ -272,8 +272,8 @@ class Test_execBenchmark(unittest.TestCase):
         res = exec_classif.exec_benchmark(2, 2, 2, cls.argument_dictionaries,
                                          [[[1, 2], [3, 4, 5]]], 5, 6, 7, 8, 9,
                                          10, cls.Dataset,
-                                         exec_one_benchmark=fakeBenchmarkExec,
-                                         exec_one_benchmark_multicore=fakeBenchmarkExec_mutlicore,
+                                         # exec_one_benchmark=fakeBenchmarkExec,
+                                         # exec_one_benchmark_multicore=fakeBenchmarkExec_mutlicore,
                                          exec_one_benchmark_mono_core=fakeBenchmarkExec_monocore,
                                          get_results=fakegetResults,
                                          delete=fakeDelete)
@@ -283,8 +283,8 @@ class Test_execBenchmark(unittest.TestCase):
         res = exec_classif.exec_benchmark(2, 1, 1, cls.argument_dictionaries,
                                          [[[1, 2], [3, 4, 5]]], 5, 6, 7, 8, 9,
                                          10, cls.Dataset,
-                                         exec_one_benchmark=fakeBenchmarkExec,
-                                         exec_one_benchmark_multicore=fakeBenchmarkExec_mutlicore,
+                                         # exec_one_benchmark=fakeBenchmarkExec,
+                                         # exec_one_benchmark_multicore=fakeBenchmarkExec_mutlicore,
                                          exec_one_benchmark_mono_core=fakeBenchmarkExec_monocore,
                                          get_results=fakegetResults,
                                          delete=fakeDelete)
@@ -323,129 +323,129 @@ class FakeKfold():
         return [([X[0], X[1]], [X[2], X[3]]), (([X[2], X[3]], [X[0], X[1]]))]
 
 
-class Test_execOneBenchmark(unittest.TestCase):
-
-    @classmethod
-    def setUp(cls):
-        rm_tmp()
-        os.mkdir(tmp_path)
-        cls.args = {
-            "Base": {"name": "chicken_is_heaven", "type": "type",
-                     "pathf": "pathF"},
-            "Classification": {"hps_iter": 1}}
-
-    def test_simple(cls):
-        flag, results = exec_classif.exec_one_benchmark(core_index=10,
-                                                      labels_dictionary={
-                                                                   0: "a",
-                                                                   1: "b"},
-                                                      directory=tmp_path,
-                                                      classification_indices=(
-                                                               [1, 2, 3, 4],
-                                                               [0, 5, 6, 7, 8]),
-                                                               args=cls.args,
-                                                               k_folds=FakeKfold(),
-                                                               random_state="try",
-                                                               hyper_param_search="try",
-                                                               metrics="try",
-                                                               argument_dictionaries={
-                                                                   "Monoview": [
-                                                                       {
-                                                                           "try": 0},
-                                                                       {
-                                                                           "try2": 100}],
-                                                                   "multiview":[{
-                                                                           "try3": 5},
-                                                                       {
-                                                                           "try4": 10}]},
-                                                      benchmark="try",
-                                                      views="try",
-                                                      views_indices="try",
-                                                      flag=None,
-                                                      labels=np.array(
-                                                                   [0, 1, 2, 1,
-                                                                    2, 2, 2, 12,
-                                                                    1, 2, 1, 1,
-                                                                    2, 1, 21]),
-                                                      exec_monoview_multicore=fakeExecMono,
-                                                      exec_multiview_multicore=fakeExecMulti,)
-
-        cls.assertEqual(flag, None)
-        cls.assertEqual(results ,
-                        [['Mono', {'try': 0}], ['Mono', {'try2': 100}],
-                         ['Multi', {'try3': 5}], ['Multi', {'try4': 10}]])
-
-    @classmethod
-    def tearDown(cls):
-        path = tmp_path
-        for file_name in os.listdir(path):
-            dir_path = os.path.join(path, file_name)
-            if os.path.isdir(dir_path):
-                for file_name in os.listdir(dir_path):
-                    os.remove(os.path.join(dir_path, file_name))
-                os.rmdir(dir_path)
-            else:
-                os.remove(os.path.join(path, file_name))
-        os.rmdir(path)
-
-
-class Test_execOneBenchmark_multicore(unittest.TestCase):
-
-    @classmethod
-    def setUpClass(cls):
-        rm_tmp()
-        os.mkdir(tmp_path)
-        cls.args = {
-            "Base": {"name": "chicken_is_heaven", "type": "type",
-                     "pathf": "pathF"},
-            "Classification": {"hps_iter": 1}}
-
-    def test_simple(cls):
-        flag, results = exec_classif.exec_one_benchmark_multicore(
-            nb_cores=2,
-            labels_dictionary={0: "a", 1: "b"},
-            directory=tmp_path,
-            classification_indices=([1, 2, 3, 4], [0, 10, 20, 30, 40]),
-            args=cls.args,
-            k_folds=FakeKfold(),
-            random_state="try",
-            hyper_param_search="try",
-            metrics="try",
-            argument_dictionaries={
-                                                                   "monoview": [
-                                                                       {
-                                                                           "try": 0},
-                                                                       {
-                                                                           "try2": 100}],
-                                                                   "multiview":[{
-                                                                           "try3": 5},
-                                                                       {
-                                                                           "try4": 10}]},
-            benchmark="try",
-            views="try",
-            views_indices="try",
-            flag=None,
-            labels=np.array([0, 1, 2, 3, 4, 2, 2, 12, 1, 2, 1, 1, 2, 1, 21]),
-            exec_monoview_multicore=fakeExecMono,
-            exec_multiview_multicore=fakeExecMulti,)
-
-        cls.assertEqual(flag, None)
-        cls.assertEqual(results ,
-                        [['Mono', {'try': 0}], ['Mono', {'try2': 100}],
-                         ['Multi', {'try3': 5}], ['Multi', {'try4': 10}]])
-
-    @classmethod
-    def tearDown(cls):
-        path = tmp_path
-        for file_name in os.listdir(path):
-            dir_path = os.path.join(path, file_name)
-            if os.path.isdir(dir_path):
-                for file_name in os.listdir(dir_path):
-                    os.remove(os.path.join(dir_path, file_name))
-                os.rmdir(dir_path)
-            else:
-                os.remove(os.path.join(path, file_name))
-        os.rmdir(path)
+# class Test_execOneBenchmark(unittest.TestCase):
+#
+#     @classmethod
+#     def setUp(cls):
+#         rm_tmp()
+#         os.mkdir(tmp_path)
+#         cls.args = {
+#             "Base": {"name": "chicken_is_heaven", "type": "type",
+#                      "pathf": "pathF"},
+#             "Classification": {"hps_iter": 1}}
+#
+#     def test_simple(cls):
+#         flag, results = exec_classif.exec_one_benchmark(core_index=10,
+#                                                       labels_dictionary={
+#                                                                    0: "a",
+#                                                                    1: "b"},
+#                                                       directory=tmp_path,
+#                                                       classification_indices=(
+#                                                                [1, 2, 3, 4],
+#                                                                [0, 5, 6, 7, 8]),
+#                                                                args=cls.args,
+#                                                                k_folds=FakeKfold(),
+#                                                                random_state="try",
+#                                                                hyper_param_search="try",
+#                                                                metrics="try",
+#                                                                argument_dictionaries={
+#                                                                    "Monoview": [
+#                                                                        {
+#                                                                            "try": 0},
+#                                                                        {
+#                                                                            "try2": 100}],
+#                                                                    "multiview":[{
+#                                                                            "try3": 5},
+#                                                                        {
+#                                                                            "try4": 10}]},
+#                                                       benchmark="try",
+#                                                       views="try",
+#                                                       views_indices="try",
+#                                                       flag=None,
+#                                                       labels=np.array(
+#                                                                    [0, 1, 2, 1,
+#                                                                     2, 2, 2, 12,
+#                                                                     1, 2, 1, 1,
+#                                                                     2, 1, 21]),
+#                                                       exec_monoview_multicore=fakeExecMono,
+#                                                       exec_multiview_multicore=fakeExecMulti,)
+#
+#         cls.assertEqual(flag, None)
+#         cls.assertEqual(results ,
+#                         [['Mono', {'try': 0}], ['Mono', {'try2': 100}],
+#                          ['Multi', {'try3': 5}], ['Multi', {'try4': 10}]])
+#
+#     @classmethod
+#     def tearDown(cls):
+#         path = tmp_path
+#         for file_name in os.listdir(path):
+#             dir_path = os.path.join(path, file_name)
+#             if os.path.isdir(dir_path):
+#                 for file_name in os.listdir(dir_path):
+#                     os.remove(os.path.join(dir_path, file_name))
+#                 os.rmdir(dir_path)
+#             else:
+#                 os.remove(os.path.join(path, file_name))
+#         os.rmdir(path)
+#
+#
+# class Test_execOneBenchmark_multicore(unittest.TestCase):
+#
+#     @classmethod
+#     def setUpClass(cls):
+#         rm_tmp()
+#         os.mkdir(tmp_path)
+#         cls.args = {
+#             "Base": {"name": "chicken_is_heaven", "type": "type",
+#                      "pathf": "pathF"},
+#             "Classification": {"hps_iter": 1}}
+#
+#     def test_simple(cls):
+#         flag, results = exec_classif.exec_one_benchmark_multicore(
+#             nb_cores=2,
+#             labels_dictionary={0: "a", 1: "b"},
+#             directory=tmp_path,
+#             classification_indices=([1, 2, 3, 4], [0, 10, 20, 30, 40]),
+#             args=cls.args,
+#             k_folds=FakeKfold(),
+#             random_state="try",
+#             hyper_param_search="try",
+#             metrics="try",
+#             argument_dictionaries={
+#                                                                    "monoview": [
+#                                                                        {
+#                                                                            "try": 0},
+#                                                                        {
+#                                                                            "try2": 100}],
+#                                                                    "multiview":[{
+#                                                                            "try3": 5},
+#                                                                        {
+#                                                                            "try4": 10}]},
+#             benchmark="try",
+#             views="try",
+#             views_indices="try",
+#             flag=None,
+#             labels=np.array([0, 1, 2, 3, 4, 2, 2, 12, 1, 2, 1, 1, 2, 1, 21]),
+#             exec_monoview_multicore=fakeExecMono,
+#             exec_multiview_multicore=fakeExecMulti,)
+#
+#         cls.assertEqual(flag, None)
+#         cls.assertEqual(results ,
+#                         [['Mono', {'try': 0}], ['Mono', {'try2': 100}],
+#                          ['Multi', {'try3': 5}], ['Multi', {'try4': 10}]])
+#
+#     @classmethod
+#     def tearDown(cls):
+#         path = tmp_path
+#         for file_name in os.listdir(path):
+#             dir_path = os.path.join(path, file_name)
+#             if os.path.isdir(dir_path):
+#                 for file_name in os.listdir(dir_path):
+#                     os.remove(os.path.join(dir_path, file_name))
+#                 os.rmdir(dir_path)
+#             else:
+#                 os.remove(os.path.join(path, file_name))
+#         os.rmdir(path)
 
 
 class Test_set_element(unittest.TestCase):
diff --git a/multiview_platform/tests/test_ResultAnalysis.py b/multiview_platform/tests/test_ResultAnalysis.py
index 2141c714f1a6666a7a7d79d627c4c8354458d4f7..6b39cf9ec896b3803403bc42d0557b2922d61e09 100644
--- a/multiview_platform/tests/test_ResultAnalysis.py
+++ b/multiview_platform/tests/test_ResultAnalysis.py
@@ -265,3 +265,5 @@ class Test_gen_error_data_glob(unittest.TestCase):
 
 
 
+
+