diff --git a/config_files/config_test.yml b/config_files/config_test.yml
index 104cb0a091c3a231600c7961cafcea2e79d4b13a..f54013c707f38890fdad1e077dc5834b7ac51d83 100644
--- a/config_files/config_test.yml
+++ b/config_files/config_test.yml
@@ -1,11 +1,11 @@
 # The base configuration of the benchmark
 Base :
   log: True
-  name: ["plausible"]
+  name: ["outliers_dset"]
   label: "_"
   type: ".hdf5"
   views:
-  pathf: "../data/"
+  pathf: "/home/baptiste/Documents/Datasets/Generated/outliers_dset/"
   nice: 0
   random_state: 42
   nb_cores: 1
@@ -18,18 +18,18 @@ Base :
 # All the classification-related configuration options
 Classification:
   multiclass_method: "oneVersusOne"
-  split: 0.4
+  split: 0.2
   nb_folds: 2
   nb_class: 2
   classes:
-  type: ["monoview"]
-  algos_monoview: ["adaboost",]
-  algos_multiview: ["svm_jumbo_fusion"]
-  stats_iter: 2
+  type: ["monoview", "multiview"]
+  algos_monoview: ["decision_tree", "adaboost", "svm_linear", "random_forest"]
+  algos_multiview: ["weighted_linear_early_fusion", "difficulty_fusion", "double_fault_fusion"]
+  stats_iter: 30
   metrics: ["accuracy_score", "f1_score"]
-  metric_princ: "f1_score"
+  metric_princ: "accuracy_score"
   hps_type: "randomized_search-equiv"
-  hps_iter: 1
+  hps_iter: 5
 
 
 #####################################
diff --git a/multiview_platform/mono_multi_view_classifiers/result_analysis.py b/multiview_platform/mono_multi_view_classifiers/result_analysis.py
index e33f1a72d0338ef8bb3fb08816cb1cd9536c9764..f529f1af4fa4eeb15b7e54de9fed451e67d0c662 100644
--- a/multiview_platform/mono_multi_view_classifiers/result_analysis.py
+++ b/multiview_platform/mono_multi_view_classifiers/result_analysis.py
@@ -7,6 +7,7 @@ import yaml
 
 import matplotlib as mpl
 from matplotlib.patches import Patch
+
 # Import third party modules
 import matplotlib.pyplot as plt
 import numpy as np
@@ -213,7 +214,7 @@ def plot_2d(data, classifiers_names, nbClassifiers, nbExamples,
     plt.close()
     ### The following part is used to generate an interactive graph.
     if use_plotly:
-        label_index_list = [np.where(labels==i)[0] for i in np.unique(labels)]
+        label_index_list = [np.arange(len(labels))]  # group every example into a single index list rather than one list per label
         hover_text = [[example_ids[i] + " failed "+ str(stats_iter-data[i,j])+" time(s)"
                        for j in range(data.shape[1])]
                       for i in range(data.shape[0]) ]
@@ -732,7 +733,6 @@ def analyze_biclass(results, benchmark_argument_dictionaries, stats_iter, metric
         metrics_scores = get_metrics_scores_biclass(metrics, result)
         example_errors = get_example_errors_biclass(arguments["labels"], result)
         feature_importances = get_feature_importances(result)
-        print(feature_importances)
         directory = arguments["directory"]
 
         database_name = arguments["args"]["Base"]["name"]
diff --git a/multiview_platform/tests/test_ResultAnalysis.py b/multiview_platform/tests/test_ResultAnalysis.py
index dff6a23f874ca1eba41271261e4557626654af82..6fb4b0d799bef1b2638db50fc1188607a47d7de9 100644
--- a/multiview_platform/tests/test_ResultAnalysis.py
+++ b/multiview_platform/tests/test_ResultAnalysis.py
@@ -182,7 +182,7 @@ class Test_gen_error_data(unittest.TestCase):
 class Test_format_previous_results(unittest.TestCase):
 
     def test_simple(self):
-        biclass_results = {"01":{"metrics_scores":[], "example_errors":[], "feature_importances":[]}}
+        biclass_results = {"01":{"metrics_scores":[], "example_errors":[], "feature_importances":[], "labels":[]}}
         random_state = np.random.RandomState(42)
 
         # Gen metrics data