From 74ce73a83b2a93963873f454a376b98167ec815d Mon Sep 17 00:00:00 2001
From: Baptiste Bauvin <baptiste.bauvin@lis-lab.fr>
Date: Mon, 20 Jan 2020 14:17:28 +0100
Subject: [PATCH] Result analysis still perfectible: config, plotting and test tweaks

---
 config_files/config_test.yml                   | 18 +++++++++---------
 .../result_analysis.py                         |  4 ++--
 .../tests/test_ResultAnalysis.py               |  2 +-
 3 files changed, 12 insertions(+), 12 deletions(-)

diff --git a/config_files/config_test.yml b/config_files/config_test.yml
index 104cb0a0..f54013c7 100644
--- a/config_files/config_test.yml
+++ b/config_files/config_test.yml
@@ -1,11 +1,11 @@
 # The base configuration of the benchmark
 Base :
   log: True
-  name: ["plausible"]
+  name: ["outliers_dset"]
   label: "_"
   type: ".hdf5"
   views:
-  pathf: "../data/"
+  pathf: "/home/baptiste/Documents/Datasets/Generated/outliers_dset/"
   nice: 0
   random_state: 42
   nb_cores: 1
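
The Base hunk swaps the bundled "plausible" dataset for a locally generated one and points `pathf` at an absolute home directory. A minimal sketch of how `pathf`, `name`, and `type` presumably combine into the file that gets opened (assumption: the platform reads `<pathf><name><type>` with h5py; the exact loader may differ, and the path below only exists on the author's machine):

import os
import h5py

# Values taken from the updated config above.
pathf = "/home/baptiste/Documents/Datasets/Generated/outliers_dset/"
name = "outliers_dset"
ext = ".hdf5"

dataset_path = os.path.join(pathf, name + ext)
if os.path.exists(dataset_path):
    with h5py.File(dataset_path, "r") as hdf:
        # Each view of the multiview dataset is expected at the top level.
        print(list(hdf.keys()))
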
@@ -18,18 +18,18 @@ Base :
 # All the classification-related configuration options
 Classification:
   multiclass_method: "oneVersusOne"
-  split: 0.4
+  split: 0.2
   nb_folds: 2
   nb_class: 2
   classes:
-  type: ["monoview"]
-  algos_monoview: ["adaboost",]
-  algos_multiview: ["svm_jumbo_fusion"]
-  stats_iter: 2
+  type: ["monoview", "multiview"]
+  algos_monoview: ["decision_tree", "adaboost", "svm_linear", "random_forest"]
+  algos_multiview: ["weighted_linear_early_fusion", "difficulty_fusion", "double_fault_fusion"]
+  stats_iter: 30
   metrics: ["accuracy_score", "f1_score"]
-  metric_princ: "f1_score"
+  metric_princ: "accuracy_score"
   hps_type: "randomized_search-equiv"
-  hps_iter: 1
+  hps_iter: 5
 
 
 #####################################
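
The Classification hunk scales the run up from a smoke test (one monoview algorithm, 2 stats iterations) to a full benchmark: four monoview and three multiview algorithms, 30 stats iterations, 5 hyper-parameter search iterations, and a 0.2 split. A minimal sketch of what the new `split: 0.2` plausibly means (assumption: it is the test fraction of a stratified train/test split, as in sklearn's train_test_split; the platform's own splitter may differ):

import numpy as np
from sklearn.model_selection import train_test_split

rng = np.random.RandomState(42)    # matches random_state: 42 above
X = rng.rand(100, 5)               # 100 examples, 5 features
y = rng.randint(0, 2, 100)         # nb_class: 2

X_train, X_test, y_train, y_test = train_test_split(
    X, y, test_size=0.2, random_state=rng, stratify=y)
print(X_train.shape, X_test.shape)  # (80, 5) (20, 5)

With stats_iter: 30, such a split would be redrawn 30 times and the scores averaged.
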
diff --git a/multiview_platform/mono_multi_view_classifiers/result_analysis.py b/multiview_platform/mono_multi_view_classifiers/result_analysis.py
index e33f1a72..f529f1af 100644
--- a/multiview_platform/mono_multi_view_classifiers/result_analysis.py
+++ b/multiview_platform/mono_multi_view_classifiers/result_analysis.py
@@ -7,6 +7,7 @@ import yaml
 
 import matplotlib as mpl
 from matplotlib.patches import Patch
+
 # Import third party modules
 import matplotlib.pyplot as plt
 import numpy as np
@@ -213,7 +214,7 @@ def plot_2d(data, classifiers_names, nbClassifiers, nbExamples,
     plt.close()
     ### The following part is used to generate an interactive graph.
     if use_plotly:
-        label_index_list = [np.where(labels==i)[0] for i in np.unique(labels)]
+        label_index_list = [np.arange(len(labels))]  # temporary: plot all examples as one group; per-label grouping ([np.where(labels==i)[0] for i in np.unique(labels)]) disabled
         hover_text = [[example_ids[i] + " failed "+ str(stats_iter-data[i,j])+" time(s)"
                        for j in range(data.shape[1])]
                       for i in range(data.shape[0]) ]
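
This hunk disables per-label grouping of the example indices in the interactive plot and treats every example as a single group, keeping the old expression as a comment. A self-contained sketch of the difference (assumption: `labels` is a 1-D integer array of class labels):

import numpy as np

labels = np.array([0, 1, 0, 2, 1])

# Old behaviour: one index array per distinct label.
per_label = [np.where(labels == i)[0] for i in np.unique(labels)]
# -> [array([0, 2]), array([1, 4]), array([3])]

# New behaviour: a single group holding every example index.
single_group = [np.arange(len(labels))]
# -> [array([0, 1, 2, 3, 4])]
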
@@ -732,7 +733,6 @@ def analyze_biclass(results, benchmark_argument_dictionaries, stats_iter, metric
         metrics_scores = get_metrics_scores_biclass(metrics, result)
         example_errors = get_example_errors_biclass(arguments["labels"], result)
         feature_importances = get_feature_importances(result)
-        print(feature_importances)
         directory = arguments["directory"]
 
         database_name = arguments["args"]["Base"]["name"]
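
The second hunk drops a stray debugging print of the feature importances. If that output is still occasionally useful, a hedged alternative is to route it through the standard logging module rather than print() (assumption: the platform configures logging via its `log` option; the snippet below is a sketch, not the project's convention):

import logging

logging.basicConfig(level=logging.DEBUG)
feature_importances = {"view0": [0.7, 0.3]}  # placeholder value for the sketch
logging.debug("feature importances: %s", feature_importances)
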
diff --git a/multiview_platform/tests/test_ResultAnalysis.py b/multiview_platform/tests/test_ResultAnalysis.py
index dff6a23f..6fb4b0d7 100644
--- a/multiview_platform/tests/test_ResultAnalysis.py
+++ b/multiview_platform/tests/test_ResultAnalysis.py
@@ -182,7 +182,7 @@ class Test_gen_error_data(unittest.TestCase):
 class Test_format_previous_results(unittest.TestCase):
 
     def test_simple(self):
-        biclass_results = {"01":{"metrics_scores":[], "example_errors":[], "feature_importances":[]}}
+        biclass_results = {"01":{"metrics_scores":[], "example_errors":[], "feature_importances":[], "labels":[]}}
         random_state = np.random.RandomState(42)
 
         # Gen metrics data
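
The test fixture gains an empty "labels" entry, plausibly mirroring the `arguments["labels"]` that analyze_biclass reads per label combination. A sketch of the updated fixture shape (assumption: at this point the test only needs the keys to exist; a populated run would hold per-iteration arrays and DataFrames):

import numpy as np

biclass_results = {
    "01": {
        "metrics_scores": [],       # one entry per stats iteration
        "example_errors": [],
        "feature_importances": [],
        "labels": [],               # new key exercised by format_previous_results
    }
}
random_state = np.random.RandomState(42)
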
-- 
GitLab