Skip to content
Snippets Groups Projects
Commit 74ce73a8 authored by Baptiste Bauvin's avatar Baptiste Bauvin
Browse files

Result analysis perfectible

parent 4b02f3c3
No related branches found
No related tags found
No related merge requests found
Pipeline #3882 failed
# The base configuration of the benchmark
Base :
log: True
name: ["plausible"]
name: ["outliers_dset"]
label: "_"
type: ".hdf5"
views:
pathf: "../data/"
pathf: "/home/baptiste/Documents/Datasets/Generated/outliers_dset/"
nice: 0
random_state: 42
nb_cores: 1
......@@ -18,18 +18,18 @@ Base :
# All the classification-related configuration options
Classification:
multiclass_method: "oneVersusOne"
split: 0.4
split: 0.2
nb_folds: 2
nb_class: 2
classes:
type: ["monoview"]
algos_monoview: ["adaboost",]
algos_multiview: ["svm_jumbo_fusion"]
stats_iter: 2
type: ["monoview", "multiview"]
algos_monoview: ["decision_tree", "adaboost", "svm_linear", "random_forest"]
algos_multiview: ["weighted_linear_early_fusion", "difficulty_fusion", "double_fault_fusion"]
stats_iter: 30
metrics: ["accuracy_score", "f1_score"]
metric_princ: "f1_score"
metric_princ: "accuracy_score"
hps_type: "randomized_search-equiv"
hps_iter: 1
hps_iter: 5
#####################################
......
......@@ -7,6 +7,7 @@ import yaml
import matplotlib as mpl
from matplotlib.patches import Patch
# Import third party modules
import matplotlib.pyplot as plt
import numpy as np
......@@ -213,7 +214,7 @@ def plot_2d(data, classifiers_names, nbClassifiers, nbExamples,
plt.close()
### The following part is used to generate an interactive graph.
if use_plotly:
label_index_list = [np.where(labels==i)[0] for i in np.unique(labels)]
label_index_list = [np.arange(len(labels))] #[np.where(labels==i)[0] for i in np.unique(labels)]
hover_text = [[example_ids[i] + " failed "+ str(stats_iter-data[i,j])+" time(s)"
for j in range(data.shape[1])]
for i in range(data.shape[0]) ]
......@@ -732,7 +733,6 @@ def analyze_biclass(results, benchmark_argument_dictionaries, stats_iter, metric
metrics_scores = get_metrics_scores_biclass(metrics, result)
example_errors = get_example_errors_biclass(arguments["labels"], result)
feature_importances = get_feature_importances(result)
print(feature_importances)
directory = arguments["directory"]
database_name = arguments["args"]["Base"]["name"]
......
......@@ -182,7 +182,7 @@ class Test_gen_error_data(unittest.TestCase):
class Test_format_previous_results(unittest.TestCase):
def test_simple(self):
biclass_results = {"01":{"metrics_scores":[], "example_errors":[], "feature_importances":[]}}
biclass_results = {"01":{"metrics_scores":[], "example_errors":[], "feature_importances":[], "labels":[]}}
random_state = np.random.RandomState(42)
# Gen metrics data
......
0% Loading or .
You are about to add 0 people to the discussion. Proceed with caution.
Please register or to comment