From 59a42096f00975d20a63b1f35c49845d0f7aa617 Mon Sep 17 00:00:00 2001
From: Baptiste Bauvin <baptiste.bauvin@lis-lab.fr>
Date: Wed, 23 Mar 2022 11:36:05 -0400
Subject: [PATCH] Added some other feat_imp

---
 summit/multiview_platform/result_analysis/execution.py        | 3 ++-
 .../multiview_platform/result_analysis/feature_importances.py | 3 +++
 2 files changed, 5 insertions(+), 1 deletion(-)

diff --git a/summit/multiview_platform/result_analysis/execution.py b/summit/multiview_platform/result_analysis/execution.py
index 7e046df7..931d6186 100644
--- a/summit/multiview_platform/result_analysis/execution.py
+++ b/summit/multiview_platform/result_analysis/execution.py
@@ -129,7 +129,8 @@ def analyze_all(iter_results, stats_iter, directory, data_base_name,
     publish_all_sample_errors(error_analysis, directory, stats_iter,
                               sample_ids, labels, data_base_name,
                               label_names)
     publish_feature_importances(feature_importances, directory,
-                                data_base_name, feature_importances_stds)
+                                data_base_name, feature_importances_stds,
+                                metric_scores=metrics_analysis)
     plot_durations(duration_means, directory, data_base_name, duration_stds)
     return results
diff --git a/summit/multiview_platform/result_analysis/feature_importances.py b/summit/multiview_platform/result_analysis/feature_importances.py
index 0735c6ea..36c0eb35 100644
--- a/summit/multiview_platform/result_analysis/feature_importances.py
+++ b/summit/multiview_platform/result_analysis/feature_importances.py
@@ -102,6 +102,7 @@ def publish_feature_importances(feature_importances, directory, database_name,
 def plot_feature_importances(file_name, feature_importance,
                              feature_std):  # pragma: no cover
     s = feature_importance.sum(axis=1)
+    s = s[s!=0]
     feature_importance = feature_importance.loc[s.sort_values(ascending=False).index]
     feature_importance.to_csv(file_name + "_dataframe.csv")
     hover_text = [["-Feature :" + str(feature_name) +
@@ -134,6 +135,8 @@ def plot_feature_relevance(file_name, feature_importance,
                            feature_std, metric_scores):  # pragma: no cover
     for metric, score_df in metric_scores.items():
         if metric.endswith("*"):
+            if isinstance(score_df, dict):
+                score_df = score_df["mean"]
             for score in score_df.columns:
                 if len(score.split("-"))>1:
                     algo, view = score.split("-")
--
GitLab
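
A minimal sketch, not part of the patch, of the two behaviours the diff introduces, using hypothetical pandas data: the frame contents, the "clf_a"/"clf_b" columns, the feature names, and the "algo-view0" score column are made up for illustration; only the s[s!=0] filter and the score_df["mean"] lookup come from the patch itself.

    import pandas as pd

    # plot_feature_importances: features whose summed importance is zero are
    # dropped before the remaining rows are sorted and written out.
    feature_importance = pd.DataFrame(
        {"clf_a": [0.6, 0.0, 0.1], "clf_b": [0.2, 0.0, 0.4]},
        index=["feat_1", "feat_2", "feat_3"])
    s = feature_importance.sum(axis=1)
    s = s[s != 0]  # feat_2, with all-zero importance, is filtered out
    feature_importance = feature_importance.loc[
        s.sort_values(ascending=False).index]

    # plot_feature_relevance: a metric's scores may now arrive as a dict of
    # DataFrames; only the "mean" table is kept before iterating over columns.
    score_df = {"mean": pd.DataFrame({"algo-view0": [0.9]})}
    if isinstance(score_df, dict):
        score_df = score_df["mean"]

    print(list(feature_importance.index), list(score_df.columns))
    # -> ['feat_1', 'feat_3'] ['algo-view0']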