diff --git a/summit/multiview_platform/result_analysis/execution.py b/summit/multiview_platform/result_analysis/execution.py
index 7e046df70434a1e1a99213120b84ff89e23a6e24..931d61863f3ae12128ce2c2c8c4933312045ff00 100644
--- a/summit/multiview_platform/result_analysis/execution.py
+++ b/summit/multiview_platform/result_analysis/execution.py
@@ -129,7 +129,8 @@ def analyze_all(iter_results, stats_iter, directory, data_base_name,
     publish_all_sample_errors(error_analysis, directory, stats_iter,
                               sample_ids, labels, data_base_name, label_names)
     publish_feature_importances(feature_importances, directory,
-                                data_base_name, feature_importances_stds)
+                                data_base_name, feature_importances_stds,
+                                metric_scores=metrics_analysis)
     plot_durations(duration_means, directory, data_base_name, duration_stds)
     return results
 
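A minimal, hypothetical sketch of the metrics_analysis structure that analyze_all now forwards as metric_scores; the metric names, the "classifier-view" column convention, and the "mean"/"std" nesting are assumptions inferred from how plot_feature_relevance reads this object in the hunk further down, not from the library's documentation.

    # Hypothetical shape of the forwarded object (names invented for illustration).
    import pandas as pd

    metrics_analysis = {
        "accuracy_score*": {                  # metrics flagged with "*" get a relevance plot
            "mean": pd.DataFrame({"decision_tree-view0": [0.81],
                                  "decision_tree-view1": [0.78]}),
            "std": pd.DataFrame({"decision_tree-view0": [0.02],
                                 "decision_tree-view1": [0.03]}),
        },
        "f1_score": pd.DataFrame({"decision_tree-view0": [0.75]}),  # plain-DataFrame form
    }
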
diff --git a/summit/multiview_platform/result_analysis/feature_importances.py b/summit/multiview_platform/result_analysis/feature_importances.py
index 0735c6eaf12ef953957f60261e12c9767e2a357b..36c0eb3514b0fa3db388af10803b60f2f245f011 100644
--- a/summit/multiview_platform/result_analysis/feature_importances.py
+++ b/summit/multiview_platform/result_analysis/feature_importances.py
@@ -102,6 +102,7 @@ def publish_feature_importances(feature_importances, directory, database_name,
 def plot_feature_importances(file_name, feature_importance,
                              feature_std):  # pragma: no cover
     s = feature_importance.sum(axis=1)
+    s = s[s != 0]
     feature_importance = feature_importance.loc[s.sort_values(ascending=False).index]
     feature_importance.to_csv(file_name + "_dataframe.csv")
     hover_text = [["-Feature :" + str(feature_name) +
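
A self-contained toy example of what the new filter does: features whose summed importance is exactly zero are dropped before the rows are re-ordered for plotting (the feature and classifier names are invented).

    import pandas as pd

    feature_importance = pd.DataFrame(
        {"decision_tree": [0.6, 0.0, 0.4],
         "random_forest": [0.5, 0.0, 0.5]},
        index=["feat_a", "feat_b", "feat_c"])

    s = feature_importance.sum(axis=1)   # feat_a: 1.1, feat_b: 0.0, feat_c: 0.9
    s = s[s != 0]                        # drops feat_b, which carries no importance
    feature_importance = feature_importance.loc[
        s.sort_values(ascending=False).index]
    print(feature_importance.index.tolist())   # ['feat_a', 'feat_c']
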
@@ -134,6 +135,8 @@ def plot_feature_relevance(file_name, feature_importance,
                              feature_std, metric_scores): # pragma: no cover
     for metric, score_df in metric_scores.items():
         if metric.endswith("*"):
+            if isinstance(score_df, dict):
+                score_df = score_df["mean"]
             for score in score_df.columns:
                 if len(score.split("-"))>1:
                     algo, view = score.split("-")
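
A toy illustration of the new isinstance branch: a metric entry may now arrive either as a plain DataFrame or as a dict of DataFrames keyed by "mean"/"std" (that layout is an assumption drawn from the score_df["mean"] access above), and only the mean scores feed the per-view loop.

    import pandas as pd

    score_df = {"mean": pd.DataFrame({"decision_tree-view0": [0.81]}),
                "std": pd.DataFrame({"decision_tree-view0": [0.02]})}

    if isinstance(score_df, dict):
        score_df = score_df["mean"]            # keep only the mean scores

    for score in score_df.columns:
        if len(score.split("-")) > 1:          # "classifier-view" columns are per-view scores
            algo, view = score.split("-")
            print(algo, view)                  # decision_tree view0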