diff --git a/.gitlab-ci.yml b/.gitlab-ci.yml
index b7aacdd3be4737f0d9b47fe6460a27c4c6d768cd..cdfcf30ba53ea9841566f0b198075f4baf8baa15 100644
--- a/.gitlab-ci.yml
+++ b/.gitlab-ci.yml
@@ -4,7 +4,9 @@ tests:
     tags:
         - docker
     script:
-        - pip3 install -e . --no-deps
+        - export LC_ALL=$(locale -a | grep en_US)
+        - export LANG=$(locale -a | grep en_US)
+        - pip3 install -e .
         - pytest-3
     coverage: '/^TOTAL.+?(\d+\%)$/'
     artifacts:
diff --git a/README.rst b/README.rst
index 0b6e662707f208275b93470713473e6324e8659b..cd6a3590e048de01a9e3ba8000789338c0d1f4bb 100644
--- a/README.rst
+++ b/README.rst
@@ -57,6 +57,7 @@ And the following python modules will be automatically installed  :
 * `pyyaml <https://pypi.org/project/PyYAML/>`_ - Used to read the config files,
 * `plotly <https://plot.ly/>`_ - Used to generate interactive HTML visuals,
 * `tabulate <https://pypi.org/project/tabulate/>`_ - Used to generated the confusion matrix.
+* `pyscm-ml <https://pypi.org/project/pyscm-ml/>`_ - Used to run the Set Covering Machine (SCM) classifiers.
 
 
 Installing
diff --git a/docker/Dockerfile_ubuntu_18.04 b/docker/Dockerfile_ubuntu_18.04
index 4e3bfa76f57ec17ee49037656c60792cbd59fd24..026c49a3542d5ad55a87625665f377fbca3c33f0 100644
--- a/docker/Dockerfile_ubuntu_18.04
+++ b/docker/Dockerfile_ubuntu_18.04
@@ -29,6 +29,7 @@ RUN apt-get install -y --no-install-recommends locales && \
     update-locale en_US.UTF-8 && \
     echo "export LC_ALL=$(locale -a | grep en_US)" >> /root/.bashrc && \
     echo "export LANG=$(locale -a | grep en_US)" >>  /root/.bashrc
+ENV LANGUAGE='en_US:en'
 COPY requirements.txt .
 RUN pip3 install -r requirements.txt
 
diff --git a/requirements.txt b/requirements.txt
index 62f9661b7cf299ea83f7585cc3dcfdfec114e37c..ed308353300ddc13fc59db4ded504ec9d26ea2a4 100755
--- a/requirements.txt
+++ b/requirements.txt
@@ -11,3 +11,4 @@ pyyaml>=3.12
 plotly>=4.2.1
 matplotlib>=3.1.1
 tabulate>=0.8.6
+pyscm-ml>=1.0.0
\ No newline at end of file
diff --git a/summit/multiview_platform/result_analysis/execution.py b/summit/multiview_platform/result_analysis/execution.py
index 7e046df70434a1e1a99213120b84ff89e23a6e24..931d61863f3ae12128ce2c2c8c4933312045ff00 100644
--- a/summit/multiview_platform/result_analysis/execution.py
+++ b/summit/multiview_platform/result_analysis/execution.py
@@ -129,7 +129,8 @@ def analyze_all(iter_results, stats_iter, directory, data_base_name,
     publish_all_sample_errors(error_analysis, directory, stats_iter,
                               sample_ids, labels, data_base_name, label_names)
     publish_feature_importances(feature_importances, directory,
-                                data_base_name, feature_importances_stds)
+                                data_base_name, feature_importances_stds,
+                                metric_scores=metrics_analysis)
     plot_durations(duration_means, directory, data_base_name, duration_stds)
     return results
 
diff --git a/summit/multiview_platform/result_analysis/feature_importances.py b/summit/multiview_platform/result_analysis/feature_importances.py
index 0735c6eaf12ef953957f60261e12c9767e2a357b..36c0eb3514b0fa3db388af10803b60f2f245f011 100644
--- a/summit/multiview_platform/result_analysis/feature_importances.py
+++ b/summit/multiview_platform/result_analysis/feature_importances.py
@@ -102,6 +102,7 @@ def publish_feature_importances(feature_importances, directory, database_name,
 def plot_feature_importances(file_name, feature_importance,
                              feature_std):  # pragma: no cover
     s = feature_importance.sum(axis=1)
+    s = s[s!=0]
     feature_importance = feature_importance.loc[s.sort_values(ascending=False).index]
     feature_importance.to_csv(file_name + "_dataframe.csv")
     hover_text = [["-Feature :" + str(feature_name) +
@@ -134,6 +135,8 @@ def plot_feature_relevance(file_name, feature_importance,
                              feature_std, metric_scores): # pragma: no cover
     for metric, score_df in metric_scores.items():
         if metric.endswith("*"):
+            if isinstance(score_df, dict):
+                score_df = score_df["mean"]
             for score in score_df.columns:
                 if len(score.split("-"))>1:
                     algo, view = score.split("-")
diff --git a/summit/tests/test_multi_view/test_multiview_utils.py b/summit/tests/test_multi_view/test_multiview_utils.py
index 491d0134b2943be11436bf3bacef2eff6cb08614..99d725253c7a0341719913856259c00d65fba3a9 100644
--- a/summit/tests/test_multi_view/test_multiview_utils.py
+++ b/summit/tests/test_multi_view/test_multiview_utils.py
@@ -79,6 +79,7 @@ class TestFunctions(unittest.TestCase):
                                  'knn',
                                  'lasso',
                                  'random_forest',
+                                 'scm',
                                  'sgd',
                                  'svm_linear',
                                  'svm_poly',
@@ -90,6 +91,7 @@ class TestFunctions(unittest.TestCase):
                                  'gradient_boosting',
                                  'knn',
                                  'random_forest',
+                                 'scm',
                                  'svm_linear',
                                  'svm_poly',
                                  'svm_rbf'])