diff --git a/README.md b/README.md
index 9084dbadd7f6227ceeba1dbcb383fc5b65b9a9b1..09aa84b0e8317eb954025da5de7e85dcda98707f 100644
--- a/README.md
+++ b/README.md
@@ -40,6 +40,19 @@ pip install -e .
 ```
 In the `summit` directory to install SuMMIT and its dependencies.
 
+### Running the tests
+
+To run the SuMMIT test suite, run:
+```
+cd path/to/summit
+pytest
+```
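+
+To run only a subset of the tests, you can use pytest's standard selection options; for example, `-k` runs only the tests whose names contain the given substring (here, `multiview` is just an illustration):
+```
+pytest -k multiview
+```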
+
 ### Running on simulated data
 
 In order to run it you'll need to try on **simulated** data with the command
diff --git a/multiview-machine-learning-omis.iml b/multiview-machine-learning-omis.iml
deleted file mode 100644
index 230475df4d4fa70020770781f430ff870070c286..0000000000000000000000000000000000000000
--- a/multiview-machine-learning-omis.iml
+++ /dev/null
@@ -1,15 +0,0 @@
-<?xml version="1.0" encoding="UTF-8"?>
-<module type="WEB_MODULE" version="4">
-  <component name="NewModuleRootManager">
-    <content url="file://$MODULE_DIR$" />
-    <orderEntry type="jdk" jdkName="Python 3.6 (develop)" jdkType="Python SDK" />
-    <orderEntry type="sourceFolder" forTests="false" />
-    <orderEntry type="module" module-name="multiview_generator" />
-    <orderEntry type="module" module-name="short_projects" />
-    <orderEntry type="library" name="R User Library" level="project" />
-    <orderEntry type="library" name="R Skeletons" level="application" />
-    <orderEntry type="module" module-name="Datasets" />
-    <orderEntry type="module" module-name="scikit-multimodallearn" />
-    <orderEntry type="module" module-name="lives_dataset" />
-  </component>
-</module>
\ No newline at end of file
diff --git a/multiview_platform/mono_multi_view_classifiers/exec_classif.py b/multiview_platform/mono_multi_view_classifiers/exec_classif.py
index aaf8743c0ae2668fca593f94bc35e04b36fca949..91d931bec67a0a77b9da9732402c180f8784257c 100644
--- a/multiview_platform/mono_multi_view_classifiers/exec_classif.py
+++ b/multiview_platform/mono_multi_view_classifiers/exec_classif.py
@@ -788,8 +788,7 @@ def exec_classif(arguments):  # pragma: no cover
                            for metric_name in metrics_names)
         metrics = arange_metrics(metrics, args["metric_princ"])
 
-        benchmark = init_benchmark(cl_type, monoview_algos, multiview_algos,
-                                   args)
+        benchmark = init_benchmark(cl_type, monoview_algos, multiview_algos)
         init_kwargs = init_kwargs_func(args, benchmark)
         data_base_time = time.time() - start
         argument_dictionaries = init_argument_dictionaries(