diff --git a/.gitlab-ci.yml b/.gitlab-ci.yml
index a874c2df756bb4b9bf7cc222c08cd43f9e977e4e..4e0f6146214e674c4c0fae99f06f937873d90894 100644
--- a/.gitlab-ci.yml
+++ b/.gitlab-ci.yml
@@ -23,7 +23,7 @@ doc:
         - export LC_ALL=$(locale -a | grep en_US)
         - export LANG=$(locale -a | grep en_US)
         - pip3 install -e . --no-deps
-        - sphinx-apidoc -o docs/source multiview_platform
+        - sphinx-apidoc -o docs/source summit
         - cd docs/source
         - sphinx-build -b html . ../build
         - cd ../..
@@ -45,7 +45,7 @@ pages:
         - export LANG=$(locale -a | grep en_US)
         - pip3 install -e . --no-deps
         - pytest-3
-        - sphinx-apidoc -o docs/source multiview_platform
+        - sphinx-apidoc -o docs/source summit
         - cd docs/source
         - sphinx-build -b html . ../build
         - cd ../..
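
For context outside the patch: the two CI jobs above only swap the `sphinx-apidoc` target from the old `multiview_platform` package to the renamed `summit` package. A minimal hedged sketch of reproducing those docs steps locally (assuming the repository root as working directory and that `summit` is the new top-level package; this mirrors, but is not part of, the CI script):
```python
# Hedged local reproduction of the changed CI steps; assumes Sphinx is
# installed and the renamed `summit` package sits at the repository root.
import subprocess

# Regenerate the API stubs against the new package name (the step changed above).
subprocess.run(["sphinx-apidoc", "-o", "docs/source", "summit"], check=True)

# Build the HTML docs; equivalent to `cd docs/source && sphinx-build -b html . ../build`.
subprocess.run(["sphinx-build", "-b", "html", "docs/source", "docs/build"], check=True)
```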
diff --git a/README.md b/README.md
index 09aa84b0e8317eb954025da5de7e85dcda98707f..9a15dc2951a17f6820550af09ed9d46edaa4d37c 100644
--- a/README.md
+++ b/README.md
@@ -71,7 +71,7 @@ to read it carefully before playing around with the parameters.
 
 You can create your own configuration file. In order to run the platform with it, run : 
 ```python
-from multiview_platform.execute import execute
+from summit.execute import execute
 execute(config_path="/absolute/path/to/your/config/file")
 ```
 
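The README hunk above changes only the import path of the entry point; the call signature is untouched. A hedged migration sketch (the config path is the README's own placeholder, not a real file):
```python
# Hedged migration sketch for the rename shown in the README hunk.
# Before this patch:  from multiview_platform.execute import execute
from summit.execute import execute

# Per the README, the configuration path must be absolute.
execute(config_path="/absolute/path/to/your/config/file")
```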
diff --git a/docs/build/.doctrees/analyzeresult.doctree b/docs/build/.doctrees/analyzeresult.doctree
index b8d35dcf6c7222c0b30766eb9c326e0026796a5a..0be62e701f090628368ae2c0935db6081291850c 100644
Binary files a/docs/build/.doctrees/analyzeresult.doctree and b/docs/build/.doctrees/analyzeresult.doctree differ
diff --git a/docs/build/.doctrees/api.doctree b/docs/build/.doctrees/api.doctree
index 7f44f77551c729acf56275d10cd353d055d0b94e..eb4b7f30685ed0cff26abc02653fb4efcd3fba58 100644
Binary files a/docs/build/.doctrees/api.doctree and b/docs/build/.doctrees/api.doctree differ
diff --git a/docs/build/.doctrees/environment.pickle b/docs/build/.doctrees/environment.pickle
index 43feb4d223b54ee975ddd6be581667d598289e53..07795c98f871675c48d4eb9506fd4bc1b461d690 100644
Binary files a/docs/build/.doctrees/environment.pickle and b/docs/build/.doctrees/environment.pickle differ
diff --git a/docs/build/.doctrees/execution.doctree b/docs/build/.doctrees/execution.doctree
index 11e3bc175cdfbb9b2b77fc7529e33f532b9dec3f..4a86e50057f889dc3d7dcef88b873a68465f1f5c 100644
Binary files a/docs/build/.doctrees/execution.doctree and b/docs/build/.doctrees/execution.doctree differ
diff --git a/docs/build/.doctrees/index.doctree b/docs/build/.doctrees/index.doctree
index edd354b30c675f56c4bf2ade85a70fbb6be1544c..04def4c76a8a1eb75d876b3dae2e720df92e01f9 100644
Binary files a/docs/build/.doctrees/index.doctree and b/docs/build/.doctrees/index.doctree differ
diff --git a/docs/build/.doctrees/modules.doctree b/docs/build/.doctrees/modules.doctree
index 9e17db095fa9d92f95454356791547ea386947dd..25cbaecbabdc2cc76a912e990e8196ee4711dcb6 100644
Binary files a/docs/build/.doctrees/modules.doctree and b/docs/build/.doctrees/modules.doctree differ
diff --git a/docs/build/.doctrees/readme_link.doctree b/docs/build/.doctrees/readme_link.doctree
index ae5fc7a166d2359e67528f0624b94b5e4a0c17f7..701f6f4be7c9c7aeb56c3a1fab407974828e8347 100644
Binary files a/docs/build/.doctrees/readme_link.doctree and b/docs/build/.doctrees/readme_link.doctree differ
diff --git a/docs/build/.doctrees/references/monomulti/exec_classif.doctree b/docs/build/.doctrees/references/monomulti/exec_classif.doctree
index 7a11ddd05eb4b1cbec9eed447e5373e71dab2c96..9f10201821229a9176ae41d8f77bfeb56ab5baec 100644
Binary files a/docs/build/.doctrees/references/monomulti/exec_classif.doctree and b/docs/build/.doctrees/references/monomulti/exec_classif.doctree differ
diff --git a/docs/build/.doctrees/references/monomulti/metrics.doctree b/docs/build/.doctrees/references/monomulti/metrics.doctree
index 096b7e694019c0cbe674a5d2468dd2c49c248000..42f60bb49ecfdef6b7853477a8f9b0d9868aa611 100644
Binary files a/docs/build/.doctrees/references/monomulti/metrics.doctree and b/docs/build/.doctrees/references/monomulti/metrics.doctree differ
diff --git a/docs/build/.doctrees/references/monomulti/multiview_classifiers/diversity_fusion.doctree b/docs/build/.doctrees/references/monomulti/multiview_classifiers/diversity_fusion.doctree
index 6490b29796a494cf3b719cd33d5de7df58f37e42..6ccd9737dbfdbf700b8ff5db11e127e8d6481d2c 100644
Binary files a/docs/build/.doctrees/references/monomulti/multiview_classifiers/diversity_fusion.doctree and b/docs/build/.doctrees/references/monomulti/multiview_classifiers/diversity_fusion.doctree differ
diff --git a/docs/build/.doctrees/references/monomulti/utils/execution.doctree b/docs/build/.doctrees/references/monomulti/utils/execution.doctree
index 5284271e75adeefaa9cff1f6d9f2e8cf99c02edb..f376292a47303b682b2f996f8fe7b0d2ea13a9c3 100644
Binary files a/docs/build/.doctrees/references/monomulti/utils/execution.doctree and b/docs/build/.doctrees/references/monomulti/utils/execution.doctree differ
diff --git a/docs/build/.doctrees/references/monomulti/utils/multiclass.doctree b/docs/build/.doctrees/references/monomulti/utils/multiclass.doctree
index 92b9960d084d8d1d0ef32b0a39388d1d48fb61a8..7eb60e733771d281be43d022315174e4ab09d398 100644
Binary files a/docs/build/.doctrees/references/monomulti/utils/multiclass.doctree and b/docs/build/.doctrees/references/monomulti/utils/multiclass.doctree differ
diff --git a/docs/build/.doctrees/references/monomultidoc.doctree b/docs/build/.doctrees/references/monomultidoc.doctree
index 92a207a766fb12ae0808fc36f5ff1eb081f38c92..62d6e5d2d5154447a1942148279649b10e0a1406 100644
Binary files a/docs/build/.doctrees/references/monomultidoc.doctree and b/docs/build/.doctrees/references/monomultidoc.doctree differ
diff --git a/docs/build/.doctrees/references/multiview_platform.doctree b/docs/build/.doctrees/references/multiview_platform.doctree
index 2f4f5a4b1f410eeabee61f010e82b89d65549bfc..a81fdb8b514f49f94eda24e3cb260bc9a2df7c5f 100644
Binary files a/docs/build/.doctrees/references/multiview_platform.doctree and b/docs/build/.doctrees/references/multiview_platform.doctree differ
diff --git a/docs/build/.doctrees/references/multiview_platform.mono_multi_view_classifiers.doctree b/docs/build/.doctrees/references/multiview_platform.mono_multi_view_classifiers.doctree
index ff63a7ab9e017c98f47bd8601ba79773edadd56c..5472d280347227a4bf7ff13296b03281ba4c5970 100644
Binary files a/docs/build/.doctrees/references/multiview_platform.mono_multi_view_classifiers.doctree and b/docs/build/.doctrees/references/multiview_platform.mono_multi_view_classifiers.doctree differ
diff --git a/docs/build/.doctrees/references/multiview_platform.mono_multi_view_classifiers.multiview_classifiers.difficulty_fusion.doctree b/docs/build/.doctrees/references/multiview_platform.mono_multi_view_classifiers.multiview_classifiers.difficulty_fusion.doctree
index 74443661cf4a4f9b62ef9101ce88d72248e11e0a..a0bef86d66b1ba6f1c2e2c0e8176fc8f554567fc 100644
Binary files a/docs/build/.doctrees/references/multiview_platform.mono_multi_view_classifiers.multiview_classifiers.difficulty_fusion.doctree and b/docs/build/.doctrees/references/multiview_platform.mono_multi_view_classifiers.multiview_classifiers.difficulty_fusion.doctree differ
diff --git a/docs/build/.doctrees/references/multiview_platform.mono_multi_view_classifiers.multiview_classifiers.disagree_fusion.doctree b/docs/build/.doctrees/references/multiview_platform.mono_multi_view_classifiers.multiview_classifiers.disagree_fusion.doctree
index c8708cfd216a23926de331e7847522ab4fedbad4..a9040bbb81090d04b1baacf38f6022ef42656bd6 100644
Binary files a/docs/build/.doctrees/references/multiview_platform.mono_multi_view_classifiers.multiview_classifiers.disagree_fusion.doctree and b/docs/build/.doctrees/references/multiview_platform.mono_multi_view_classifiers.multiview_classifiers.disagree_fusion.doctree differ
diff --git a/docs/build/.doctrees/references/multiview_platform.mono_multi_view_classifiers.multiview_classifiers.doctree b/docs/build/.doctrees/references/multiview_platform.mono_multi_view_classifiers.multiview_classifiers.doctree
index e56ce96a42b5aff9c5042f7070ecf4a7ff9f6fd8..e5a1b76ce9232888d79f5943e510719f92f39b88 100644
Binary files a/docs/build/.doctrees/references/multiview_platform.mono_multi_view_classifiers.multiview_classifiers.doctree and b/docs/build/.doctrees/references/multiview_platform.mono_multi_view_classifiers.multiview_classifiers.doctree differ
diff --git a/docs/build/.doctrees/references/multiview_platform.mono_multi_view_classifiers.multiview_classifiers.double_fault_fusion.doctree b/docs/build/.doctrees/references/multiview_platform.mono_multi_view_classifiers.multiview_classifiers.double_fault_fusion.doctree
index f610c48f3b7b84bf675b6b1b7fc4a6e29710238a..cabc377b7222522ff78f9133c4e2a25e47c2cca3 100644
Binary files a/docs/build/.doctrees/references/multiview_platform.mono_multi_view_classifiers.multiview_classifiers.double_fault_fusion.doctree and b/docs/build/.doctrees/references/multiview_platform.mono_multi_view_classifiers.multiview_classifiers.double_fault_fusion.doctree differ
diff --git a/docs/build/.doctrees/references/multiview_platform.mono_multi_view_classifiers.multiview_classifiers.entropy_fusion.doctree b/docs/build/.doctrees/references/multiview_platform.mono_multi_view_classifiers.multiview_classifiers.entropy_fusion.doctree
index 69fdc21140e6ccab3847639ae883c01762775900..8ce724bd80ca6e6ae6172806ef8d14e18c66db9f 100644
Binary files a/docs/build/.doctrees/references/multiview_platform.mono_multi_view_classifiers.multiview_classifiers.entropy_fusion.doctree and b/docs/build/.doctrees/references/multiview_platform.mono_multi_view_classifiers.multiview_classifiers.entropy_fusion.doctree differ
diff --git a/docs/build/.doctrees/references/multiview_platform.mono_multi_view_classifiers.multiview_classifiers.fat_late_fusion.doctree b/docs/build/.doctrees/references/multiview_platform.mono_multi_view_classifiers.multiview_classifiers.fat_late_fusion.doctree
index 30f0a5b7201db4aae1255acb87d13f19543f561b..0bdd4fdac2d6e1435e7fc6e877bc1335b543beef 100644
Binary files a/docs/build/.doctrees/references/multiview_platform.mono_multi_view_classifiers.multiview_classifiers.fat_late_fusion.doctree and b/docs/build/.doctrees/references/multiview_platform.mono_multi_view_classifiers.multiview_classifiers.fat_late_fusion.doctree differ
diff --git a/docs/build/.doctrees/references/multiview_platform.mono_multi_view_classifiers.multiview_classifiers.fat_scm_late_fusion.doctree b/docs/build/.doctrees/references/multiview_platform.mono_multi_view_classifiers.multiview_classifiers.fat_scm_late_fusion.doctree
index 2341ebd88f7454b736aaa63e9958a0d15034ae98..44d7ff3e02fdba2d4a03c4f632132526db4fc1e2 100644
Binary files a/docs/build/.doctrees/references/multiview_platform.mono_multi_view_classifiers.multiview_classifiers.fat_scm_late_fusion.doctree and b/docs/build/.doctrees/references/multiview_platform.mono_multi_view_classifiers.multiview_classifiers.fat_scm_late_fusion.doctree differ
diff --git a/docs/build/.doctrees/references/multiview_platform.mono_multi_view_classifiers.multiview_classifiers.fusion.Methods.EarlyFusionPackage.doctree b/docs/build/.doctrees/references/multiview_platform.mono_multi_view_classifiers.multiview_classifiers.fusion.Methods.EarlyFusionPackage.doctree
index 9a108bdb5d978122881df1ed4de6299e89f4dba3..d0ff49940118741541daa6aeabb751d9688e5ce4 100644
Binary files a/docs/build/.doctrees/references/multiview_platform.mono_multi_view_classifiers.multiview_classifiers.fusion.Methods.EarlyFusionPackage.doctree and b/docs/build/.doctrees/references/multiview_platform.mono_multi_view_classifiers.multiview_classifiers.fusion.Methods.EarlyFusionPackage.doctree differ
diff --git a/docs/build/.doctrees/references/multiview_platform.mono_multi_view_classifiers.multiview_classifiers.fusion.Methods.LateFusionPackage.doctree b/docs/build/.doctrees/references/multiview_platform.mono_multi_view_classifiers.multiview_classifiers.fusion.Methods.LateFusionPackage.doctree
index 4e42192ed0baf39a2cfd89ff0779d59d88c66bd5..a775c88d61db5eb16b4de95e6a9a1c32d878200b 100644
Binary files a/docs/build/.doctrees/references/multiview_platform.mono_multi_view_classifiers.multiview_classifiers.fusion.Methods.LateFusionPackage.doctree and b/docs/build/.doctrees/references/multiview_platform.mono_multi_view_classifiers.multiview_classifiers.fusion.Methods.LateFusionPackage.doctree differ
diff --git a/docs/build/.doctrees/references/multiview_platform.mono_multi_view_classifiers.multiview_classifiers.fusion.Methods.doctree b/docs/build/.doctrees/references/multiview_platform.mono_multi_view_classifiers.multiview_classifiers.fusion.Methods.doctree
index 00fc20c086c2daf5d0578fba5afabd3f2ce6bce3..caa79be7317f251c4965893f46a8c5b3a09fb034 100644
Binary files a/docs/build/.doctrees/references/multiview_platform.mono_multi_view_classifiers.multiview_classifiers.fusion.Methods.doctree and b/docs/build/.doctrees/references/multiview_platform.mono_multi_view_classifiers.multiview_classifiers.fusion.Methods.doctree differ
diff --git a/docs/build/.doctrees/references/multiview_platform.mono_multi_view_classifiers.multiview_classifiers.fusion.doctree b/docs/build/.doctrees/references/multiview_platform.mono_multi_view_classifiers.multiview_classifiers.fusion.doctree
index f815d4c7bf765b3013b6522205867d5d2a36b21f..d74d1fb32fe829b6ef53bb6fbb659f74f2f3d107 100644
Binary files a/docs/build/.doctrees/references/multiview_platform.mono_multi_view_classifiers.multiview_classifiers.fusion.doctree and b/docs/build/.doctrees/references/multiview_platform.mono_multi_view_classifiers.multiview_classifiers.fusion.doctree differ
diff --git a/docs/build/.doctrees/references/multiview_platform.mono_multi_view_classifiers.multiview_classifiers.pseudo_cq_fusion.doctree b/docs/build/.doctrees/references/multiview_platform.mono_multi_view_classifiers.multiview_classifiers.pseudo_cq_fusion.doctree
index 1fb54e21818faca8390636d1eb8491b590a102a8..eadb382b168219347d9942038fc58ff251a17dc6 100644
Binary files a/docs/build/.doctrees/references/multiview_platform.mono_multi_view_classifiers.multiview_classifiers.pseudo_cq_fusion.doctree and b/docs/build/.doctrees/references/multiview_platform.mono_multi_view_classifiers.multiview_classifiers.pseudo_cq_fusion.doctree differ
diff --git a/docs/build/.doctrees/references/multiview_platform.mono_multi_view_classifiers.utils.doctree b/docs/build/.doctrees/references/multiview_platform.mono_multi_view_classifiers.utils.doctree
index eaa072d363d86d806fad896be003e0a0aaf61421..90a4fb8eebf0eae568d42ef41e7f74bfef3a0226 100644
Binary files a/docs/build/.doctrees/references/multiview_platform.mono_multi_view_classifiers.utils.doctree and b/docs/build/.doctrees/references/multiview_platform.mono_multi_view_classifiers.utils.doctree differ
diff --git a/docs/build/.doctrees/references/multiview_platform.tests.doctree b/docs/build/.doctrees/references/multiview_platform.tests.doctree
index 26476af6724ba3880a1f3e198d1feef52d9baf52..cf6e87cdc0398b8950fd0089dfd36423c9236a62 100644
Binary files a/docs/build/.doctrees/references/multiview_platform.tests.doctree and b/docs/build/.doctrees/references/multiview_platform.tests.doctree differ
diff --git a/docs/build/.doctrees/references/multiview_platform.tests.test_metrics.doctree b/docs/build/.doctrees/references/multiview_platform.tests.test_metrics.doctree
index 2064227f2f93fa11e8f040ae9c94f652201eb2b8..0a497cbae47a7bd46f9c88a89180361d1b16fc13 100644
Binary files a/docs/build/.doctrees/references/multiview_platform.tests.test_metrics.doctree and b/docs/build/.doctrees/references/multiview_platform.tests.test_metrics.doctree differ
diff --git a/docs/build/.doctrees/references/multiview_platform.tests.test_mono_view.doctree b/docs/build/.doctrees/references/multiview_platform.tests.test_mono_view.doctree
index b92be902c18f4453c9deb425deb1f1b8951b8693..29da8f7d772ba8115ba576dfedd707ddf7c5dd6a 100644
Binary files a/docs/build/.doctrees/references/multiview_platform.tests.test_mono_view.doctree and b/docs/build/.doctrees/references/multiview_platform.tests.test_mono_view.doctree differ
diff --git a/docs/build/.doctrees/references/multiview_platform.tests.test_monoview_classifiers.doctree b/docs/build/.doctrees/references/multiview_platform.tests.test_monoview_classifiers.doctree
index a2dc9a022fbd68e22268c743696d8afe64994991..65baa8e2fcd4960d7c6c69555480410456c5e26d 100644
Binary files a/docs/build/.doctrees/references/multiview_platform.tests.test_monoview_classifiers.doctree and b/docs/build/.doctrees/references/multiview_platform.tests.test_monoview_classifiers.doctree differ
diff --git a/docs/build/.doctrees/references/multiview_platform.tests.test_multiview_classifiers.Test_DifficultyMeasure.doctree b/docs/build/.doctrees/references/multiview_platform.tests.test_multiview_classifiers.Test_DifficultyMeasure.doctree
index 6f4926fdc3001646180525630652e2b8af196aca..d1f76b3257a3f56fbbce61eed1f98a19884bcaa2 100644
Binary files a/docs/build/.doctrees/references/multiview_platform.tests.test_multiview_classifiers.Test_DifficultyMeasure.doctree and b/docs/build/.doctrees/references/multiview_platform.tests.test_multiview_classifiers.Test_DifficultyMeasure.doctree differ
diff --git a/docs/build/.doctrees/references/multiview_platform.tests.test_multiview_classifiers.Test_DisagreeFusion.doctree b/docs/build/.doctrees/references/multiview_platform.tests.test_multiview_classifiers.Test_DisagreeFusion.doctree
index d4b33d83260a6d19843fa72173119384d15c14c7..477ff25c147aa7a98ca4cc56eb7eda00713a0bc4 100644
Binary files a/docs/build/.doctrees/references/multiview_platform.tests.test_multiview_classifiers.Test_DisagreeFusion.doctree and b/docs/build/.doctrees/references/multiview_platform.tests.test_multiview_classifiers.Test_DisagreeFusion.doctree differ
diff --git a/docs/build/.doctrees/references/multiview_platform.tests.test_multiview_classifiers.Test_DoubleFaultFusion.doctree b/docs/build/.doctrees/references/multiview_platform.tests.test_multiview_classifiers.Test_DoubleFaultFusion.doctree
index 8adc51a8ed3f97e445663be96db79f2d92b6f5d2..33a74f13b3b2283ef56239be9803fedc7bc5dd22 100644
Binary files a/docs/build/.doctrees/references/multiview_platform.tests.test_multiview_classifiers.Test_DoubleFaultFusion.doctree and b/docs/build/.doctrees/references/multiview_platform.tests.test_multiview_classifiers.Test_DoubleFaultFusion.doctree differ
diff --git a/docs/build/.doctrees/references/multiview_platform.tests.test_multiview_classifiers.Test_EntropyFusion.doctree b/docs/build/.doctrees/references/multiview_platform.tests.test_multiview_classifiers.Test_EntropyFusion.doctree
index 1377316f94c62552dca66c876d0ce519e8002fd9..be4919f18e0a7e45e22e41eb7d35f255211b67c5 100644
Binary files a/docs/build/.doctrees/references/multiview_platform.tests.test_multiview_classifiers.Test_EntropyFusion.doctree and b/docs/build/.doctrees/references/multiview_platform.tests.test_multiview_classifiers.Test_EntropyFusion.doctree differ
diff --git a/docs/build/.doctrees/references/multiview_platform.tests.test_multiview_classifiers.Test_Fusion.doctree b/docs/build/.doctrees/references/multiview_platform.tests.test_multiview_classifiers.Test_Fusion.doctree
index f14595e0b876a96395a53b946e76aded3786b1f8..5d64c96cc2a06d61905991c4b5441a36f242c2ab 100644
Binary files a/docs/build/.doctrees/references/multiview_platform.tests.test_multiview_classifiers.Test_Fusion.doctree and b/docs/build/.doctrees/references/multiview_platform.tests.test_multiview_classifiers.Test_Fusion.doctree differ
diff --git a/docs/build/.doctrees/references/multiview_platform.tests.test_multiview_classifiers.Test_PseudoCQMeasure.doctree b/docs/build/.doctrees/references/multiview_platform.tests.test_multiview_classifiers.Test_PseudoCQMeasure.doctree
index 8e3959d7d9ba4660e7673380c6f4def677d3826c..9028c9458e733f12c0c570f510929942399443d9 100644
Binary files a/docs/build/.doctrees/references/multiview_platform.tests.test_multiview_classifiers.Test_PseudoCQMeasure.doctree and b/docs/build/.doctrees/references/multiview_platform.tests.test_multiview_classifiers.Test_PseudoCQMeasure.doctree differ
diff --git a/docs/build/.doctrees/references/multiview_platform.tests.test_multiview_classifiers.doctree b/docs/build/.doctrees/references/multiview_platform.tests.test_multiview_classifiers.doctree
index a9730f8bf548b8dd686326ea3b1d2d6b66ebacf9..c89d56ec01036a4ea143b421ded578eefbf9a31f 100644
Binary files a/docs/build/.doctrees/references/multiview_platform.tests.test_multiview_classifiers.doctree and b/docs/build/.doctrees/references/multiview_platform.tests.test_multiview_classifiers.doctree differ
diff --git a/docs/build/.doctrees/references/multiview_platform.tests.test_utils.doctree b/docs/build/.doctrees/references/multiview_platform.tests.test_utils.doctree
index a5e0bf8aa04dd608b22e269b6c61cfaacf90c109..bb1957167ae6c94614bfa784d1f2cb7f55201bce 100644
Binary files a/docs/build/.doctrees/references/multiview_platform.tests.test_utils.doctree and b/docs/build/.doctrees/references/multiview_platform.tests.test_utils.doctree differ
diff --git a/docs/build/.doctrees/tutorials/example1.doctree b/docs/build/.doctrees/tutorials/example1.doctree
index 478416f114e3db65707f4a31115ef156e8870513..e9f5ce65381f0706171543ed068eeffa89aae8b0 100644
Binary files a/docs/build/.doctrees/tutorials/example1.doctree and b/docs/build/.doctrees/tutorials/example1.doctree differ
diff --git a/docs/build/.doctrees/tutorials/example2.doctree b/docs/build/.doctrees/tutorials/example2.doctree
index cfbdafe63870b335e9c9ffcd86192b7390389353..0579bb50ca5456999273c55c98ee1a5ca9375b3c 100644
Binary files a/docs/build/.doctrees/tutorials/example2.doctree and b/docs/build/.doctrees/tutorials/example2.doctree differ
diff --git a/docs/build/analyzeresult.html b/docs/build/analyzeresult.html
index 3296c15955c057c7d44789dc79df87cf4dd45f89..bca0983ee76e6820be17f7d830366b37a40f0af7 100644
--- a/docs/build/analyzeresult.html
+++ b/docs/build/analyzeresult.html
@@ -1,9 +1,11 @@
 
-<!DOCTYPE html>
+<!DOCTYPE html PUBLIC "-//W3C//DTD XHTML 1.0 Transitional//EN"
+  "http://www.w3.org/TR/xhtml1/DTD/xhtml1-transitional.dtd">
 
-<html>
+<html xmlns="http://www.w3.org/1999/xhtml">
   <head>
-    <meta charset="utf-8" />
+    <meta http-equiv="X-UA-Compatible" content="IE=Edge" />
+    <meta http-equiv="Content-Type" content="text/html; charset=utf-8" />
     <title>Result analysis module &#8212; SuMMIT 0 documentation</title>
     <link rel="stylesheet" href="_static/classic.css" type="text/css" />
     <link rel="stylesheet" href="_static/pygments.css" type="text/css" />
@@ -36,8 +38,8 @@
         <div class="bodywrapper">
           <div class="body" role="main">
             
-  <div class="section" id="module-multiview_platform.mono_multi_view_classifiers.result_analysis">
-<span id="result-analysis-module"></span><h1>Result analysis module<a class="headerlink" href="#module-multiview_platform.mono_multi_view_classifiers.result_analysis" title="Permalink to this headline">¶</a></h1>
+  <div class="section" id="module-summit.multiview_platform.result_analysis">
+<span id="result-analysis-module"></span><h1>Result analysis module<a class="headerlink" href="#module-summit.multiview_platform.result_analysis" title="Permalink to this headline">¶</a></h1>
 </div>
 
 
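The regenerated anchors above also reveal the new layout, not just a rename: `multiview_platform.mono_multi_view_classifiers.result_analysis` becomes `summit.multiview_platform.result_analysis`, i.e. the old top-level package now lives under `summit`. A hedged sketch of the import paths implied by the anchor ids in this diff (anything beyond those ids is an assumption):
```python
# Module paths implied by the regenerated Sphinx anchors in this patch.
from summit.execute import execute                     # was multiview_platform.execute
from summit.multiview_platform import result_analysis  # was multiview_platform.mono_multi_view_classifiers.result_analysis
```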
diff --git a/docs/build/api.html b/docs/build/api.html
index 4ed7126374f6f1a22298069c9f612e9e205c0d45..5d9bb1ff1ddadbff39364d09b6e9eaa1a5026d35 100644
--- a/docs/build/api.html
+++ b/docs/build/api.html
@@ -1,9 +1,11 @@
 
-<!DOCTYPE html>
+<!DOCTYPE html PUBLIC "-//W3C//DTD XHTML 1.0 Transitional//EN"
+  "http://www.w3.org/TR/xhtml1/DTD/xhtml1-transitional.dtd">
 
-<html>
+<html xmlns="http://www.w3.org/1999/xhtml">
   <head>
-    <meta charset="utf-8" />
+    <meta http-equiv="X-UA-Compatible" content="IE=Edge" />
+    <meta http-equiv="Content-Type" content="text/html; charset=utf-8" />
     <title>Multiview Platform &#8212; SuMMIT 0 documentation</title>
     <link rel="stylesheet" href="_static/classic.css" type="text/css" />
     <link rel="stylesheet" href="_static/pygments.css" type="text/css" />
diff --git a/docs/build/execution.html b/docs/build/execution.html
index 5353134291364512c7b205cc46bb8e3b3cae9c45..c6420acb12a60d5b1269e0b6c689a2e6a431f25b 100644
--- a/docs/build/execution.html
+++ b/docs/build/execution.html
@@ -1,9 +1,11 @@
 
-<!DOCTYPE html>
+<!DOCTYPE html PUBLIC "-//W3C//DTD XHTML 1.0 Transitional//EN"
+  "http://www.w3.org/TR/xhtml1/DTD/xhtml1-transitional.dtd">
 
-<html>
+<html xmlns="http://www.w3.org/1999/xhtml">
   <head>
-    <meta charset="utf-8" />
+    <meta http-equiv="X-UA-Compatible" content="IE=Edge" />
+    <meta http-equiv="Content-Type" content="text/html; charset=utf-8" />
     <title>Welcome to the exection documentation &#8212; SuMMIT 0 documentation</title>
     <link rel="stylesheet" href="_static/classic.css" type="text/css" />
     <link rel="stylesheet" href="_static/pygments.css" type="text/css" />
@@ -36,8 +38,8 @@
         <div class="bodywrapper">
           <div class="body" role="main">
             
-  <div class="section" id="module-multiview_platform.execute">
-<span id="welcome-to-the-exection-documentation"></span><h1>Welcome to the exection documentation<a class="headerlink" href="#module-multiview_platform.execute" title="Permalink to this headline">¶</a></h1>
+  <div class="section" id="module-summit.execute">
+<span id="welcome-to-the-exection-documentation"></span><h1>Welcome to the exection documentation<a class="headerlink" href="#module-summit.execute" title="Permalink to this headline">¶</a></h1>
 <p>This is the execution module, used to execute the code</p>
 </div>
 
diff --git a/docs/build/genindex.html b/docs/build/genindex.html
index 87f5e41e6635b8d59bc7dc2ba4cf15bfa3b7ff95..262b50487b48cc4d4524d17d769b012cec5b3cf8 100644
--- a/docs/build/genindex.html
+++ b/docs/build/genindex.html
@@ -1,10 +1,12 @@
 
 
-<!DOCTYPE html>
+<!DOCTYPE html PUBLIC "-//W3C//DTD XHTML 1.0 Transitional//EN"
+  "http://www.w3.org/TR/xhtml1/DTD/xhtml1-transitional.dtd">
 
-<html>
+<html xmlns="http://www.w3.org/1999/xhtml">
   <head>
-    <meta charset="utf-8" />
+    <meta http-equiv="X-UA-Compatible" content="IE=Edge" />
+    <meta http-equiv="Content-Type" content="text/html; charset=utf-8" />
     <title>Index &#8212; SuMMIT 0 documentation</title>
     <link rel="stylesheet" href="_static/classic.css" type="text/css" />
     <link rel="stylesheet" href="_static/pygments.css" type="text/css" />
@@ -41,836 +43,31 @@
 <h1 id="index">Index</h1>
 
 <div class="genindex-jumpbox">
- <a href="#A"><strong>A</strong></a>
- | <a href="#B"><strong>B</strong></a>
- | <a href="#C"><strong>C</strong></a>
- | <a href="#D"><strong>D</strong></a>
- | <a href="#E"><strong>E</strong></a>
- | <a href="#F"><strong>F</strong></a>
- | <a href="#G"><strong>G</strong></a>
- | <a href="#H"><strong>H</strong></a>
- | <a href="#I"><strong>I</strong></a>
- | <a href="#M"><strong>M</strong></a>
- | <a href="#N"><strong>N</strong></a>
- | <a href="#O"><strong>O</strong></a>
- | <a href="#P"><strong>P</strong></a>
- | <a href="#R"><strong>R</strong></a>
+ <a href="#M"><strong>M</strong></a>
  | <a href="#S"><strong>S</strong></a>
- | <a href="#T"><strong>T</strong></a>
- | <a href="#U"><strong>U</strong></a>
- | <a href="#V"><strong>V</strong></a>
  
 </div>
-<h2 id="A">A</h2>
-<table style="width: 100%" class="indextable genindextable"><tr>
-  <td style="width: 33%; vertical-align: top;"><ul>
-      <li><a href="references/multiview_platform.tests.test_utils.html#multiview_platform.tests.test_utils.test_multiclass.FakeEstimNative.accepts_multi_class">accepts_multi_class() (FakeEstimNative method)</a>
-
-      <ul>
-        <li><a href="references/multiview_platform.tests.test_utils.html#multiview_platform.tests.test_utils.test_multiclass.FakeMCEstim.accepts_multi_class">(FakeMCEstim method)</a>
-</li>
-      </ul></li>
-  </ul></td>
-  <td style="width: 33%; vertical-align: top;"><ul>
-      <li><a href="references/multiview_platform.mono_multi_view_classifiers.utils.html#multiview_platform.mono_multi_view_classifiers.utils.dataset.HDF5Dataset.add_gaussian_noise">add_gaussian_noise() (HDF5Dataset method)</a>
-</li>
-      <li><a href="references/monomulti/exec_classif.html#multiview_platform.mono_multi_view_classifiers.exec_classif.arange_metrics">arange_metrics() (in module multiview_platform.mono_multi_view_classifiers.exec_classif)</a>, <a href="references/multiview_platform.mono_multi_view_classifiers.html#multiview_platform.mono_multi_view_classifiers.exec_classif.arange_metrics">[1]</a>
-</li>
-  </ul></td>
-</tr></table>
-
-<h2 id="B">B</h2>
-<table style="width: 100%" class="indextable genindextable"><tr>
-  <td style="width: 33%; vertical-align: top;"><ul>
-      <li><a href="references/monomulti/exec_classif.html#multiview_platform.mono_multi_view_classifiers.exec_classif.benchmark_init">benchmark_init() (in module multiview_platform.mono_multi_view_classifiers.exec_classif)</a>, <a href="references/multiview_platform.mono_multi_view_classifiers.html#multiview_platform.mono_multi_view_classifiers.exec_classif.benchmark_init">[1]</a>
-</li>
-  </ul></td>
-</tr></table>
-
-<h2 id="C">C</h2>
-<table style="width: 100%" class="indextable genindextable"><tr>
-  <td style="width: 33%; vertical-align: top;"><ul>
-      <li><a href="references/multiview_platform.mono_multi_view_classifiers.utils.html#multiview_platform.mono_multi_view_classifiers.utils.dataset.Dataset.check_selected_label_names">check_selected_label_names() (Dataset method)</a>
-</li>
-      <li><a href="references/multiview_platform.mono_multi_view_classifiers.utils.html#multiview_platform.mono_multi_view_classifiers.utils.make_file_config.ConfigurationMaker">ConfigurationMaker (class in multiview_platform.mono_multi_view_classifiers.utils.make_file_config)</a>
-</li>
-      <li><a href="references/multiview_platform.mono_multi_view_classifiers.utils.html#multiview_platform.mono_multi_view_classifiers.utils.dataset.confirm">confirm() (in module multiview_platform.mono_multi_view_classifiers.utils.dataset)</a>
-</li>
-  </ul></td>
-  <td style="width: 33%; vertical-align: top;"><ul>
-      <li><a href="references/multiview_platform.mono_multi_view_classifiers.utils.html#multiview_platform.mono_multi_view_classifiers.utils.dataset.copy_hdf5">copy_hdf5() (in module multiview_platform.mono_multi_view_classifiers.utils.dataset)</a>
-</li>
-      <li><a href="references/multiview_platform.mono_multi_view_classifiers.utils.html#multiview_platform.mono_multi_view_classifiers.utils.dataset.HDF5Dataset.copy_view">copy_view() (HDF5Dataset method)</a>
-</li>
-      <li><a href="references/multiview_platform.mono_multi_view_classifiers.utils.html#multiview_platform.mono_multi_view_classifiers.utils.hyper_parameter_search.CustomRandint">CustomRandint (class in multiview_platform.mono_multi_view_classifiers.utils.hyper_parameter_search)</a>
-</li>
-      <li><a href="references/multiview_platform.mono_multi_view_classifiers.utils.html#multiview_platform.mono_multi_view_classifiers.utils.hyper_parameter_search.CustomUniform">CustomUniform (class in multiview_platform.mono_multi_view_classifiers.utils.hyper_parameter_search)</a>
-</li>
-  </ul></td>
-</tr></table>
-
-<h2 id="D">D</h2>
-<table style="width: 100%" class="indextable genindextable"><tr>
-  <td style="width: 33%; vertical-align: top;"><ul>
-      <li><a href="references/multiview_platform.mono_multi_view_classifiers.utils.html#multiview_platform.mono_multi_view_classifiers.utils.dataset.Dataset">Dataset (class in multiview_platform.mono_multi_view_classifiers.utils.dataset)</a>
-</li>
-      <li><a href="references/multiview_platform.mono_multi_view_classifiers.utils.html#multiview_platform.mono_multi_view_classifiers.utils.dataset.HDF5Dataset.dataset">dataset (HDF5Dataset attribute)</a>
-</li>
-      <li><a href="references/multiview_platform.mono_multi_view_classifiers.utils.html#multiview_platform.mono_multi_view_classifiers.utils.get_multiview_db.DatasetError">DatasetError</a>
-</li>
-      <li><a href="references/multiview_platform.mono_multi_view_classifiers.utils.html#multiview_platform.mono_multi_view_classifiers.utils.dataset.datasets_already_exist">datasets_already_exist() (in module multiview_platform.mono_multi_view_classifiers.utils.dataset)</a>
-</li>
-      <li><a href="references/multiview_platform.mono_multi_view_classifiers.utils.html#multiview_platform.mono_multi_view_classifiers.utils.multiclass.OVOWrapper.decision_function">decision_function() (OVOWrapper method)</a>
-</li>
-      <li><a href="references/multiview_platform.mono_multi_view_classifiers.utils.html#multiview_platform.mono_multi_view_classifiers.utils.dataset.delete_HDF5">delete_HDF5() (in module multiview_platform.mono_multi_view_classifiers.utils.dataset)</a>
-</li>
-  </ul></td>
-  <td style="width: 33%; vertical-align: top;"><ul>
-      <li><a href="references/multiview_platform.mono_multi_view_classifiers.multiview_classifiers.difficulty_fusion.html#multiview_platform.mono_multi_view_classifiers.multiview_classifiers.difficulty_fusion.DifficultyFusion">DifficultyFusion (class in multiview_platform.mono_multi_view_classifiers.multiview_classifiers.difficulty_fusion)</a>
-</li>
-      <li><a href="references/multiview_platform.mono_multi_view_classifiers.multiview_classifiers.disagree_fusion.html#multiview_platform.mono_multi_view_classifiers.multiview_classifiers.disagree_fusion.DisagreeFusion">DisagreeFusion (class in multiview_platform.mono_multi_view_classifiers.multiview_classifiers.disagree_fusion)</a>
-</li>
-      <li><a href="references/multiview_platform.mono_multi_view_classifiers.multiview_classifiers.difficulty_fusion.html#multiview_platform.mono_multi_view_classifiers.multiview_classifiers.difficulty_fusion.DifficultyFusion.diversity_measure">diversity_measure() (DifficultyFusion method)</a>
-
-      <ul>
-        <li><a href="references/multiview_platform.mono_multi_view_classifiers.multiview_classifiers.disagree_fusion.html#multiview_platform.mono_multi_view_classifiers.multiview_classifiers.disagree_fusion.DisagreeFusion.diversity_measure">(DisagreeFusion method)</a>
-</li>
-        <li><a href="references/multiview_platform.mono_multi_view_classifiers.multiview_classifiers.double_fault_fusion.html#multiview_platform.mono_multi_view_classifiers.multiview_classifiers.double_fault_fusion.DoubleFaultFusion.diversity_measure">(DoubleFaultFusion method)</a>
-</li>
-        <li><a href="references/multiview_platform.mono_multi_view_classifiers.multiview_classifiers.entropy_fusion.html#multiview_platform.mono_multi_view_classifiers.multiview_classifiers.entropy_fusion.EntropyFusion.diversity_measure">(EntropyFusion method)</a>
-</li>
-      </ul></li>
-      <li><a href="references/multiview_platform.mono_multi_view_classifiers.multiview_classifiers.double_fault_fusion.html#multiview_platform.mono_multi_view_classifiers.multiview_classifiers.double_fault_fusion.DoubleFaultFusion">DoubleFaultFusion (class in multiview_platform.mono_multi_view_classifiers.multiview_classifiers.double_fault_fusion)</a>
-</li>
-  </ul></td>
-</tr></table>
-
-<h2 id="E">E</h2>
-<table style="width: 100%" class="indextable genindextable"><tr>
-  <td style="width: 33%; vertical-align: top;"><ul>
-      <li><a href="references/multiview_platform.mono_multi_view_classifiers.multiview_classifiers.entropy_fusion.html#multiview_platform.mono_multi_view_classifiers.multiview_classifiers.entropy_fusion.EntropyFusion">EntropyFusion (class in multiview_platform.mono_multi_view_classifiers.multiview_classifiers.entropy_fusion)</a>
-</li>
-      <li><a href="references/monomulti/exec_classif.html#multiview_platform.mono_multi_view_classifiers.exec_classif.exec_benchmark">exec_benchmark() (in module multiview_platform.mono_multi_view_classifiers.exec_classif)</a>, <a href="references/multiview_platform.mono_multi_view_classifiers.html#multiview_platform.mono_multi_view_classifiers.exec_classif.exec_benchmark">[1]</a>
-</li>
-      <li><a href="references/monomulti/exec_classif.html#multiview_platform.mono_multi_view_classifiers.exec_classif.exec_classif">exec_classif() (in module multiview_platform.mono_multi_view_classifiers.exec_classif)</a>, <a href="references/multiview_platform.mono_multi_view_classifiers.html#multiview_platform.mono_multi_view_classifiers.exec_classif.exec_classif">[1]</a>
-</li>
-  </ul></td>
-  <td style="width: 33%; vertical-align: top;"><ul>
-      <li><a href="references/multiview_platform.mono_multi_view_classifiers.html#multiview_platform.mono_multi_view_classifiers.exec_classif.exec_one_benchmark_mono_core">exec_one_benchmark_mono_core() (in module multiview_platform.mono_multi_view_classifiers.exec_classif)</a>
-</li>
-      <li><a href="references/multiview_platform.html#multiview_platform.execute.execute">execute() (in module multiview_platform.execute)</a>
-</li>
-      <li><a href="references/monomulti/exec_classif.html#multiview_platform.mono_multi_view_classifiers.exec_classif.extract_dict">extract_dict() (in module multiview_platform.mono_multi_view_classifiers.exec_classif)</a>, <a href="references/multiview_platform.mono_multi_view_classifiers.html#multiview_platform.mono_multi_view_classifiers.exec_classif.extract_dict">[1]</a>
-</li>
-      <li><a href="references/multiview_platform.mono_multi_view_classifiers.utils.html#multiview_platform.mono_multi_view_classifiers.utils.dataset.extract_subset">extract_subset() (in module multiview_platform.mono_multi_view_classifiers.utils.dataset)</a>
-</li>
-  </ul></td>
-</tr></table>
-
-<h2 id="F">F</h2>
-<table style="width: 100%" class="indextable genindextable"><tr>
-  <td style="width: 33%; vertical-align: top;"><ul>
-      <li><a href="references/multiview_platform.tests.test_utils.html#multiview_platform.tests.test_utils.test_execution.FakeArg">FakeArg (class in multiview_platform.tests.test_utils.test_execution)</a>
-</li>
-      <li><a href="references/multiview_platform.tests.test_utils.html#multiview_platform.tests.test_utils.test_multiclass.FakeDset">FakeDset (class in multiview_platform.tests.test_utils.test_multiclass)</a>
-</li>
-      <li><a href="references/multiview_platform.tests.test_utils.html#multiview_platform.tests.test_utils.test_multiclass.FakeEstimNative">FakeEstimNative (class in multiview_platform.tests.test_utils.test_multiclass)</a>
-</li>
-      <li><a href="references/multiview_platform.tests.test_utils.html#multiview_platform.tests.test_utils.test_multiclass.FakeMCEstim">FakeMCEstim (class in multiview_platform.tests.test_utils.test_multiclass)</a>
-</li>
-      <li><a href="references/multiview_platform.tests.test_utils.html#multiview_platform.tests.test_utils.test_multiclass.FakeMVClassifier">FakeMVClassifier (class in multiview_platform.tests.test_utils.test_multiclass)</a>
-</li>
-      <li><a href="references/multiview_platform.tests.test_utils.html#multiview_platform.tests.test_utils.test_multiclass.FakeMVClassifierProb">FakeMVClassifierProb (class in multiview_platform.tests.test_utils.test_multiclass)</a>
-</li>
-      <li><a href="references/multiview_platform.tests.test_utils.html#multiview_platform.tests.test_utils.test_multiclass.FakeNonProbaEstim">FakeNonProbaEstim (class in multiview_platform.tests.test_utils.test_multiclass)</a>
-</li>
-      <li><a href="references/multiview_platform.tests.test_utils.html#multiview_platform.tests.test_utils.test_multiclass.FakeProbaEstim">FakeProbaEstim (class in multiview_platform.tests.test_utils.test_multiclass)</a>
-</li>
-      <li><a href="references/multiview_platform.mono_multi_view_classifiers.utils.html#multiview_platform.mono_multi_view_classifiers.utils.dataset.Dataset.filter">filter() (Dataset method)</a>
-
-      <ul>
-        <li><a href="references/multiview_platform.mono_multi_view_classifiers.utils.html#multiview_platform.mono_multi_view_classifiers.utils.dataset.HDF5Dataset.filter">(HDF5Dataset method)</a>
-</li>
-        <li><a href="references/multiview_platform.mono_multi_view_classifiers.utils.html#multiview_platform.mono_multi_view_classifiers.utils.dataset.RAMDataset.filter">(RAMDataset method)</a>
-</li>
-      </ul></li>
-  </ul></td>
-  <td style="width: 33%; vertical-align: top;"><ul>
-      <li><a href="references/multiview_platform.mono_multi_view_classifiers.utils.html#multiview_platform.mono_multi_view_classifiers.utils.execution.find_dataset_names">find_dataset_names() (in module multiview_platform.mono_multi_view_classifiers.utils.execution)</a>
-</li>
-      <li><a href="references/multiview_platform.tests.test_utils.html#multiview_platform.tests.test_utils.test_multiclass.FakeMVClassifier.fit">fit() (FakeMVClassifier method)</a>
-
-      <ul>
-        <li><a href="references/multiview_platform.mono_multi_view_classifiers.utils.html#multiview_platform.mono_multi_view_classifiers.utils.hyper_parameter_search.Grid.fit">(Grid method)</a>
-</li>
-        <li><a href="references/multiview_platform.mono_multi_view_classifiers.utils.html#multiview_platform.mono_multi_view_classifiers.utils.multiclass.MultiviewOVOWrapper.fit">(MultiviewOVOWrapper method)</a>
-</li>
-        <li><a href="references/multiview_platform.mono_multi_view_classifiers.utils.html#multiview_platform.mono_multi_view_classifiers.utils.multiclass.MultiviewOVRWrapper.fit">(MultiviewOVRWrapper method)</a>
-</li>
-        <li><a href="references/multiview_platform.mono_multi_view_classifiers.utils.html#multiview_platform.mono_multi_view_classifiers.utils.hyper_parameter_search.Random.fit">(Random method)</a>
-</li>
-      </ul></li>
-      <li><a href="references/multiview_platform.mono_multi_view_classifiers.utils.html#multiview_platform.mono_multi_view_classifiers.utils.hyper_parameter_search.HPSearch.fit_multiview">fit_multiview() (HPSearch method)</a>
-</li>
-      <li><a href="references/multiview_platform.mono_multi_view_classifiers.utils.html#multiview_platform.mono_multi_view_classifiers.utils.hyper_parameter_search.format_params">format_params() (in module multiview_platform.mono_multi_view_classifiers.utils.hyper_parameter_search)</a>
-
-      <ul>
-        <li><a href="references/multiview_platform.mono_multi_view_classifiers.utils.html#multiview_platform.mono_multi_view_classifiers.utils.multiclass.MultiClassWrapper.format_params">(MultiClassWrapper method)</a>
-</li>
-      </ul></li>
-  </ul></td>
-</tr></table>
-
-<h2 id="G">G</h2>
-<table style="width: 100%" class="indextable genindextable"><tr>
-  <td style="width: 33%; vertical-align: top;"><ul>
-      <li><a href="references/multiview_platform.mono_multi_view_classifiers.utils.html#multiview_platform.mono_multi_view_classifiers.utils.execution.gen_argument_dictionaries">gen_argument_dictionaries() (in module multiview_platform.mono_multi_view_classifiers.utils.execution)</a>
-</li>
-      <li><a href="references/multiview_platform.mono_multi_view_classifiers.utils.html#multiview_platform.mono_multi_view_classifiers.utils.execution.gen_direcorties_names">gen_direcorties_names() (in module multiview_platform.mono_multi_view_classifiers.utils.execution)</a>
-</li>
-      <li><a href="references/multiview_platform.mono_multi_view_classifiers.utils.html#multiview_platform.mono_multi_view_classifiers.utils.hyper_parameter_search.gen_heat_maps">gen_heat_maps() (in module multiview_platform.mono_multi_view_classifiers.utils.hyper_parameter_search)</a>
-</li>
-      <li><a href="references/multiview_platform.mono_multi_view_classifiers.utils.html#multiview_platform.mono_multi_view_classifiers.utils.execution.gen_k_folds">gen_k_folds() (in module multiview_platform.mono_multi_view_classifiers.utils.execution)</a>
-</li>
-      <li><a href="references/multiview_platform.mono_multi_view_classifiers.utils.html#multiview_platform.mono_multi_view_classifiers.utils.hyper_parameter_search.HPSearch.gen_report">gen_report() (HPSearch method)</a>
-</li>
-      <li><a href="references/multiview_platform.mono_multi_view_classifiers.html#multiview_platform.mono_multi_view_classifiers.exec_classif.gen_single_monoview_arg_dictionary">gen_single_monoview_arg_dictionary() (in module multiview_platform.mono_multi_view_classifiers.exec_classif)</a>
-</li>
-      <li><a href="references/multiview_platform.mono_multi_view_classifiers.html#multiview_platform.mono_multi_view_classifiers.exec_classif.gen_single_multiview_arg_dictionary">gen_single_multiview_arg_dictionary() (in module multiview_platform.mono_multi_view_classifiers.exec_classif)</a>
-</li>
-      <li><a href="references/multiview_platform.mono_multi_view_classifiers.utils.html#multiview_platform.mono_multi_view_classifiers.utils.execution.gen_splits">gen_splits() (in module multiview_platform.mono_multi_view_classifiers.utils.execution)</a>
-</li>
-      <li><a href="references/multiview_platform.mono_multi_view_classifiers.utils.html#multiview_platform.mono_multi_view_classifiers.utils.hyper_parameter_search.HPSearch.get_best_params">get_best_params() (HPSearch method)</a>
-</li>
-      <li><a href="references/multiview_platform.mono_multi_view_classifiers.utils.html#multiview_platform.mono_multi_view_classifiers.utils.hyper_parameter_search.Grid.get_candidate_params">get_candidate_params() (Grid method)</a>
-
-      <ul>
-        <li><a href="references/multiview_platform.mono_multi_view_classifiers.utils.html#multiview_platform.mono_multi_view_classifiers.utils.hyper_parameter_search.HPSearch.get_candidate_params">(HPSearch method)</a>
-</li>
-        <li><a href="references/multiview_platform.mono_multi_view_classifiers.utils.html#multiview_platform.mono_multi_view_classifiers.utils.hyper_parameter_search.Random.get_candidate_params">(Random method)</a>
-</li>
-      </ul></li>
-      <li><a href="references/multiview_platform.mono_multi_view_classifiers.utils.html#multiview_platform.mono_multi_view_classifiers.utils.get_multiview_db.get_classic_db_csv">get_classic_db_csv() (in module multiview_platform.mono_multi_view_classifiers.utils.get_multiview_db)</a>
-</li>
-      <li><a href="references/multiview_platform.mono_multi_view_classifiers.utils.html#multiview_platform.mono_multi_view_classifiers.utils.get_multiview_db.get_classic_db_hdf5">get_classic_db_hdf5() (in module multiview_platform.mono_multi_view_classifiers.utils.get_multiview_db)</a>
-</li>
-      <li><a href="references/monomulti/metrics.html#multiview_platform.mono_multi_view_classifiers.metrics.framework.get_config">get_config() (in module multiview_platform.mono_multi_view_classifiers.metrics.framework)</a>
-
-      <ul>
-        <li><a href="references/multiview_platform.mono_multi_view_classifiers.utils.html#multiview_platform.mono_multi_view_classifiers.utils.multiclass.MultiClassWrapper.get_config">(MultiClassWrapper method)</a>
-</li>
-      </ul></li>
-      <li><a href="references/multiview_platform.mono_multi_view_classifiers.utils.html#multiview_platform.mono_multi_view_classifiers.utils.execution.get_database_function">get_database_function() (in module multiview_platform.mono_multi_view_classifiers.utils.execution)</a>
-</li>
-      <li><a href="references/multiview_platform.mono_multi_view_classifiers.utils.html#multiview_platform.mono_multi_view_classifiers.utils.dataset.get_examples_views_indices">get_examples_views_indices() (in module multiview_platform.mono_multi_view_classifiers.utils.dataset)</a>
-</li>
-      <li><a href="references/multiview_platform.mono_multi_view_classifiers.utils.html#multiview_platform.mono_multi_view_classifiers.utils.multiclass.MultiClassWrapper.get_interpretation">get_interpretation() (MultiClassWrapper method)</a>
-</li>
-      <li><a href="references/multiview_platform.mono_multi_view_classifiers.utils.html#multiview_platform.mono_multi_view_classifiers.utils.dataset.Dataset.get_label_names">get_label_names() (Dataset method)</a>
-
-      <ul>
-        <li><a href="references/multiview_platform.mono_multi_view_classifiers.utils.html#multiview_platform.mono_multi_view_classifiers.utils.dataset.HDF5Dataset.get_label_names">(HDF5Dataset method)</a>
-</li>
-        <li><a href="references/multiview_platform.mono_multi_view_classifiers.utils.html#multiview_platform.mono_multi_view_classifiers.utils.dataset.RAMDataset.get_label_names">(RAMDataset method)</a>
-</li>
-      </ul></li>
-      <li><a href="references/multiview_platform.mono_multi_view_classifiers.utils.html#multiview_platform.mono_multi_view_classifiers.utils.dataset.Dataset.get_labels">get_labels() (Dataset method)</a>
-
-      <ul>
-        <li><a href="references/multiview_platform.mono_multi_view_classifiers.utils.html#multiview_platform.mono_multi_view_classifiers.utils.dataset.HDF5Dataset.get_labels">(HDF5Dataset method)</a>
-</li>
-        <li><a href="references/multiview_platform.mono_multi_view_classifiers.utils.html#multiview_platform.mono_multi_view_classifiers.utils.dataset.RAMDataset.get_labels">(RAMDataset method)</a>
-</li>
-      </ul></li>
-      <li><a href="references/multiview_platform.mono_multi_view_classifiers.utils.html#multiview_platform.mono_multi_view_classifiers.utils.multiclass.get_mc_estim">get_mc_estim() (in module multiview_platform.mono_multi_view_classifiers.utils.multiclass)</a>
-</li>
-      <li><a href="references/multiview_platform.mono_multi_view_classifiers.utils.html#multiview_platform.mono_multi_view_classifiers.utils.multiview_result_analysis.get_metrics_scores">get_metrics_scores() (in module multiview_platform.mono_multi_view_classifiers.utils.multiview_result_analysis)</a>
-</li>
-      <li><a href="references/multiview_platform.mono_multi_view_classifiers.utils.html#multiview_platform.mono_multi_view_classifiers.utils.dataset.HDF5Dataset.get_name">get_name() (HDF5Dataset method)</a>
-
-      <ul>
-        <li><a href="references/multiview_platform.mono_multi_view_classifiers.utils.html#multiview_platform.mono_multi_view_classifiers.utils.dataset.RAMDataset.get_name">(RAMDataset method)</a>
-</li>
-      </ul></li>
-  </ul></td>
-  <td style="width: 33%; vertical-align: top;"><ul>
-      <li><a href="references/multiview_platform.mono_multi_view_classifiers.utils.html#multiview_platform.mono_multi_view_classifiers.utils.dataset.HDF5Dataset.get_nb_class">get_nb_class() (HDF5Dataset method)</a>
-
-      <ul>
-        <li><a href="references/multiview_platform.mono_multi_view_classifiers.utils.html#multiview_platform.mono_multi_view_classifiers.utils.dataset.RAMDataset.get_nb_class">(RAMDataset method)</a>
-</li>
-      </ul></li>
-      <li><a href="references/multiview_platform.mono_multi_view_classifiers.utils.html#multiview_platform.mono_multi_view_classifiers.utils.dataset.Dataset.get_nb_examples">get_nb_examples() (Dataset method)</a>
-
-      <ul>
-        <li><a href="references/multiview_platform.tests.test_utils.html#multiview_platform.tests.test_utils.test_multiclass.FakeDset.get_nb_examples">(FakeDset method)</a>
-</li>
-        <li><a href="references/multiview_platform.mono_multi_view_classifiers.utils.html#multiview_platform.mono_multi_view_classifiers.utils.dataset.HDF5Dataset.get_nb_examples">(HDF5Dataset method)</a>
-</li>
-        <li><a href="references/multiview_platform.mono_multi_view_classifiers.utils.html#multiview_platform.mono_multi_view_classifiers.utils.dataset.RAMDataset.get_nb_examples">(RAMDataset method)</a>
-</li>
-      </ul></li>
-      <li><a href="references/multiview_platform.mono_multi_view_classifiers.utils.html#multiview_platform.mono_multi_view_classifiers.utils.hyper_parameter_search.CustomRandint.get_nb_possibilities">get_nb_possibilities() (CustomRandint method)</a>
-</li>
-      <li><a href="references/multiview_platform.mono_multi_view_classifiers.utils.html#multiview_platform.mono_multi_view_classifiers.utils.hyper_parameter_search.Random.get_param_distribs">get_param_distribs() (Random method)</a>
-</li>
-      <li><a href="references/multiview_platform.mono_multi_view_classifiers.utils.html#multiview_platform.mono_multi_view_classifiers.utils.multiclass.MultiviewOVOWrapper.get_params">get_params() (MultiviewOVOWrapper method)</a>
-
-      <ul>
-        <li><a href="references/multiview_platform.mono_multi_view_classifiers.utils.html#multiview_platform.mono_multi_view_classifiers.utils.multiclass.MultiviewOVRWrapper.get_params">(MultiviewOVRWrapper method)</a>
-</li>
-        <li><a href="references/multiview_platform.mono_multi_view_classifiers.utils.html#multiview_platform.mono_multi_view_classifiers.utils.multiclass.OVOWrapper.get_params">(OVOWrapper method)</a>
-</li>
-        <li><a href="references/multiview_platform.mono_multi_view_classifiers.utils.html#multiview_platform.mono_multi_view_classifiers.utils.multiclass.OVRWrapper.get_params">(OVRWrapper method)</a>
-</li>
-      </ul></li>
-      <li><a href="references/monomulti/exec_classif.html#multiview_platform.mono_multi_view_classifiers.exec_classif.get_path_dict">get_path_dict() (in module multiview_platform.mono_multi_view_classifiers.exec_classif)</a>, <a href="references/multiview_platform.mono_multi_view_classifiers.html#multiview_platform.mono_multi_view_classifiers.exec_classif.get_path_dict">[1]</a>
-</li>
-      <li><a href="references/multiview_platform.mono_multi_view_classifiers.utils.html#multiview_platform.mono_multi_view_classifiers.utils.get_multiview_db.get_plausible_db_hdf5">get_plausible_db_hdf5() (in module multiview_platform.mono_multi_view_classifiers.utils.get_multiview_db)</a>
-</li>
-      <li><a href="references/monomulti/metrics.html#multiview_platform.mono_multi_view_classifiers.metrics.framework.get_scorer">get_scorer() (in module multiview_platform.mono_multi_view_classifiers.metrics.framework)</a>
-</li>
-      <li><a href="references/multiview_platform.mono_multi_view_classifiers.utils.html#multiview_platform.mono_multi_view_classifiers.utils.hyper_parameter_search.HPSearch.get_scoring">get_scoring() (HPSearch method)</a>
-</li>
-      <li><a href="references/multiview_platform.mono_multi_view_classifiers.utils.html#multiview_platform.mono_multi_view_classifiers.utils.dataset.Dataset.get_shape">get_shape() (Dataset method)</a>
-</li>
-      <li><a href="references/multiview_platform.mono_multi_view_classifiers.utils.html#multiview_platform.mono_multi_view_classifiers.utils.configuration.get_the_args">get_the_args() (in module multiview_platform.mono_multi_view_classifiers.utils.configuration)</a>
-</li>
-      <li><a href="references/multiview_platform.mono_multi_view_classifiers.utils.html#multiview_platform.mono_multi_view_classifiers.utils.multiview_result_analysis.get_total_metric_scores">get_total_metric_scores() (in module multiview_platform.mono_multi_view_classifiers.utils.multiview_result_analysis)</a>
-</li>
-      <li><a href="references/multiview_platform.mono_multi_view_classifiers.utils.html#multiview_platform.mono_multi_view_classifiers.utils.dataset.Dataset.get_v">get_v() (Dataset method)</a>
-
-      <ul>
-        <li><a href="references/multiview_platform.mono_multi_view_classifiers.utils.html#multiview_platform.mono_multi_view_classifiers.utils.dataset.HDF5Dataset.get_v">(HDF5Dataset method)</a>
-</li>
-        <li><a href="references/multiview_platform.mono_multi_view_classifiers.utils.html#multiview_platform.mono_multi_view_classifiers.utils.dataset.RAMDataset.get_v">(RAMDataset method)</a>
-</li>
-      </ul></li>
-      <li><a href="references/multiview_platform.mono_multi_view_classifiers.utils.html#multiview_platform.mono_multi_view_classifiers.utils.dataset.HDF5Dataset.get_view_dict">get_view_dict() (HDF5Dataset method)</a>
-
-      <ul>
-        <li><a href="references/multiview_platform.mono_multi_view_classifiers.utils.html#multiview_platform.mono_multi_view_classifiers.utils.dataset.RAMDataset.get_view_dict">(RAMDataset method)</a>
-</li>
-      </ul></li>
-      <li><a href="references/multiview_platform.mono_multi_view_classifiers.utils.html#multiview_platform.mono_multi_view_classifiers.utils.dataset.HDF5Dataset.get_view_name">get_view_name() (HDF5Dataset method)</a>
-
-      <ul>
-        <li><a href="references/multiview_platform.mono_multi_view_classifiers.utils.html#multiview_platform.mono_multi_view_classifiers.utils.dataset.RAMDataset.get_view_name">(RAMDataset method)</a>
-</li>
-      </ul></li>
-      <li><a href="references/multiview_platform.mono_multi_view_classifiers.utils.html#multiview_platform.mono_multi_view_classifiers.utils.hyper_parameter_search.Grid">Grid (class in multiview_platform.mono_multi_view_classifiers.utils.hyper_parameter_search)</a>
-</li>
-  </ul></td>
-</tr></table>
-
-<h2 id="H">H</h2>
-<table style="width: 100%" class="indextable genindextable"><tr>
-  <td style="width: 33%; vertical-align: top;"><ul>
-      <li><a href="references/multiview_platform.mono_multi_view_classifiers.utils.html#multiview_platform.mono_multi_view_classifiers.utils.dataset.HDF5Dataset">HDF5Dataset (class in multiview_platform.mono_multi_view_classifiers.utils.dataset)</a>
-</li>
-  </ul></td>
-  <td style="width: 33%; vertical-align: top;"><ul>
-      <li><a href="references/multiview_platform.mono_multi_view_classifiers.utils.html#multiview_platform.mono_multi_view_classifiers.utils.hyper_parameter_search.HPSearch">HPSearch (class in multiview_platform.mono_multi_view_classifiers.utils.hyper_parameter_search)</a>
-</li>
-  </ul></td>
-</tr></table>
-
-<h2 id="I">I</h2>
-<table style="width: 100%" class="indextable genindextable"><tr>
-  <td style="width: 33%; vertical-align: top;"><ul>
-      <li><a href="references/multiview_platform.mono_multi_view_classifiers.html#multiview_platform.mono_multi_view_classifiers.exec_classif.init_argument_dictionaries">init_argument_dictionaries() (in module multiview_platform.mono_multi_view_classifiers.exec_classif)</a>
-</li>
-      <li><a href="references/multiview_platform.mono_multi_view_classifiers.utils.html#multiview_platform.mono_multi_view_classifiers.utils.dataset.HDF5Dataset.init_attrs">init_attrs() (HDF5Dataset method)</a>
-
-      <ul>
-        <li><a href="references/multiview_platform.mono_multi_view_classifiers.utils.html#multiview_platform.mono_multi_view_classifiers.utils.dataset.RAMDataset.init_attrs">(RAMDataset method)</a>
-</li>
-      </ul></li>
-      <li><a href="references/monomulti/exec_classif.html#multiview_platform.mono_multi_view_classifiers.exec_classif.init_benchmark">init_benchmark() (in module multiview_platform.mono_multi_view_classifiers.exec_classif)</a>, <a href="references/multiview_platform.mono_multi_view_classifiers.html#multiview_platform.mono_multi_view_classifiers.exec_classif.init_benchmark">[1]</a>
-</li>
-      <li><a href="references/multiview_platform.mono_multi_view_classifiers.utils.html#multiview_platform.mono_multi_view_classifiers.utils.dataset.Dataset.init_example_indces">init_example_indces() (Dataset method)</a>
-</li>
-      <li><a href="references/monomulti/exec_classif.html#multiview_platform.mono_multi_view_classifiers.exec_classif.init_kwargs">init_kwargs() (in module multiview_platform.mono_multi_view_classifiers.exec_classif)</a>, <a href="references/multiview_platform.mono_multi_view_classifiers.html#multiview_platform.mono_multi_view_classifiers.exec_classif.init_kwargs">[1]</a>
-</li>
-      <li><a href="references/monomulti/exec_classif.html#multiview_platform.mono_multi_view_classifiers.exec_classif.init_kwargs_func">init_kwargs_func() (in module multiview_platform.mono_multi_view_classifiers.exec_classif)</a>, <a href="references/multiview_platform.mono_multi_view_classifiers.html#multiview_platform.mono_multi_view_classifiers.exec_classif.init_kwargs_func">[1]</a>
-</li>
-      <li><a href="references/multiview_platform.mono_multi_view_classifiers.utils.html#multiview_platform.mono_multi_view_classifiers.utils.execution.init_log_file">init_log_file() (in module multiview_platform.mono_multi_view_classifiers.utils.execution)</a>
-</li>
-      <li><a href="references/monomulti/exec_classif.html#multiview_platform.mono_multi_view_classifiers.exec_classif.init_monoview_exps">init_monoview_exps() (in module multiview_platform.mono_multi_view_classifiers.exec_classif)</a>, <a href="references/multiview_platform.mono_multi_view_classifiers.html#multiview_platform.mono_multi_view_classifiers.exec_classif.init_monoview_exps">[1]</a>
-</li>
-  </ul></td>
-  <td style="width: 33%; vertical-align: top;"><ul>
-      <li><a href="references/multiview_platform.mono_multi_view_classifiers.utils.html#multiview_platform.mono_multi_view_classifiers.utils.dataset.init_multiple_datasets">init_multiple_datasets() (in module multiview_platform.mono_multi_view_classifiers.utils.dataset)</a>
-</li>
-      <li><a href="references/multiview_platform.mono_multi_view_classifiers.html#multiview_platform.mono_multi_view_classifiers.exec_classif.init_multiview_exps">init_multiview_exps() (in module multiview_platform.mono_multi_view_classifiers.exec_classif)</a>
-</li>
-      <li><a href="references/multiview_platform.mono_multi_view_classifiers.utils.html#multiview_platform.mono_multi_view_classifiers.utils.execution.init_random_state">init_random_state() (in module multiview_platform.mono_multi_view_classifiers.utils.execution)</a>
-</li>
-      <li><a href="references/multiview_platform.mono_multi_view_classifiers.utils.html#multiview_platform.mono_multi_view_classifiers.utils.execution.init_stats_iter_random_states">init_stats_iter_random_states() (in module multiview_platform.mono_multi_view_classifiers.utils.execution)</a>
-</li>
-      <li><a href="references/multiview_platform.mono_multi_view_classifiers.utils.html#multiview_platform.mono_multi_view_classifiers.utils.dataset.HDF5Dataset.init_view_names">init_view_names() (HDF5Dataset method)</a>
-</li>
-      <li><a href="references/multiview_platform.mono_multi_view_classifiers.utils.html#multiview_platform.mono_multi_view_classifiers.utils.execution.init_views">init_views() (in module multiview_platform.mono_multi_view_classifiers.utils.execution)</a>
-</li>
-      <li><a href="references/multiview_platform.mono_multi_view_classifiers.utils.html#multiview_platform.mono_multi_view_classifiers.utils.dataset.input_">input_() (in module multiview_platform.mono_multi_view_classifiers.utils.dataset)</a>
-</li>
-      <li><a href="references/monomulti/exec_classif.html#multiview_platform.mono_multi_view_classifiers.exec_classif.is_dict_in">is_dict_in() (in module multiview_platform.mono_multi_view_classifiers.exec_classif)</a>, <a href="references/multiview_platform.mono_multi_view_classifiers.html#multiview_platform.mono_multi_view_classifiers.exec_classif.is_dict_in">[1]</a>
-</li>
-      <li><a href="references/multiview_platform.mono_multi_view_classifiers.utils.html#multiview_platform.mono_multi_view_classifiers.utils.dataset.is_just_number">is_just_number() (in module multiview_platform.mono_multi_view_classifiers.utils.dataset)</a>
-</li>
-  </ul></td>
-</tr></table>
-
-<h2 id="M">M</h2>
-<table style="width: 100%" class="indextable genindextable"><tr>
-  <td style="width: 33%; vertical-align: top;"><ul>
-      <li><a href="references/multiview_platform.mono_multi_view_classifiers.utils.html#multiview_platform.mono_multi_view_classifiers.utils.get_multiview_db.make_me_noisy">make_me_noisy() (in module multiview_platform.mono_multi_view_classifiers.utils.get_multiview_db)</a>
-</li>
-      <li>
-    module
-
-      <ul>
-        <li><a href="references/multiview_platform.html#module-multiview_platform">multiview_platform</a>
-</li>
-        <li><a href="execution.html#module-multiview_platform.execute">multiview_platform.execute</a>, <a href="references/multiview_platform.html#module-multiview_platform.execute">[1]</a>
-</li>
-        <li><a href="references/multiview_platform.mono_multi_view_classifiers.html#module-multiview_platform.mono_multi_view_classifiers">multiview_platform.mono_multi_view_classifiers</a>
-</li>
-        <li><a href="references/monomulti/exec_classif.html#module-multiview_platform.mono_multi_view_classifiers.exec_classif">multiview_platform.mono_multi_view_classifiers.exec_classif</a>, <a href="references/multiview_platform.mono_multi_view_classifiers.html#module-multiview_platform.mono_multi_view_classifiers.exec_classif">[1]</a>
-</li>
-        <li><a href="references/monomulti/metrics.html#module-multiview_platform.mono_multi_view_classifiers.metrics.framework">multiview_platform.mono_multi_view_classifiers.metrics.framework</a>
-</li>
-        <li><a href="references/multiview_platform.mono_multi_view_classifiers.multiview_classifiers.html#module-multiview_platform.mono_multi_view_classifiers.multiview_classifiers">multiview_platform.mono_multi_view_classifiers.multiview_classifiers</a>
-</li>
-        <li><a href="references/multiview_platform.mono_multi_view_classifiers.multiview_classifiers.difficulty_fusion.html#module-multiview_platform.mono_multi_view_classifiers.multiview_classifiers.difficulty_fusion">multiview_platform.mono_multi_view_classifiers.multiview_classifiers.difficulty_fusion</a>
-</li>
-        <li><a href="references/multiview_platform.mono_multi_view_classifiers.multiview_classifiers.disagree_fusion.html#module-multiview_platform.mono_multi_view_classifiers.multiview_classifiers.disagree_fusion">multiview_platform.mono_multi_view_classifiers.multiview_classifiers.disagree_fusion</a>
-</li>
-        <li><a href="references/multiview_platform.mono_multi_view_classifiers.multiview_classifiers.double_fault_fusion.html#module-multiview_platform.mono_multi_view_classifiers.multiview_classifiers.double_fault_fusion">multiview_platform.mono_multi_view_classifiers.multiview_classifiers.double_fault_fusion</a>
-</li>
-        <li><a href="references/multiview_platform.mono_multi_view_classifiers.multiview_classifiers.entropy_fusion.html#module-multiview_platform.mono_multi_view_classifiers.multiview_classifiers.entropy_fusion">multiview_platform.mono_multi_view_classifiers.multiview_classifiers.entropy_fusion</a>
-</li>
-        <li><a href="analyzeresult.html#module-multiview_platform.mono_multi_view_classifiers.result_analysis">multiview_platform.mono_multi_view_classifiers.result_analysis</a>, <a href="references/multiview_platform.mono_multi_view_classifiers.html#module-multiview_platform.mono_multi_view_classifiers.result_analysis">[1]</a>
-</li>
-        <li><a href="references/multiview_platform.mono_multi_view_classifiers.utils.html#module-multiview_platform.mono_multi_view_classifiers.utils">multiview_platform.mono_multi_view_classifiers.utils</a>
-</li>
-        <li><a href="references/multiview_platform.mono_multi_view_classifiers.utils.html#module-multiview_platform.mono_multi_view_classifiers.utils.configuration">multiview_platform.mono_multi_view_classifiers.utils.configuration</a>
-</li>
-        <li><a href="references/multiview_platform.mono_multi_view_classifiers.utils.html#module-multiview_platform.mono_multi_view_classifiers.utils.dataset">multiview_platform.mono_multi_view_classifiers.utils.dataset</a>
-</li>
-        <li><a href="references/multiview_platform.mono_multi_view_classifiers.utils.html#module-multiview_platform.mono_multi_view_classifiers.utils.execution">multiview_platform.mono_multi_view_classifiers.utils.execution</a>
-</li>
-        <li><a href="references/multiview_platform.mono_multi_view_classifiers.utils.html#module-multiview_platform.mono_multi_view_classifiers.utils.get_multiview_db">multiview_platform.mono_multi_view_classifiers.utils.get_multiview_db</a>
-</li>
-        <li><a href="references/multiview_platform.mono_multi_view_classifiers.utils.html#module-multiview_platform.mono_multi_view_classifiers.utils.hyper_parameter_search">multiview_platform.mono_multi_view_classifiers.utils.hyper_parameter_search</a>
-</li>
-        <li><a href="references/multiview_platform.mono_multi_view_classifiers.utils.html#module-multiview_platform.mono_multi_view_classifiers.utils.make_file_config">multiview_platform.mono_multi_view_classifiers.utils.make_file_config</a>
-</li>
-        <li><a href="references/monomulti/utils/multiclass.html#module-multiview_platform.mono_multi_view_classifiers.utils.multiclass">multiview_platform.mono_multi_view_classifiers.utils.multiclass</a>, <a href="references/multiview_platform.mono_multi_view_classifiers.utils.html#module-multiview_platform.mono_multi_view_classifiers.utils.multiclass">[1]</a>
-</li>
-        <li><a href="references/multiview_platform.mono_multi_view_classifiers.utils.html#module-multiview_platform.mono_multi_view_classifiers.utils.multiview_result_analysis">multiview_platform.mono_multi_view_classifiers.utils.multiview_result_analysis</a>
-</li>
-        <li><a href="references/multiview_platform.mono_multi_view_classifiers.utils.html#module-multiview_platform.mono_multi_view_classifiers.utils.transformations">multiview_platform.mono_multi_view_classifiers.utils.transformations</a>
-</li>
-        <li><a href="references/multiview_platform.html#module-multiview_platform.tests">multiview_platform.tests</a>, <a href="references/multiview_platform.tests.html#module-multiview_platform.tests">[1]</a>
-</li>
-        <li><a href="references/multiview_platform.tests.test_metrics.html#module-multiview_platform.tests.test_metrics">multiview_platform.tests.test_metrics</a>
-</li>
-        <li><a href="references/multiview_platform.tests.test_metrics.html#module-multiview_platform.tests.test_metrics.test_accuracy_score">multiview_platform.tests.test_metrics.test_accuracy_score</a>
-</li>
-        <li><a href="references/multiview_platform.tests.test_mono_view.html#module-multiview_platform.tests.test_mono_view">multiview_platform.tests.test_mono_view</a>
-</li>
-        <li><a href="references/multiview_platform.tests.test_mono_view.html#module-multiview_platform.tests.test_mono_view.test_ExecClassifMonoView">multiview_platform.tests.test_mono_view.test_ExecClassifMonoView</a>
-</li>
-        <li><a href="references/multiview_platform.tests.test_mono_view.html#module-multiview_platform.tests.test_mono_view.test_MonoviewUtils">multiview_platform.tests.test_mono_view.test_MonoviewUtils</a>
-</li>
-        <li><a href="references/multiview_platform.tests.test_monoview_classifiers.html#module-multiview_platform.tests.test_monoview_classifiers">multiview_platform.tests.test_monoview_classifiers</a>
-</li>
-        <li><a href="references/multiview_platform.tests.test_monoview_classifiers.html#module-multiview_platform.tests.test_monoview_classifiers.test_adaboost">multiview_platform.tests.test_monoview_classifiers.test_adaboost</a>
-</li>
-        <li><a href="references/multiview_platform.tests.test_monoview_classifiers.html#module-multiview_platform.tests.test_monoview_classifiers.test_compatibility">multiview_platform.tests.test_monoview_classifiers.test_compatibility</a>
-</li>
-        <li><a href="references/multiview_platform.tests.test_multiview_classifiers.html#module-multiview_platform.tests.test_multiview_classifiers">multiview_platform.tests.test_multiview_classifiers</a>
-</li>
-        <li><a href="references/multiview_platform.tests.test_multiview_classifiers.html#module-multiview_platform.tests.test_multiview_classifiers.test_diversity_utils">multiview_platform.tests.test_multiview_classifiers.test_diversity_utils</a>
-</li>
-        <li><a href="references/multiview_platform.tests.test_multiview_classifiers.Test_PseudoCQMeasure.html#module-multiview_platform.tests.test_multiview_classifiers.Test_PseudoCQMeasure">multiview_platform.tests.test_multiview_classifiers.Test_PseudoCQMeasure</a>
-</li>
-        <li><a href="references/multiview_platform.tests.test_multiview_classifiers.Test_PseudoCQMeasure.html#module-multiview_platform.tests.test_multiview_classifiers.Test_PseudoCQMeasure.test_PseudoCQFusionModule">multiview_platform.tests.test_multiview_classifiers.Test_PseudoCQMeasure.test_PseudoCQFusionModule</a>
-</li>
-        <li><a href="references/multiview_platform.tests.test_utils.html#module-multiview_platform.tests.test_utils">multiview_platform.tests.test_utils</a>
-</li>
-        <li><a href="references/multiview_platform.tests.test_utils.html#module-multiview_platform.tests.test_utils.test_configuration">multiview_platform.tests.test_utils.test_configuration</a>
-</li>
-        <li><a href="references/multiview_platform.tests.test_utils.html#module-multiview_platform.tests.test_utils.test_execution">multiview_platform.tests.test_utils.test_execution</a>
-</li>
-        <li><a href="references/multiview_platform.tests.test_utils.html#module-multiview_platform.tests.test_utils.test_GetMultiviewDB">multiview_platform.tests.test_utils.test_GetMultiviewDB</a>
-</li>
-        <li><a href="references/multiview_platform.tests.test_utils.html#module-multiview_platform.tests.test_utils.test_multiclass">multiview_platform.tests.test_utils.test_multiclass</a>
-</li>
-        <li><a href="references/multiview_platform.html#module-multiview_platform.versions">multiview_platform.versions</a>
-</li>
-      </ul></li>
-      <li><a href="references/multiview_platform.mono_multi_view_classifiers.utils.html#multiview_platform.mono_multi_view_classifiers.utils.multiclass.MonoviewWrapper">MonoviewWrapper (class in multiview_platform.mono_multi_view_classifiers.utils.multiclass)</a>
-</li>
-      <li><a href="references/multiview_platform.mono_multi_view_classifiers.utils.html#multiview_platform.mono_multi_view_classifiers.utils.multiclass.MultiClassWrapper">MultiClassWrapper (class in multiview_platform.mono_multi_view_classifiers.utils.multiclass)</a>
-</li>
-      <li><a href="references/multiview_platform.mono_multi_view_classifiers.utils.html#multiview_platform.mono_multi_view_classifiers.utils.multiclass.MultiviewOVOWrapper.multiview_decision_function">multiview_decision_function() (MultiviewOVOWrapper method)</a>
-</li>
-      <li>
-    multiview_platform
-
-      <ul>
-        <li><a href="references/multiview_platform.html#module-multiview_platform">module</a>
-</li>
-      </ul></li>
-      <li>
-    multiview_platform.execute
-
-      <ul>
-        <li><a href="execution.html#module-multiview_platform.execute">module</a>, <a href="references/multiview_platform.html#module-multiview_platform.execute">[1]</a>
-</li>
-      </ul></li>
-      <li>
-    multiview_platform.mono_multi_view_classifiers
-
-      <ul>
-        <li><a href="references/multiview_platform.mono_multi_view_classifiers.html#module-multiview_platform.mono_multi_view_classifiers">module</a>
-</li>
-      </ul></li>
-      <li>
-    multiview_platform.mono_multi_view_classifiers.exec_classif
-
-      <ul>
-        <li><a href="references/monomulti/exec_classif.html#module-multiview_platform.mono_multi_view_classifiers.exec_classif">module</a>, <a href="references/multiview_platform.mono_multi_view_classifiers.html#module-multiview_platform.mono_multi_view_classifiers.exec_classif">[1]</a>
-</li>
-      </ul></li>
-      <li>
-    multiview_platform.mono_multi_view_classifiers.metrics.framework
-
-      <ul>
-        <li><a href="references/monomulti/metrics.html#module-multiview_platform.mono_multi_view_classifiers.metrics.framework">module</a>
-</li>
-      </ul></li>
-      <li>
-    multiview_platform.mono_multi_view_classifiers.multiview_classifiers
-
-      <ul>
-        <li><a href="references/multiview_platform.mono_multi_view_classifiers.multiview_classifiers.html#module-multiview_platform.mono_multi_view_classifiers.multiview_classifiers">module</a>
-</li>
-      </ul></li>
-      <li>
-    multiview_platform.mono_multi_view_classifiers.multiview_classifiers.difficulty_fusion
-
-      <ul>
-        <li><a href="references/multiview_platform.mono_multi_view_classifiers.multiview_classifiers.difficulty_fusion.html#module-multiview_platform.mono_multi_view_classifiers.multiview_classifiers.difficulty_fusion">module</a>
-</li>
-      </ul></li>
-      <li>
-    multiview_platform.mono_multi_view_classifiers.multiview_classifiers.disagree_fusion
-
-      <ul>
-        <li><a href="references/multiview_platform.mono_multi_view_classifiers.multiview_classifiers.disagree_fusion.html#module-multiview_platform.mono_multi_view_classifiers.multiview_classifiers.disagree_fusion">module</a>
-</li>
-      </ul></li>
-      <li>
-    multiview_platform.mono_multi_view_classifiers.multiview_classifiers.double_fault_fusion
-
-      <ul>
-        <li><a href="references/multiview_platform.mono_multi_view_classifiers.multiview_classifiers.double_fault_fusion.html#module-multiview_platform.mono_multi_view_classifiers.multiview_classifiers.double_fault_fusion">module</a>
-</li>
-      </ul></li>
-      <li>
-    multiview_platform.mono_multi_view_classifiers.multiview_classifiers.entropy_fusion
-
-      <ul>
-        <li><a href="references/multiview_platform.mono_multi_view_classifiers.multiview_classifiers.entropy_fusion.html#module-multiview_platform.mono_multi_view_classifiers.multiview_classifiers.entropy_fusion">module</a>
-</li>
-      </ul></li>
-  </ul></td>
-  <td style="width: 33%; vertical-align: top;"><ul>
-      <li>
-    multiview_platform.mono_multi_view_classifiers.result_analysis
-
-      <ul>
-        <li><a href="analyzeresult.html#module-multiview_platform.mono_multi_view_classifiers.result_analysis">module</a>, <a href="references/multiview_platform.mono_multi_view_classifiers.html#module-multiview_platform.mono_multi_view_classifiers.result_analysis">[1]</a>
-</li>
-      </ul></li>
-      <li>
-    multiview_platform.mono_multi_view_classifiers.utils
-
-      <ul>
-        <li><a href="references/multiview_platform.mono_multi_view_classifiers.utils.html#module-multiview_platform.mono_multi_view_classifiers.utils">module</a>
-</li>
-      </ul></li>
-      <li>
-    multiview_platform.mono_multi_view_classifiers.utils.configuration
-
-      <ul>
-        <li><a href="references/multiview_platform.mono_multi_view_classifiers.utils.html#module-multiview_platform.mono_multi_view_classifiers.utils.configuration">module</a>
-</li>
-      </ul></li>
-      <li>
-    multiview_platform.mono_multi_view_classifiers.utils.dataset
-
-      <ul>
-        <li><a href="references/multiview_platform.mono_multi_view_classifiers.utils.html#module-multiview_platform.mono_multi_view_classifiers.utils.dataset">module</a>
-</li>
-      </ul></li>
-      <li>
-    multiview_platform.mono_multi_view_classifiers.utils.execution
-
-      <ul>
-        <li><a href="references/multiview_platform.mono_multi_view_classifiers.utils.html#module-multiview_platform.mono_multi_view_classifiers.utils.execution">module</a>
-</li>
-      </ul></li>
-      <li>
-    multiview_platform.mono_multi_view_classifiers.utils.get_multiview_db
-
-      <ul>
-        <li><a href="references/multiview_platform.mono_multi_view_classifiers.utils.html#module-multiview_platform.mono_multi_view_classifiers.utils.get_multiview_db">module</a>
-</li>
-      </ul></li>
-      <li>
-    multiview_platform.mono_multi_view_classifiers.utils.hyper_parameter_search
-
-      <ul>
-        <li><a href="references/multiview_platform.mono_multi_view_classifiers.utils.html#module-multiview_platform.mono_multi_view_classifiers.utils.hyper_parameter_search">module</a>
-</li>
-      </ul></li>
-      <li>
-    multiview_platform.mono_multi_view_classifiers.utils.make_file_config
-
-      <ul>
-        <li><a href="references/multiview_platform.mono_multi_view_classifiers.utils.html#module-multiview_platform.mono_multi_view_classifiers.utils.make_file_config">module</a>
-</li>
-      </ul></li>
-      <li>
-    multiview_platform.mono_multi_view_classifiers.utils.multiclass
-
-      <ul>
-        <li><a href="references/monomulti/utils/multiclass.html#module-multiview_platform.mono_multi_view_classifiers.utils.multiclass">module</a>, <a href="references/multiview_platform.mono_multi_view_classifiers.utils.html#module-multiview_platform.mono_multi_view_classifiers.utils.multiclass">[1]</a>
-</li>
-      </ul></li>
-      <li>
-    multiview_platform.mono_multi_view_classifiers.utils.multiview_result_analysis
-
-      <ul>
-        <li><a href="references/multiview_platform.mono_multi_view_classifiers.utils.html#module-multiview_platform.mono_multi_view_classifiers.utils.multiview_result_analysis">module</a>
-</li>
-      </ul></li>
-      <li>
-    multiview_platform.mono_multi_view_classifiers.utils.transformations
-
-      <ul>
-        <li><a href="references/multiview_platform.mono_multi_view_classifiers.utils.html#module-multiview_platform.mono_multi_view_classifiers.utils.transformations">module</a>
-</li>
-      </ul></li>
-      <li>
-    multiview_platform.tests
-
-      <ul>
-        <li><a href="references/multiview_platform.html#module-multiview_platform.tests">module</a>, <a href="references/multiview_platform.tests.html#module-multiview_platform.tests">[1]</a>
-</li>
-      </ul></li>
-      <li>
-    multiview_platform.tests.test_metrics
-
-      <ul>
-        <li><a href="references/multiview_platform.tests.test_metrics.html#module-multiview_platform.tests.test_metrics">module</a>
-</li>
-      </ul></li>
-      <li>
-    multiview_platform.tests.test_metrics.test_accuracy_score
-
-      <ul>
-        <li><a href="references/multiview_platform.tests.test_metrics.html#module-multiview_platform.tests.test_metrics.test_accuracy_score">module</a>
-</li>
-      </ul></li>
-      <li>
-    multiview_platform.tests.test_mono_view
-
-      <ul>
-        <li><a href="references/multiview_platform.tests.test_mono_view.html#module-multiview_platform.tests.test_mono_view">module</a>
-</li>
-      </ul></li>
-      <li>
-    multiview_platform.tests.test_mono_view.test_ExecClassifMonoView
-
-      <ul>
-        <li><a href="references/multiview_platform.tests.test_mono_view.html#module-multiview_platform.tests.test_mono_view.test_ExecClassifMonoView">module</a>
-</li>
-      </ul></li>
-      <li>
-    multiview_platform.tests.test_mono_view.test_MonoviewUtils
-
-      <ul>
-        <li><a href="references/multiview_platform.tests.test_mono_view.html#module-multiview_platform.tests.test_mono_view.test_MonoviewUtils">module</a>
-</li>
-      </ul></li>
-      <li>
-    multiview_platform.tests.test_monoview_classifiers
-
-      <ul>
-        <li><a href="references/multiview_platform.tests.test_monoview_classifiers.html#module-multiview_platform.tests.test_monoview_classifiers">module</a>
-</li>
-      </ul></li>
-      <li>
-    multiview_platform.tests.test_monoview_classifiers.test_adaboost
-
-      <ul>
-        <li><a href="references/multiview_platform.tests.test_monoview_classifiers.html#module-multiview_platform.tests.test_monoview_classifiers.test_adaboost">module</a>
-</li>
-      </ul></li>
-      <li>
-    multiview_platform.tests.test_monoview_classifiers.test_compatibility
-
-      <ul>
-        <li><a href="references/multiview_platform.tests.test_monoview_classifiers.html#module-multiview_platform.tests.test_monoview_classifiers.test_compatibility">module</a>
-</li>
-      </ul></li>
-      <li>
-    multiview_platform.tests.test_multiview_classifiers
-
-      <ul>
-        <li><a href="references/multiview_platform.tests.test_multiview_classifiers.html#module-multiview_platform.tests.test_multiview_classifiers">module</a>
-</li>
-      </ul></li>
-      <li>
-    multiview_platform.tests.test_multiview_classifiers.test_diversity_utils
-
-      <ul>
-        <li><a href="references/multiview_platform.tests.test_multiview_classifiers.html#module-multiview_platform.tests.test_multiview_classifiers.test_diversity_utils">module</a>
-</li>
-      </ul></li>
-      <li>
-    multiview_platform.tests.test_multiview_classifiers.Test_PseudoCQMeasure
-
-      <ul>
-        <li><a href="references/multiview_platform.tests.test_multiview_classifiers.Test_PseudoCQMeasure.html#module-multiview_platform.tests.test_multiview_classifiers.Test_PseudoCQMeasure">module</a>
-</li>
-      </ul></li>
-      <li>
-    multiview_platform.tests.test_multiview_classifiers.Test_PseudoCQMeasure.test_PseudoCQFusionModule
-
-      <ul>
-        <li><a href="references/multiview_platform.tests.test_multiview_classifiers.Test_PseudoCQMeasure.html#module-multiview_platform.tests.test_multiview_classifiers.Test_PseudoCQMeasure.test_PseudoCQFusionModule">module</a>
-</li>
-      </ul></li>
-      <li>
-    multiview_platform.tests.test_utils
-
-      <ul>
-        <li><a href="references/multiview_platform.tests.test_utils.html#module-multiview_platform.tests.test_utils">module</a>
-</li>
-      </ul></li>
-      <li>
-    multiview_platform.tests.test_utils.test_configuration
-
-      <ul>
-        <li><a href="references/multiview_platform.tests.test_utils.html#module-multiview_platform.tests.test_utils.test_configuration">module</a>
-</li>
-      </ul></li>
-      <li>
-    multiview_platform.tests.test_utils.test_execution
-
-      <ul>
-        <li><a href="references/multiview_platform.tests.test_utils.html#module-multiview_platform.tests.test_utils.test_execution">module</a>
-</li>
-      </ul></li>
-      <li>
-    multiview_platform.tests.test_utils.test_GetMultiviewDB
-
-      <ul>
-        <li><a href="references/multiview_platform.tests.test_utils.html#module-multiview_platform.tests.test_utils.test_GetMultiviewDB">module</a>
-</li>
-      </ul></li>
-      <li>
-    multiview_platform.tests.test_utils.test_multiclass
-
-      <ul>
-        <li><a href="references/multiview_platform.tests.test_utils.html#module-multiview_platform.tests.test_utils.test_multiclass">module</a>
-</li>
-      </ul></li>
-      <li>
-    multiview_platform.versions
-
-      <ul>
-        <li><a href="references/multiview_platform.html#module-multiview_platform.versions">module</a>
-</li>
-      </ul></li>
-      <li><a href="references/multiview_platform.mono_multi_view_classifiers.utils.html#multiview_platform.mono_multi_view_classifiers.utils.multiclass.MultiviewOVOWrapper">MultiviewOVOWrapper (class in multiview_platform.mono_multi_view_classifiers.utils.multiclass)</a>
-</li>
-      <li><a href="references/multiview_platform.mono_multi_view_classifiers.utils.html#multiview_platform.mono_multi_view_classifiers.utils.multiclass.MultiviewOVRWrapper">MultiviewOVRWrapper (class in multiview_platform.mono_multi_view_classifiers.utils.multiclass)</a>
-</li>
-      <li><a href="references/multiview_platform.mono_multi_view_classifiers.utils.html#multiview_platform.mono_multi_view_classifiers.utils.multiclass.MultiviewWrapper">MultiviewWrapper (class in multiview_platform.mono_multi_view_classifiers.utils.multiclass)</a>
-</li>
-  </ul></td>
-</tr></table>
-
-<h2 id="N">N</h2>
-<table style="width: 100%" class="indextable genindextable"><tr>
-  <td style="width: 33%; vertical-align: top;"><ul>
-      <li><a href="references/multiview_platform.mono_multi_view_classifiers.utils.html#multiview_platform.mono_multi_view_classifiers.utils.dataset.HDF5Dataset.nb_view">nb_view (HDF5Dataset attribute)</a>
-</li>
-  </ul></td>
-</tr></table>
-
-<h2 id="O">O</h2>
-<table style="width: 100%" class="indextable genindextable"><tr>
-  <td style="width: 33%; vertical-align: top;"><ul>
-      <li><a href="references/multiview_platform.mono_multi_view_classifiers.utils.html#multiview_platform.mono_multi_view_classifiers.utils.multiclass.OVOWrapper">OVOWrapper (class in multiview_platform.mono_multi_view_classifiers.utils.multiclass)</a>
-</li>
-  </ul></td>
-  <td style="width: 33%; vertical-align: top;"><ul>
-      <li><a href="references/multiview_platform.mono_multi_view_classifiers.utils.html#multiview_platform.mono_multi_view_classifiers.utils.multiclass.OVRWrapper">OVRWrapper (class in multiview_platform.mono_multi_view_classifiers.utils.multiclass)</a>
-</li>
-  </ul></td>
-</tr></table>
-
-<h2 id="P">P</h2>
+<h2 id="M">M</h2>
 <table style="width: 100%" class="indextable genindextable"><tr>
   <td style="width: 33%; vertical-align: top;"><ul>
-      <li><a href="references/multiview_platform.mono_multi_view_classifiers.utils.html#multiview_platform.mono_multi_view_classifiers.utils.execution.parse_the_args">parse_the_args() (in module multiview_platform.mono_multi_view_classifiers.utils.execution)</a>
-</li>
-      <li><a href="references/multiview_platform.mono_multi_view_classifiers.utils.html#multiview_platform.mono_multi_view_classifiers.utils.configuration.pass_default_config">pass_default_config() (in module multiview_platform.mono_multi_view_classifiers.utils.configuration)</a>
-</li>
-      <li><a href="references/multiview_platform.tests.test_utils.html#multiview_platform.tests.test_utils.test_multiclass.FakeMVClassifier.predict">predict() (FakeMVClassifier method)</a>
+      <li>
+    module
 
       <ul>
-        <li><a href="references/multiview_platform.mono_multi_view_classifiers.utils.html#multiview_platform.mono_multi_view_classifiers.utils.multiclass.MultiviewOVOWrapper.predict">(MultiviewOVOWrapper method)</a>
+        <li><a href="references/multiview_platform.html#module-multiview_platform">multiview_platform</a>
 </li>
-        <li><a href="references/multiview_platform.mono_multi_view_classifiers.utils.html#multiview_platform.mono_multi_view_classifiers.utils.multiclass.MultiviewOVRWrapper.predict">(MultiviewOVRWrapper method)</a>
+        <li><a href="execution.html#module-summit.execute">summit.execute</a>
 </li>
-      </ul></li>
-  </ul></td>
-  <td style="width: 33%; vertical-align: top;"><ul>
-      <li><a href="references/multiview_platform.tests.test_utils.html#multiview_platform.tests.test_utils.test_multiclass.FakeMVClassifierProb.predict_proba">predict_proba() (FakeMVClassifierProb method)</a>
-
-      <ul>
-        <li><a href="references/multiview_platform.tests.test_utils.html#multiview_platform.tests.test_utils.test_multiclass.FakeProbaEstim.predict_proba">(FakeProbaEstim method)</a>
+        <li><a href="analyzeresult.html#module-summit.multiview_platform.result_analysis">summit.multiview_platform.result_analysis</a>
 </li>
       </ul></li>
-      <li><a href="references/multiview_platform.mono_multi_view_classifiers.utils.html#multiview_platform.mono_multi_view_classifiers.utils.multiview_result_analysis.print_metric_score">print_metric_score() (in module multiview_platform.mono_multi_view_classifiers.utils.multiview_result_analysis)</a>
-</li>
-  </ul></td>
-</tr></table>
-
-<h2 id="R">R</h2>
-<table style="width: 100%" class="indextable genindextable"><tr>
-  <td style="width: 33%; vertical-align: top;"><ul>
-      <li><a href="references/multiview_platform.mono_multi_view_classifiers.utils.html#multiview_platform.mono_multi_view_classifiers.utils.dataset.RAMDataset">RAMDataset (class in multiview_platform.mono_multi_view_classifiers.utils.dataset)</a>
-</li>
-      <li><a href="references/multiview_platform.mono_multi_view_classifiers.utils.html#multiview_platform.mono_multi_view_classifiers.utils.hyper_parameter_search.Random">Random (class in multiview_platform.mono_multi_view_classifiers.utils.hyper_parameter_search)</a>
-</li>
   </ul></td>
   <td style="width: 33%; vertical-align: top;"><ul>
-      <li><a href="references/multiview_platform.mono_multi_view_classifiers.utils.html#multiview_platform.mono_multi_view_classifiers.utils.dataset.HDF5Dataset.rm">rm() (HDF5Dataset method)</a>
-</li>
-      <li><a href="references/multiview_platform.mono_multi_view_classifiers.utils.html#multiview_platform.mono_multi_view_classifiers.utils.hyper_parameter_search.CustomRandint.rvs">rvs() (CustomRandint method)</a>
+      <li>
+    multiview_platform
 
       <ul>
-        <li><a href="references/multiview_platform.mono_multi_view_classifiers.utils.html#multiview_platform.mono_multi_view_classifiers.utils.hyper_parameter_search.CustomUniform.rvs">(CustomUniform method)</a>
+        <li><a href="references/multiview_platform.html#module-multiview_platform">module</a>
 </li>
       </ul></li>
   </ul></td>
@@ -879,238 +76,22 @@
 <h2 id="S">S</h2>
 <table style="width: 100%" class="indextable genindextable"><tr>
   <td style="width: 33%; vertical-align: top;"><ul>
-      <li><a href="references/multiview_platform.mono_multi_view_classifiers.utils.html#multiview_platform.mono_multi_view_classifiers.utils.configuration.save_config">save_config() (in module multiview_platform.mono_multi_view_classifiers.utils.configuration)</a>
-</li>
-      <li><a href="references/monomulti/metrics.html#multiview_platform.mono_multi_view_classifiers.metrics.framework.score">score() (in module multiview_platform.mono_multi_view_classifiers.metrics.framework)</a>
-</li>
-      <li><a href="references/multiview_platform.tests.test_metrics.html#multiview_platform.tests.test_metrics.test_accuracy_score.Test_accuracy_score.score_test">score_test() (Test_accuracy_score method)</a>
-</li>
-      <li><a href="references/multiview_platform.mono_multi_view_classifiers.utils.html#multiview_platform.mono_multi_view_classifiers.utils.dataset.Dataset.select_labels">select_labels() (Dataset method)</a>
-</li>
-      <li><a href="references/multiview_platform.mono_multi_view_classifiers.utils.html#multiview_platform.mono_multi_view_classifiers.utils.dataset.Dataset.select_views_and_labels">select_views_and_labels() (Dataset method)</a>
-</li>
-      <li><a href="references/monomulti/exec_classif.html#multiview_platform.mono_multi_view_classifiers.exec_classif.set_element">set_element() (in module multiview_platform.mono_multi_view_classifiers.exec_classif)</a>, <a href="references/multiview_platform.mono_multi_view_classifiers.html#multiview_platform.mono_multi_view_classifiers.exec_classif.set_element">[1]</a>
-</li>
-      <li><a href="references/multiview_platform.mono_multi_view_classifiers.utils.html#multiview_platform.mono_multi_view_classifiers.utils.multiclass.MultiClassWrapper.set_params">set_params() (MultiClassWrapper method)</a>
-</li>
-      <li><a href="references/multiview_platform.tests.test_utils.html#multiview_platform.tests.test_utils.test_execution.Test_genKFolds.setUp">setUp() (Test_genKFolds method)</a>
-
-      <ul>
-        <li><a href="references/multiview_platform.tests.test_utils.html#multiview_platform.tests.test_utils.test_execution.Test_genSplits.setUp">(Test_genSplits method)</a>
-</li>
-        <li><a href="references/multiview_platform.tests.test_utils.html#multiview_platform.tests.test_utils.test_GetMultiviewDB.Test_get_classic_db_csv.setUp">(Test_get_classic_db_csv method)</a>
-</li>
-        <li><a href="references/multiview_platform.tests.test_utils.html#multiview_platform.tests.test_utils.test_GetMultiviewDB.Test_get_classic_db_hdf5.setUp">(Test_get_classic_db_hdf5 method)</a>
-</li>
-        <li><a href="references/multiview_platform.tests.test_utils.html#multiview_platform.tests.test_utils.test_execution.Test_initRandomState.setUp">(Test_initRandomState method)</a>
-</li>
-        <li><a href="references/multiview_platform.tests.test_utils.html#multiview_platform.tests.test_utils.test_execution.Test_parseTheArgs.setUp">(Test_parseTheArgs method)</a>
-</li>
-      </ul></li>
-      <li><a href="references/multiview_platform.tests.test_metrics.html#multiview_platform.tests.test_metrics.test_accuracy_score.Test_accuracy_score.setUpClass">setUpClass() (Test_accuracy_score method)</a>
-
-      <ul>
-        <li><a href="references/multiview_platform.tests.test_utils.html#multiview_platform.tests.test_utils.test_execution.Test_genArgumentDictionaries.setUpClass">(Test_genArgumentDictionaries class method)</a>
-</li>
-        <li><a href="references/multiview_platform.tests.test_utils.html#multiview_platform.tests.test_utils.test_execution.Test_genDirecortiesNames.setUpClass">(Test_genDirecortiesNames class method)</a>
-</li>
-        <li><a href="references/multiview_platform.tests.test_mono_view.html#multiview_platform.tests.test_mono_view.test_MonoviewUtils.Test_genTestFoldsPreds.setUpClass">(Test_genTestFoldsPreds class method)</a>
-</li>
-        <li><a href="references/multiview_platform.tests.test_utils.html#multiview_platform.tests.test_utils.test_multiclass.Test_get_mc_estim.setUpClass">(Test_get_mc_estim class method)</a>
-</li>
-        <li><a href="references/multiview_platform.tests.test_utils.html#multiview_platform.tests.test_utils.test_GetMultiviewDB.Test_get_plausible_db_hdf5.setUpClass">(Test_get_plausible_db_hdf5 class method)</a>
-</li>
-        <li><a href="references/multiview_platform.tests.test_utils.html#multiview_platform.tests.test_utils.test_configuration.Test_get_the_args.setUpClass">(Test_get_the_args class method)</a>
-</li>
-        <li><a href="references/multiview_platform.tests.test_utils.html#multiview_platform.tests.test_utils.test_execution.Test_getDatabaseFunction.setUpClass">(Test_getDatabaseFunction class method)</a>
-</li>
-        <li><a href="references/multiview_platform.tests.test_mono_view.html#multiview_platform.tests.test_mono_view.test_ExecClassifMonoView.Test_getHPs.setUpClass">(Test_getHPs class method)</a>
-</li>
-        <li><a href="references/multiview_platform.tests.test_mono_view.html#multiview_platform.tests.test_mono_view.test_ExecClassifMonoView.Test_initConstants.setUpClass">(Test_initConstants class method)</a>
-</li>
-        <li><a href="references/multiview_platform.tests.test_utils.html#multiview_platform.tests.test_utils.test_execution.Test_initStatsIterRandomStates.setUpClass">(Test_initStatsIterRandomStates class method)</a>
-</li>
-        <li><a href="references/multiview_platform.tests.test_mono_view.html#multiview_platform.tests.test_mono_view.test_ExecClassifMonoView.Test_initTrainTest.setUpClass">(Test_initTrainTest class method)</a>
-</li>
-        <li><a href="references/multiview_platform.tests.test_utils.html#multiview_platform.tests.test_utils.test_multiclass.Test_MultiviewOVOWrapper_fit.setUpClass">(Test_MultiviewOVOWrapper_fit class method)</a>
-</li>
-        <li><a href="references/multiview_platform.tests.test_utils.html#multiview_platform.tests.test_utils.test_multiclass.Test_MultiviewOVRWrapper_fit.setUpClass">(Test_MultiviewOVRWrapper_fit class method)</a>
-</li>
-      </ul></li>
-  </ul></td>
-  <td style="width: 33%; vertical-align: top;"><ul>
-      <li><a href="references/multiview_platform.mono_multi_view_classifiers.utils.html#multiview_platform.mono_multi_view_classifiers.utils.transformations.sign_labels">sign_labels() (in module multiview_platform.mono_multi_view_classifiers.utils.transformations)</a>
-</li>
-      <li><a href="references/multiview_platform.mono_multi_view_classifiers.utils.html#multiview_platform.mono_multi_view_classifiers.utils.hyper_parameter_search.spear_mint">spear_mint() (in module multiview_platform.mono_multi_view_classifiers.utils.hyper_parameter_search)</a>
-</li>
-  </ul></td>
-</tr></table>
-
-<h2 id="T">T</h2>
-<table style="width: 100%" class="indextable genindextable"><tr>
-  <td style="width: 33%; vertical-align: top;"><ul>
-      <li><a href="references/multiview_platform.tests.test_utils.html#multiview_platform.tests.test_utils.test_GetMultiviewDB.Test_get_classic_db_csv.tearDown">tearDown() (Test_get_classic_db_csv class method)</a>
-
-      <ul>
-        <li><a href="references/multiview_platform.tests.test_utils.html#multiview_platform.tests.test_utils.test_GetMultiviewDB.Test_get_classic_db_hdf5.tearDown">(Test_get_classic_db_hdf5 method)</a>
-</li>
-        <li><a href="references/multiview_platform.tests.test_utils.html#multiview_platform.tests.test_utils.test_execution.Test_initRandomState.tearDown">(Test_initRandomState method)</a>
-</li>
-      </ul></li>
-      <li><a href="references/multiview_platform.tests.test_utils.html#multiview_platform.tests.test_utils.test_GetMultiviewDB.Test_get_plausible_db_hdf5.tearDownClass">tearDownClass() (Test_get_plausible_db_hdf5 class method)</a>
-
-      <ul>
-        <li><a href="references/multiview_platform.tests.test_utils.html#multiview_platform.tests.test_utils.test_configuration.Test_get_the_args.tearDownClass">(Test_get_the_args class method)</a>
-</li>
-        <li><a href="references/multiview_platform.tests.test_mono_view.html#multiview_platform.tests.test_mono_view.test_ExecClassifMonoView.Test_getHPs.tearDownClass">(Test_getHPs class method)</a>
-</li>
-        <li><a href="references/multiview_platform.tests.test_mono_view.html#multiview_platform.tests.test_mono_view.test_ExecClassifMonoView.Test_initConstants.tearDownClass">(Test_initConstants class method)</a>
-</li>
-      </ul></li>
-      <li><a href="references/multiview_platform.tests.test_metrics.html#multiview_platform.tests.test_metrics.test_accuracy_score.Test_accuracy_score">Test_accuracy_score (class in multiview_platform.tests.test_metrics.test_accuracy_score)</a>
-</li>
-      <li><a href="references/multiview_platform.tests.test_utils.html#multiview_platform.tests.test_utils.test_GetMultiviewDB.Test_get_classic_db_hdf5.test_all_views_asked">test_all_views_asked() (Test_get_classic_db_hdf5 method)</a>
-</li>
-      <li><a href="references/multiview_platform.tests.test_utils.html#multiview_platform.tests.test_utils.test_configuration.Test_get_the_args.test_arguments">test_arguments() (Test_get_the_args method)</a>
-</li>
-      <li><a href="references/multiview_platform.tests.test_utils.html#multiview_platform.tests.test_utils.test_GetMultiviewDB.Test_get_classic_db_hdf5.test_asked_the_whole_dataset">test_asked_the_whole_dataset() (Test_get_classic_db_hdf5 method)</a>
-</li>
-      <li><a href="references/multiview_platform.tests.test_utils.html#multiview_platform.tests.test_utils.test_multiclass.Test_get_mc_estim.test_biclass">test_biclass() (Test_get_mc_estim method)</a>
-</li>
-      <li><a href="references/multiview_platform.tests.test_utils.html#multiview_platform.tests.test_utils.test_configuration.Test_get_the_args.test_dict_format">test_dict_format() (Test_get_the_args method)</a>
-</li>
-      <li><a href="references/multiview_platform.tests.test_utils.html#multiview_platform.tests.test_utils.test_execution.Test_parseTheArgs.test_empty_args">test_empty_args() (Test_parseTheArgs method)</a>
-</li>
-      <li><a href="references/multiview_platform.tests.test_utils.html#multiview_platform.tests.test_utils.test_configuration.Test_get_the_args.test_file_loading">test_file_loading() (Test_get_the_args method)</a>
-</li>
-      <li><a href="references/multiview_platform.tests.test_utils.html#multiview_platform.tests.test_utils.test_multiclass.Test_MultiviewOVOWrapper_fit.test_fit">test_fit() (Test_MultiviewOVOWrapper_fit method)</a>
+      <li>
+    summit.execute
 
       <ul>
-        <li><a href="references/multiview_platform.tests.test_utils.html#multiview_platform.tests.test_utils.test_multiclass.Test_MultiviewOVRWrapper_fit.test_fit">(Test_MultiviewOVRWrapper_fit method)</a>
+        <li><a href="execution.html#module-summit.execute">module</a>
 </li>
       </ul></li>
-      <li><a href="references/multiview_platform.tests.test_utils.html#multiview_platform.tests.test_utils.test_execution.Test_genArgumentDictionaries">Test_genArgumentDictionaries (class in multiview_platform.tests.test_utils.test_execution)</a>
-</li>
-      <li><a href="references/multiview_platform.tests.test_utils.html#multiview_platform.tests.test_utils.test_execution.Test_genDirecortiesNames">Test_genDirecortiesNames (class in multiview_platform.tests.test_utils.test_execution)</a>
-</li>
-      <li><a href="references/multiview_platform.tests.test_utils.html#multiview_platform.tests.test_utils.test_execution.Test_genKFolds">Test_genKFolds (class in multiview_platform.tests.test_utils.test_execution)</a>
-</li>
-      <li><a href="references/multiview_platform.tests.test_utils.html#multiview_platform.tests.test_utils.test_execution.Test_genKFolds.test_genKFolds_iter">test_genKFolds_iter() (Test_genKFolds method)</a>
-</li>
-      <li><a href="references/multiview_platform.tests.test_utils.html#multiview_platform.tests.test_utils.test_execution.Test_genSplits">Test_genSplits (class in multiview_platform.tests.test_utils.test_execution)</a>
-</li>
-      <li><a href="references/multiview_platform.tests.test_utils.html#multiview_platform.tests.test_utils.test_execution.Test_genSplits.test_genSplits_no_iter">test_genSplits_no_iter() (Test_genSplits method)</a>
-</li>
-      <li><a href="references/multiview_platform.tests.test_mono_view.html#multiview_platform.tests.test_mono_view.test_MonoviewUtils.Test_genTestFoldsPreds">Test_genTestFoldsPreds (class in multiview_platform.tests.test_mono_view.test_MonoviewUtils)</a>
-</li>
-      <li><a href="references/multiview_platform.tests.test_utils.html#multiview_platform.tests.test_utils.test_GetMultiviewDB.Test_get_classic_db_csv">Test_get_classic_db_csv (class in multiview_platform.tests.test_utils.test_GetMultiviewDB)</a>
-</li>
-      <li><a href="references/multiview_platform.tests.test_utils.html#multiview_platform.tests.test_utils.test_GetMultiviewDB.Test_get_classic_db_hdf5">Test_get_classic_db_hdf5 (class in multiview_platform.tests.test_utils.test_GetMultiviewDB)</a>
-</li>
-      <li><a href="references/multiview_platform.tests.test_utils.html#multiview_platform.tests.test_utils.test_multiclass.Test_get_mc_estim">Test_get_mc_estim (class in multiview_platform.tests.test_utils.test_multiclass)</a>
-</li>
-      <li><a href="references/multiview_platform.tests.test_utils.html#multiview_platform.tests.test_utils.test_GetMultiviewDB.Test_get_plausible_db_hdf5">Test_get_plausible_db_hdf5 (class in multiview_platform.tests.test_utils.test_GetMultiviewDB)</a>
-</li>
-      <li><a href="references/multiview_platform.tests.test_utils.html#multiview_platform.tests.test_utils.test_configuration.Test_get_the_args">Test_get_the_args (class in multiview_platform.tests.test_utils.test_configuration)</a>
-</li>
-      <li><a href="references/multiview_platform.tests.test_utils.html#multiview_platform.tests.test_utils.test_execution.Test_getDatabaseFunction">Test_getDatabaseFunction (class in multiview_platform.tests.test_utils.test_execution)</a>
-</li>
-      <li><a href="references/multiview_platform.tests.test_mono_view.html#multiview_platform.tests.test_mono_view.test_ExecClassifMonoView.Test_getHPs">Test_getHPs (class in multiview_platform.tests.test_mono_view.test_ExecClassifMonoView)</a>
-</li>
-      <li><a href="references/multiview_platform.tests.test_utils.html#multiview_platform.tests.test_utils.test_execution.Test_getDatabaseFunction.test_hdf5">test_hdf5() (Test_getDatabaseFunction method)</a>
-</li>
   </ul></td>
   <td style="width: 33%; vertical-align: top;"><ul>
-      <li><a href="references/multiview_platform.tests.test_mono_view.html#multiview_platform.tests.test_mono_view.test_ExecClassifMonoView.Test_initConstants">Test_initConstants (class in multiview_platform.tests.test_mono_view.test_ExecClassifMonoView)</a>
-</li>
-      <li><a href="references/multiview_platform.tests.test_utils.html#multiview_platform.tests.test_utils.test_execution.Test_initRandomState">Test_initRandomState (class in multiview_platform.tests.test_utils.test_execution)</a>
-</li>
-      <li><a href="references/multiview_platform.tests.test_utils.html#multiview_platform.tests.test_utils.test_execution.Test_initStatsIterRandomStates">Test_initStatsIterRandomStates (class in multiview_platform.tests.test_utils.test_execution)</a>
-</li>
-      <li><a href="references/multiview_platform.tests.test_mono_view.html#multiview_platform.tests.test_mono_view.test_ExecClassifMonoView.Test_initTrainTest">Test_initTrainTest (class in multiview_platform.tests.test_mono_view.test_ExecClassifMonoView)</a>
-</li>
-      <li><a href="references/multiview_platform.tests.test_utils.html#multiview_platform.tests.test_utils.test_multiclass.Test_get_mc_estim.test_multiclass_native">test_multiclass_native() (Test_get_mc_estim method)</a>
-</li>
-      <li><a href="references/multiview_platform.tests.test_utils.html#multiview_platform.tests.test_utils.test_multiclass.Test_get_mc_estim.test_multiclass_ovo">test_multiclass_ovo() (Test_get_mc_estim method)</a>
-</li>
-      <li><a href="references/multiview_platform.tests.test_utils.html#multiview_platform.tests.test_utils.test_multiclass.Test_get_mc_estim.test_multiclass_ovo_multiview">test_multiclass_ovo_multiview() (Test_get_mc_estim method)</a>
-</li>
-      <li><a href="references/multiview_platform.tests.test_utils.html#multiview_platform.tests.test_utils.test_multiclass.Test_get_mc_estim.test_multiclass_ovr">test_multiclass_ovr() (Test_get_mc_estim method)</a>
-</li>
-      <li><a href="references/multiview_platform.tests.test_utils.html#multiview_platform.tests.test_utils.test_multiclass.Test_get_mc_estim.test_multiclass_ovr_multiview">test_multiclass_ovr_multiview() (Test_get_mc_estim method)</a>
-</li>
-      <li><a href="references/multiview_platform.tests.test_utils.html#multiview_platform.tests.test_utils.test_execution.Test_initStatsIterRandomStates.test_multiple_iter">test_multiple_iter() (Test_initStatsIterRandomStates method)</a>
-</li>
-      <li><a href="references/multiview_platform.tests.test_utils.html#multiview_platform.tests.test_utils.test_multiclass.Test_MultiviewOVOWrapper_fit">Test_MultiviewOVOWrapper_fit (class in multiview_platform.tests.test_utils.test_multiclass)</a>
-</li>
-      <li><a href="references/multiview_platform.tests.test_utils.html#multiview_platform.tests.test_utils.test_multiclass.Test_MultiviewOVRWrapper_fit">Test_MultiviewOVRWrapper_fit (class in multiview_platform.tests.test_utils.test_multiclass)</a>
-</li>
-      <li><a href="references/multiview_platform.tests.test_utils.html#multiview_platform.tests.test_utils.test_execution.Test_initStatsIterRandomStates.test_one_statiter">test_one_statiter() (Test_initStatsIterRandomStates method)</a>
-</li>
-      <li><a href="references/multiview_platform.tests.test_utils.html#multiview_platform.tests.test_utils.test_execution.Test_genDirecortiesNames.test_ovo_no_iter">test_ovo_no_iter() (Test_genDirecortiesNames method)</a>
-</li>
-      <li><a href="references/multiview_platform.tests.test_utils.html#multiview_platform.tests.test_utils.test_execution.Test_parseTheArgs">Test_parseTheArgs (class in multiview_platform.tests.test_utils.test_execution)</a>
-</li>
-      <li><a href="references/multiview_platform.tests.test_utils.html#multiview_platform.tests.test_utils.test_execution.Test_getDatabaseFunction.test_plausible_hdf5">test_plausible_hdf5() (Test_getDatabaseFunction method)</a>
-</li>
-      <li><a href="references/multiview_platform.tests.test_utils.html#multiview_platform.tests.test_utils.test_multiclass.Test_MultiviewOVOWrapper_fit.test_predict">test_predict() (Test_MultiviewOVOWrapper_fit method)</a>
-
-      <ul>
-        <li><a href="references/multiview_platform.tests.test_utils.html#multiview_platform.tests.test_utils.test_multiclass.Test_MultiviewOVRWrapper_fit.test_predict">(Test_MultiviewOVRWrapper_fit method)</a>
-</li>
-      </ul></li>
-      <li><a href="references/multiview_platform.tests.test_utils.html#multiview_platform.tests.test_utils.test_execution.Test_initRandomState.test_random_state_42">test_random_state_42() (Test_initRandomState method)</a>
-</li>
-      <li><a href="references/multiview_platform.tests.test_utils.html#multiview_platform.tests.test_utils.test_execution.Test_initRandomState.test_random_state_pickle">test_random_state_pickle() (Test_initRandomState method)</a>
-</li>
-      <li><a href="references/multiview_platform.tests.test_utils.html#multiview_platform.tests.test_utils.test_execution.Test_genSplits.test_simple">test_simple() (Test_genSplits method)</a>
+      <li>
+    summit.multiview_platform.result_analysis
 
       <ul>
-        <li><a href="references/multiview_platform.tests.test_mono_view.html#multiview_platform.tests.test_mono_view.test_MonoviewUtils.Test_genTestFoldsPreds.test_simple">(Test_genTestFoldsPreds method)</a>
-</li>
-        <li><a href="references/multiview_platform.tests.test_utils.html#multiview_platform.tests.test_utils.test_GetMultiviewDB.Test_get_classic_db_csv.test_simple">(Test_get_classic_db_csv method)</a>
-</li>
-        <li><a href="references/multiview_platform.tests.test_utils.html#multiview_platform.tests.test_utils.test_GetMultiviewDB.Test_get_classic_db_hdf5.test_simple">(Test_get_classic_db_hdf5 method)</a>
-</li>
-        <li><a href="references/multiview_platform.tests.test_utils.html#multiview_platform.tests.test_utils.test_GetMultiviewDB.Test_get_plausible_db_hdf5.test_simple">(Test_get_plausible_db_hdf5 method)</a>
-</li>
-        <li><a href="references/multiview_platform.tests.test_utils.html#multiview_platform.tests.test_utils.test_execution.Test_getDatabaseFunction.test_simple">(Test_getDatabaseFunction method)</a>
-</li>
-        <li><a href="references/multiview_platform.tests.test_mono_view.html#multiview_platform.tests.test_mono_view.test_ExecClassifMonoView.Test_getHPs.test_simple">(Test_getHPs method)</a>
-</li>
-        <li><a href="references/multiview_platform.tests.test_mono_view.html#multiview_platform.tests.test_mono_view.test_ExecClassifMonoView.Test_initConstants.test_simple">(Test_initConstants method)</a>
-</li>
-        <li><a href="references/multiview_platform.tests.test_mono_view.html#multiview_platform.tests.test_mono_view.test_ExecClassifMonoView.Test_initTrainTest.test_simple">(Test_initTrainTest method)</a>
+        <li><a href="analyzeresult.html#module-summit.multiview_platform.result_analysis">module</a>
 </li>
       </ul></li>
-      <li><a href="references/multiview_platform.tests.test_utils.html#multiview_platform.tests.test_utils.test_execution.Test_genDirecortiesNames.test_simple_ovo">test_simple_ovo() (Test_genDirecortiesNames method)</a>
-</li>
-      <li><a href="references/multiview_platform.tests.test_utils.html#multiview_platform.tests.test_utils.test_GetMultiviewDB.Test_get_plausible_db_hdf5.test_two_class">test_two_class() (Test_get_plausible_db_hdf5 method)</a>
-</li>
-      <li><a href="references/multiview_platform.html#multiview_platform.versions.test_versions">test_versions() (in module multiview_platform.versions)</a>
-</li>
-      <li><a href="references/multiview_platform.mono_multi_view_classifiers.utils.html#multiview_platform.mono_multi_view_classifiers.utils.dataset.Dataset.to_numpy_array">to_numpy_array() (Dataset method)</a>
-</li>
-  </ul></td>
-</tr></table>
-
-<h2 id="U">U</h2>
-<table style="width: 100%" class="indextable genindextable"><tr>
-  <td style="width: 33%; vertical-align: top;"><ul>
-      <li><a href="references/multiview_platform.mono_multi_view_classifiers.utils.html#multiview_platform.mono_multi_view_classifiers.utils.transformations.unsign_labels">unsign_labels() (in module multiview_platform.mono_multi_view_classifiers.utils.transformations)</a>
-</li>
-  </ul></td>
-  <td style="width: 33%; vertical-align: top;"><ul>
-      <li><a href="references/multiview_platform.mono_multi_view_classifiers.utils.html#multiview_platform.mono_multi_view_classifiers.utils.dataset.HDF5Dataset.update_hdf5_dataset">update_hdf5_dataset() (HDF5Dataset method)</a>
-</li>
-  </ul></td>
-</tr></table>
-
-<h2 id="V">V</h2>
-<table style="width: 100%" class="indextable genindextable"><tr>
-  <td style="width: 33%; vertical-align: top;"><ul>
-      <li><a href="references/multiview_platform.mono_multi_view_classifiers.utils.html#multiview_platform.mono_multi_view_classifiers.utils.dataset.HDF5Dataset.view_dict">view_dict (HDF5Dataset attribute)</a>
-</li>
   </ul></td>
 </tr></table>
 
diff --git a/docs/build/index.html b/docs/build/index.html
index aeb07829fbf1099dcd5210966d2a6d8a6ed776cf..3e620e6cb6c62a844e4e27b08f201d0bd0ef14cf 100644
--- a/docs/build/index.html
+++ b/docs/build/index.html
@@ -1,9 +1,11 @@
 
-<!DOCTYPE html>
+<!DOCTYPE html PUBLIC "-//W3C//DTD XHTML 1.0 Transitional//EN"
+  "http://www.w3.org/TR/xhtml1/DTD/xhtml1-transitional.dtd">
 
-<html>
+<html xmlns="http://www.w3.org/1999/xhtml">
   <head>
-    <meta charset="utf-8" />
+    <meta http-equiv="X-UA-Compatible" content="IE=Edge" />
+    <meta http-equiv="Content-Type" content="text/html; charset=utf-8" />
     <title>Welcome to Supervised MultiModal Integration Tool’s documentation &#8212; SuMMIT 0 documentation</title>
     <link rel="stylesheet" href="_static/classic.css" type="text/css" />
     <link rel="stylesheet" href="_static/pygments.css" type="text/css" />
@@ -46,8 +48,8 @@
 <p>The main advantage of the platform is that it allows one to add and remove a classifier without modifying its core code (the procedure is described thoroughly in this documentation).</p>
 <p>This documentation consists of a short read me, with instructions to install and get started with SuMMIT, then several use cases to discover the features, and all the documented sources.</p>
 <div class="admonition note">
-<p class="admonition-title">Note</p>
-<p>The documentation, the platform and the tests are constantly being updated.
+<p class="first admonition-title">Note</p>
+<p class="last">The documentation, the platform and the tests are constantly being updated.
 All the content labelled WIP is Work In Progress</p>
 </div>
 <div class="toctree-wrapper compound">
@@ -60,9 +62,9 @@ All the content labelled WIP is Work In Progress</p>
 <div class="section" id="indices-and-tables">
 <h1>Indices and tables<a class="headerlink" href="#indices-and-tables" title="Permalink to this headline">¶</a></h1>
 <ul class="simple">
-<li><p><a class="reference internal" href="genindex.html"><span class="std std-ref">Index</span></a></p></li>
-<li><p><a class="reference internal" href="py-modindex.html"><span class="std std-ref">Module Index</span></a></p></li>
-<li><p><a class="reference internal" href="search.html"><span class="std std-ref">Search Page</span></a></p></li>
+<li><a class="reference internal" href="genindex.html"><span class="std std-ref">Index</span></a></li>
+<li><a class="reference internal" href="py-modindex.html"><span class="std std-ref">Module Index</span></a></li>
+<li><a class="reference internal" href="search.html"><span class="std std-ref">Search Page</span></a></li>
 </ul>
 </div>
 
diff --git a/docs/build/modules.html b/docs/build/modules.html
index 5415841dfabcc8920fa0531e83573e109e041cc7..07000e0b85b8ae80cc9d791ca128aba19d4fdd28 100644
--- a/docs/build/modules.html
+++ b/docs/build/modules.html
@@ -1,10 +1,12 @@
 
-<!DOCTYPE html>
+<!DOCTYPE html PUBLIC "-//W3C//DTD XHTML 1.0 Transitional//EN"
+  "http://www.w3.org/TR/xhtml1/DTD/xhtml1-transitional.dtd">
 
-<html>
+<html xmlns="http://www.w3.org/1999/xhtml">
   <head>
-    <meta charset="utf-8" />
-    <title>multiview_platform &#8212; SuMMIT 0 documentation</title>
+    <meta http-equiv="X-UA-Compatible" content="IE=Edge" />
+    <meta http-equiv="Content-Type" content="text/html; charset=utf-8" />
+    <title>summit &#8212; SuMMIT 0 documentation</title>
     <link rel="stylesheet" href="_static/classic.css" type="text/css" />
     <link rel="stylesheet" href="_static/pygments.css" type="text/css" />
     
@@ -36,8 +38,8 @@
         <div class="bodywrapper">
           <div class="body" role="main">
             
-  <div class="section" id="multiview-platform">
-<h1>multiview_platform<a class="headerlink" href="#multiview-platform" title="Permalink to this headline">¶</a></h1>
+  <div class="section" id="summit">
+<h1>summit<a class="headerlink" href="#summit" title="Permalink to this headline">¶</a></h1>
 <div class="toctree-wrapper compound">
 </div>
 </div>
diff --git a/docs/build/objects.inv b/docs/build/objects.inv
index 913e3962e732608c7ce98665384238e9ed37f0cc..03dcd9d87af7a386908b5381115fde5df797b9c1 100644
Binary files a/docs/build/objects.inv and b/docs/build/objects.inv differ
diff --git a/docs/build/readme_link.html b/docs/build/readme_link.html
index c005acdb3706cef1b4d507d60baac38fa86be8c0..e03dd7ec91b69a85114bb4c5aa89edb82308444d 100644
--- a/docs/build/readme_link.html
+++ b/docs/build/readme_link.html
@@ -1,9 +1,11 @@
 
-<!DOCTYPE html>
+<!DOCTYPE html PUBLIC "-//W3C//DTD XHTML 1.0 Transitional//EN"
+  "http://www.w3.org/TR/xhtml1/DTD/xhtml1-transitional.dtd">
 
-<html>
+<html xmlns="http://www.w3.org/1999/xhtml">
   <head>
-    <meta charset="utf-8" />
+    <meta http-equiv="X-UA-Compatible" content="IE=Edge" />
+    <meta http-equiv="Content-Type" content="text/html; charset=utf-8" />
     <title>&lt;no title&gt; &#8212; SuMMIT 0 documentation</title>
     <link rel="stylesheet" href="_static/classic.css" type="text/css" />
     <link rel="stylesheet" href="_static/pygments.css" type="text/css" />
@@ -16,9 +18,7 @@
     <script src="_static/plotly_js.js"></script>
     
     <link rel="index" title="Index" href="genindex.html" />
-    <link rel="search" title="Search" href="search.html" />
-    <link rel="next" title="SuMMIT Tutorials" href="tutorials/index.html" />
-    <link rel="prev" title="Welcome to Supervised MultiModal Integration Tool’s documentation" href="index.html" /> 
+    <link rel="search" title="Search" href="search.html" /> 
   </head><body>
     <div class="related" role="navigation" aria-label="related navigation">
       <h3>Navigation</h3>
@@ -29,12 +29,6 @@
         <li class="right" >
           <a href="py-modindex.html" title="Python Module Index"
              >modules</a> |</li>
-        <li class="right" >
-          <a href="tutorials/index.html" title="SuMMIT Tutorials"
-             accesskey="N">next</a> |</li>
-        <li class="right" >
-          <a href="index.html" title="Welcome to Supervised MultiModal Integration Tool’s documentation"
-             accesskey="P">previous</a> |</li>
         <li class="nav-item nav-item-0"><a href="index.html">SuMMIT 0 documentation</a> &#187;</li> 
       </ul>
     </div>  
@@ -52,12 +46,6 @@
       </div>
       <div class="sphinxsidebar" role="navigation" aria-label="main navigation">
         <div class="sphinxsidebarwrapper">
-  <h4>Previous topic</h4>
-  <p class="topless"><a href="index.html"
-                        title="previous chapter">Welcome to Supervised MultiModal Integration Tool’s documentation</a></p>
-  <h4>Next topic</h4>
-  <p class="topless"><a href="tutorials/index.html"
-                        title="next chapter">SuMMIT Tutorials</a></p>
   <div role="note" aria-label="source link">
     <h3>This Page</h3>
     <ul class="this-page-menu">
@@ -88,12 +76,6 @@
         <li class="right" >
           <a href="py-modindex.html" title="Python Module Index"
              >modules</a> |</li>
-        <li class="right" >
-          <a href="tutorials/index.html" title="SuMMIT Tutorials"
-             >next</a> |</li>
-        <li class="right" >
-          <a href="index.html" title="Welcome to Supervised MultiModal Integration Tool’s documentation"
-             >previous</a> |</li>
         <li class="nav-item nav-item-0"><a href="index.html">SuMMIT 0 documentation</a> &#187;</li> 
       </ul>
     </div>
diff --git a/docs/build/references/monomulti/exec_classif.html b/docs/build/references/monomulti/exec_classif.html
index f1550fe7deb26b4947f4f3c6459a634899f29dfb..7e857a5354e22e1c7a2e6afa5048be41c348b310 100644
--- a/docs/build/references/monomulti/exec_classif.html
+++ b/docs/build/references/monomulti/exec_classif.html
@@ -1,9 +1,11 @@
 
-<!DOCTYPE html>
+<!DOCTYPE html PUBLIC "-//W3C//DTD XHTML 1.0 Transitional//EN"
+  "http://www.w3.org/TR/xhtml1/DTD/xhtml1-transitional.dtd">
 
-<html>
+<html xmlns="http://www.w3.org/1999/xhtml">
   <head>
-    <meta charset="utf-8" />
+    <meta http-equiv="X-UA-Compatible" content="IE=Edge" />
+    <meta http-equiv="Content-Type" content="text/html; charset=utf-8" />
     <title>Classification execution module &#8212; SuMMIT 0 documentation</title>
     <link rel="stylesheet" href="../../_static/classic.css" type="text/css" />
     <link rel="stylesheet" href="../../_static/pygments.css" type="text/css" />
@@ -36,226 +38,8 @@
         <div class="bodywrapper">
           <div class="body" role="main">
             
-  <div class="section" id="module-multiview_platform.mono_multi_view_classifiers.exec_classif">
-<span id="classification-execution-module"></span><h1>Classification execution module<a class="headerlink" href="#module-multiview_platform.mono_multi_view_classifiers.exec_classif" title="Permalink to this headline">¶</a></h1>
-<dl class="py function">
-<dt id="multiview_platform.mono_multi_view_classifiers.exec_classif.arange_metrics">
-<code class="sig-name descname">arange_metrics</code><span class="sig-paren">(</span><em class="sig-param"><span class="n">metrics</span></em>, <em class="sig-param"><span class="n">metric_princ</span></em><span class="sig-paren">)</span><a class="headerlink" href="#multiview_platform.mono_multi_view_classifiers.exec_classif.arange_metrics" title="Permalink to this definition">¶</a></dt>
-<dd><p>Used to get the metrics list in the right order so that
-the first one is the principal metric specified in args</p>
-<dl class="field-list simple">
-<dt class="field-odd">Parameters</dt>
-<dd class="field-odd"><ul class="simple">
-<li><p><strong>metrics</strong> (<em>dict</em>) – The metrics that will be used in the benchmark</p></li>
-<li><p><strong>metric_princ</strong> (<em>str</em>) – The name of the metric that needs to be used for the hyper-parameter
-optimization process</p></li>
-</ul>
-</dd>
-<dt class="field-even">Returns</dt>
-<dd class="field-even"><p><strong>metrics</strong> – The metrics list, but arranged  so the first one is the principal one.</p>
-</dd>
-<dt class="field-odd">Return type</dt>
-<dd class="field-odd"><p>list of lists</p>
-</dd>
-</dl>
-</dd></dl>
-
-<dl class="py function">
-<dt id="multiview_platform.mono_multi_view_classifiers.exec_classif.benchmark_init">
-<code class="sig-name descname">benchmark_init</code><span class="sig-paren">(</span><em class="sig-param"><span class="n">directory</span></em>, <em class="sig-param"><span class="n">classification_indices</span></em>, <em class="sig-param"><span class="n">labels</span></em>, <em class="sig-param"><span class="n">labels_dictionary</span></em>, <em class="sig-param"><span class="n">k_folds</span></em>, <em class="sig-param"><span class="n">dataset_var</span></em><span class="sig-paren">)</span><a class="headerlink" href="#multiview_platform.mono_multi_view_classifiers.exec_classif.benchmark_init" title="Permalink to this definition">¶</a></dt>
-<dd><p>Initializes the benchmark by saving the indices of the train
-examples and the cross-validation folds.</p>
-<dl class="field-list simple">
-<dt class="field-odd">Parameters</dt>
-<dd class="field-odd"><ul class="simple">
-<li><p><strong>directory</strong> (<em>str</em>) – The benchmark’s result directory</p></li>
-<li><p><strong>classification_indices</strong> (<em>numpy array</em>) – The indices of the examples, split for the train/test split</p></li>
-<li><p><strong>labels</strong> (<em>numpy array</em>) – The labels of the dataset</p></li>
-<li><p><strong>labels_dictionary</strong> (<em>dict</em>) – The dictionary with labels as keys and their names as values</p></li>
-<li><p><strong>k_folds</strong> (<em>sklearn.model_selection.Folds object</em>) – The folds for the cross validation process</p></li>
-</ul>
-</dd>
-</dl>
-</dd></dl>
-
-<dl class="py function">
-<dt id="multiview_platform.mono_multi_view_classifiers.exec_classif.exec_benchmark">
-<code class="sig-name descname">exec_benchmark</code><span class="sig-paren">(</span><em class="sig-param">nb_cores</em>, <em class="sig-param">stats_iter</em>, <em class="sig-param">benchmark_arguments_dictionaries</em>, <em class="sig-param">directory</em>, <em class="sig-param">metrics</em>, <em class="sig-param">dataset_var</em>, <em class="sig-param">track_tracebacks</em>, <em class="sig-param">exec_one_benchmark_mono_core=&lt;function exec_one_benchmark_mono_core&gt;</em>, <em class="sig-param">analyze=&lt;function analyze&gt;</em>, <em class="sig-param">delete=&lt;function delete_HDF5&gt;</em>, <em class="sig-param">analyze_iterations=&lt;function analyze_iterations&gt;</em><span class="sig-paren">)</span><a class="headerlink" href="#multiview_platform.mono_multi_view_classifiers.exec_classif.exec_benchmark" title="Permalink to this definition">¶</a></dt>
-<dd><p>Used to execute the needed benchmark(s) with the multicore or mono-core functions.</p>
-<dl class="field-list simple">
-<dt class="field-odd">Parameters</dt>
-<dd class="field-odd"><ul class="simple">
-<li><p><strong>nb_cores</strong> (<em>int</em>) – Number of threads that the benchmarks can use.</p></li>
-<li><p><strong>stats_iter</strong> (<em>int</em>) – Number of statistical iterations that have to be done.</p></li>
-<li><p><strong>benchmark_arguments_dictionaries</strong> (<em>list of dictionaries</em>) – All the needed arguments for the benchmarks.</p></li>
-<li><p><strong>classification_indices</strong> (<em>list of lists of numpy.ndarray</em>) – For each statistical iteration a couple of numpy.ndarrays is stored with the indices for the training set and
-the ones for the testing set.</p></li>
-<li><p><strong>directories</strong> (<em>list of strings</em>) – List of the paths to the result directories for each statistical iteration.</p></li>
-<li><p><strong>directory</strong> (<em>string</em>) – Path to the main results directory.</p></li>
-<li><p><strong>multi_class_labels</strong> (<em>list of lists of numpy.ndarray</em>) – For each label couple, for each statistical iteration a triplet of numpy.ndarrays is stored with the
-indices for the biclass training set, the ones for the biclass testing set and the ones for the
-multiclass testing set.</p></li>
-<li><p><strong>metrics</strong> (<em>list of lists</em>) – Metrics that will be used to evaluate the algorithms&#8217; performance.</p></li>
-<li><p><strong>labels_dictionary</strong> (<em>dictionary</em>) – Dictionary mapping label indices to label names.</p></li>
-<li><p><strong>nb_labels</strong> (<em>int</em>) – Total number of different labels in the dataset.</p></li>
-<li><p><strong>dataset_var</strong> (<em>HDF5 dataset file</em>) – The full dataset that will be used by the benchmark.</p></li>
-<li><p><strong>classifiers_names</strong> (<em>list of strings</em>) – List of the benchmark’s monoview classifier names.</p></li>
-<li><p><strong>rest_of_the_args</strong> – Just used for testing purposes</p></li>
-</ul>
-</dd>
-<dt class="field-even">Returns</dt>
-<dd class="field-even"><p><strong>results</strong> – The results of the benchmark.</p>
-</dd>
-<dt class="field-odd">Return type</dt>
-<dd class="field-odd"><p>list of lists</p>
-</dd>
-</dl>
-</dd></dl>
-
-<dl class="py function">
-<dt id="multiview_platform.mono_multi_view_classifiers.exec_classif.exec_classif">
-<code class="sig-name descname">exec_classif</code><span class="sig-paren">(</span><em class="sig-param"><span class="n">arguments</span></em><span class="sig-paren">)</span><a class="headerlink" href="#multiview_platform.mono_multi_view_classifiers.exec_classif.exec_classif" title="Permalink to this definition">¶</a></dt>
-<dd><p>Runs the benchmark with the given arguments</p>
-<dl class="field-list simple">
-<dt class="field-odd">Parameters</dt>
-<dd class="field-odd"><p><strong>arguments</strong> – </p>
-</dd>
-<dt class="field-even">Returns</dt>
-<dd class="field-even"><p><ul class="simple">
-<li><p><em>&gt;&gt;&gt; exec_classif([--config_path, /path/to/config/files/])</em></p></li>
-<li><p><em>&gt;&gt;&gt;</em></p></li>
-</ul>
-</p>
-</dd>
-</dl>
-</dd></dl>
-
-<dl class="py function">
-<dt id="multiview_platform.mono_multi_view_classifiers.exec_classif.extract_dict">
-<code class="sig-name descname">extract_dict</code><span class="sig-paren">(</span><em class="sig-param"><span class="n">classifier_config</span></em><span class="sig-paren">)</span><a class="headerlink" href="#multiview_platform.mono_multi_view_classifiers.exec_classif.extract_dict" title="Permalink to this definition">¶</a></dt>
-<dd><p>Reverse function of get_path_dict</p>
-</dd></dl>
-
-<dl class="py function">
-<dt id="multiview_platform.mono_multi_view_classifiers.exec_classif.get_path_dict">
-<code class="sig-name descname">get_path_dict</code><span class="sig-paren">(</span><em class="sig-param"><span class="n">multiview_classifier_args</span></em><span class="sig-paren">)</span><a class="headerlink" href="#multiview_platform.mono_multi_view_classifiers.exec_classif.get_path_dict" title="Permalink to this definition">¶</a></dt>
-<dd><p>This function is used to generate a dictionary with each key being
-the path to the value.
-If given {“key1”:{“key1_1”:value1}, “key2”:value2}, it will return
-{“key1.key1_1”:value1, “key2”:value2}</p>
-</dd></dl>
-
-<dl class="py function">
-<dt id="multiview_platform.mono_multi_view_classifiers.exec_classif.init_benchmark">
-<code class="sig-name descname">init_benchmark</code><span class="sig-paren">(</span><em class="sig-param"><span class="n">cl_type</span></em>, <em class="sig-param"><span class="n">monoview_algos</span></em>, <em class="sig-param"><span class="n">multiview_algos</span></em>, <em class="sig-param"><span class="n">args</span></em><span class="sig-paren">)</span><a class="headerlink" href="#multiview_platform.mono_multi_view_classifiers.exec_classif.init_benchmark" title="Permalink to this definition">¶</a></dt>
-<dd><p>Used to create a list of all the algorithm package names used for the benchmark.</p>
-<p>First this function will check whether the benchmark needs mono- and/or multiview
-algorithms and adds the requested
-algorithms to the right dictionary. If none are asked for by the user, all will be added.</p>
-<p>If the keyword <cite>“Benchmark”</cite> is used, all mono- and multiview algorithms will be added.</p>
-<dl class="field-list simple">
-<dt class="field-odd">Parameters</dt>
-<dd class="field-odd"><ul class="simple">
-<li><p><strong>cl_type</strong> (<em>List of string</em>) – List of types of needed benchmark</p></li>
-<li><p><strong>multiview_algos</strong> (<em>List of strings</em>) – List of multiview algorithms needed for the benchmark</p></li>
-<li><p><strong>monoview_algos</strong> (<em>List of strings</em>) – List of monoview algorithms needed for the benchmark</p></li>
-<li><p><strong>args</strong> (<em>ParsedArgumentParser args</em>) – All the input args (used to tune the algorithms)</p></li>
-</ul>
-</dd>
-<dt class="field-even">Returns</dt>
-<dd class="field-even"><p><strong>benchmark</strong> – Dictionary resuming which mono- and multiview algorithms which will be used in the benchmark.</p>
-</dd>
-<dt class="field-odd">Return type</dt>
-<dd class="field-odd"><p>Dictionary of dictionaries</p>
-</dd>
-</dl>
-</dd></dl>
-
-<dl class="py function">
-<dt id="multiview_platform.mono_multi_view_classifiers.exec_classif.init_kwargs">
-<code class="sig-name descname">init_kwargs</code><span class="sig-paren">(</span><em class="sig-param"><span class="n">args</span></em>, <em class="sig-param"><span class="n">classifiers_names</span></em>, <em class="sig-param"><span class="n">framework</span><span class="o">=</span><span class="default_value">'monoview'</span></em><span class="sig-paren">)</span><a class="headerlink" href="#multiview_platform.mono_multi_view_classifiers.exec_classif.init_kwargs" title="Permalink to this definition">¶</a></dt>
-<dd><p>Used to initialize the kwargs using a function in each monoview classifier package.</p>
-<dl class="field-list simple">
-<dt class="field-odd">Parameters</dt>
-<dd class="field-odd"><ul class="simple">
-<li><p><strong>args</strong> (<em>parsed args objects</em>) – All the args passed by the user.</p></li>
-<li><p><strong>classifiers_names</strong> (<em>list of strings</em>) – List of the benchmark’s monoview classifier names.</p></li>
-</ul>
-</dd>
-<dt class="field-even">Returns</dt>
-<dd class="field-even"><p><p><strong>kwargs</strong> – Dictionary resuming all the specific arguments for the benchmark, one dictionary for each classifier.</p>
-<p>For example, for Adaboost, the KWARGS will be <cite>{“n_estimators”:&lt;value&gt;, “base_estimator”:&lt;value&gt;}</cite></p>
-</p>
-</dd>
-<dt class="field-odd">Return type</dt>
-<dd class="field-odd"><p>Dictionary</p>
-</dd>
-</dl>
-</dd></dl>
-
-<dl class="py function">
-<dt id="multiview_platform.mono_multi_view_classifiers.exec_classif.init_kwargs_func">
-<code class="sig-name descname">init_kwargs_func</code><span class="sig-paren">(</span><em class="sig-param"><span class="n">args</span></em>, <em class="sig-param"><span class="n">benchmark</span></em><span class="sig-paren">)</span><a class="headerlink" href="#multiview_platform.mono_multi_view_classifiers.exec_classif.init_kwargs_func" title="Permalink to this definition">¶</a></dt>
-<dd><p>Dispatches the kwargs initialization to the monoview and multiview functions and creates
-the kwargs variable.</p>
-<dl class="field-list simple">
-<dt class="field-odd">Parameters</dt>
-<dd class="field-odd"><ul class="simple">
-<li><p><strong>args</strong> (<em>parsed args objects</em>) – All the args passed by the user.</p></li>
-<li><p><strong>benchmark</strong> (<em>dict</em>) – The names of the mono- and multi-view classifiers to run in the benchmark</p></li>
-</ul>
-</dd>
-<dt class="field-even">Returns</dt>
-<dd class="field-even"><p><strong>kwargs</strong> – The arguments for each mono- and multiview algorithms</p>
-</dd>
-<dt class="field-odd">Return type</dt>
-<dd class="field-odd"><p>dict</p>
-</dd>
-</dl>
-</dd></dl>
-
-<dl class="py function">
-<dt id="multiview_platform.mono_multi_view_classifiers.exec_classif.init_monoview_exps">
-<code class="sig-name descname">init_monoview_exps</code><span class="sig-paren">(</span><em class="sig-param"><span class="n">classifier_names</span></em>, <em class="sig-param"><span class="n">views_dictionary</span></em>, <em class="sig-param"><span class="n">nb_class</span></em>, <em class="sig-param"><span class="n">kwargs_init</span></em>, <em class="sig-param"><span class="n">hps_method</span></em>, <em class="sig-param"><span class="n">hps_kwargs</span></em><span class="sig-paren">)</span><a class="headerlink" href="#multiview_platform.mono_multi_view_classifiers.exec_classif.init_monoview_exps" title="Permalink to this definition">¶</a></dt>
-<dd><p>Used to add each monoview experiment’s args to the list of monoview experiment args.</p>
-<p>First this function will check whether the benchmark needs mono- and/or multiview algorithms and adds the
-requested algorithms to the right dictionary. If none are asked for by the user, all will be added.</p>
-<p>If the keyword <cite>“Benchmark”</cite> is used, all mono- and multiview algorithms will be added.</p>
-<dl class="field-list simple">
-<dt class="field-odd">Parameters</dt>
-<dd class="field-odd"><ul class="simple">
-<li><p><strong>classifier_names</strong> (<em>dictionary</em>) – All types of monoview and multiview experiments that have to be benchmarked</p></li>
-<li><p><strong>argument_dictionaries</strong> (<em>dictionary</em>) – Maps monoview and multiview experiments arguments.</p></li>
-<li><p><strong>views_dictionary</strong> (<em>dictionary</em>) – Maps the view names to their index in the HDF5 dataset</p></li>
-<li><p><strong>nb_class</strong> (<em>integer</em>) – Number of different labels in the classification</p></li>
-</ul>
-</dd>
-<dt class="field-even">Returns</dt>
-<dd class="field-even"><p><strong>benchmark</strong> – Dictionary resuming which mono- and multiview algorithms which will be used in the benchmark.</p>
-</dd>
-<dt class="field-odd">Return type</dt>
-<dd class="field-odd"><p>Dictionary of dictionaries</p>
-</dd>
-</dl>
-</dd></dl>
-
-<dl class="py function">
-<dt id="multiview_platform.mono_multi_view_classifiers.exec_classif.is_dict_in">
-<code class="sig-name descname">is_dict_in</code><span class="sig-paren">(</span><em class="sig-param"><span class="n">dictionary</span></em><span class="sig-paren">)</span><a class="headerlink" href="#multiview_platform.mono_multi_view_classifiers.exec_classif.is_dict_in" title="Permalink to this definition">¶</a></dt>
-<dd><p>Returns True if any of the dictionary’s values is a dictionary itself.</p>
-<dl class="field-list simple">
-<dt class="field-odd">Parameters</dt>
-<dd class="field-odd"><p><strong>dictionary</strong> – </p>
-</dd>
-</dl>
-</dd></dl>
-
-<dl class="py function">
-<dt id="multiview_platform.mono_multi_view_classifiers.exec_classif.set_element">
-<code class="sig-name descname">set_element</code><span class="sig-paren">(</span><em class="sig-param"><span class="n">dictionary</span></em>, <em class="sig-param"><span class="n">path</span></em>, <em class="sig-param"><span class="n">value</span></em><span class="sig-paren">)</span><a class="headerlink" href="#multiview_platform.mono_multi_view_classifiers.exec_classif.set_element" title="Permalink to this definition">¶</a></dt>
-<dd><p>Sets the value in the dictionary at the location indicated by the path.</p>
-</dd></dl>
-
+  <div class="section" id="classification-execution-module">
+<h1>Classification execution module<a class="headerlink" href="#classification-execution-module" title="Permalink to this headline">¶</a></h1>
 </div>
 
 
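For reference, the docstrings removed above describe a dotted-path convention for classifier configuration dictionaries. Here is a minimal self-contained sketch of the behaviour they specify for `get_path_dict`, `extract_dict`, and `set_element`; the bodies are illustrative reconstructions from those docstrings, not SuMMIT's shipped code.

```python
# Illustrative reconstruction of the dotted-path helpers documented above;
# not the platform's actual implementation.

def get_path_dict(nested, prefix=""):
    """Flatten a nested dict: {"key1": {"key1_1": v}} -> {"key1.key1_1": v}."""
    flat = {}
    for key, value in nested.items():
        path = prefix + "." + key if prefix else key
        if isinstance(value, dict):
            flat.update(get_path_dict(value, path))
        else:
            flat[path] = value
    return flat


def set_element(dictionary, path, value):
    """Set value in dictionary at the location indicated by a dotted path."""
    keys = path.split(".")
    for key in keys[:-1]:
        dictionary = dictionary.setdefault(key, {})
    dictionary[keys[-1]] = value


def extract_dict(flat):
    """Reverse of get_path_dict: rebuild the nested dict from dotted paths."""
    nested = {}
    for path, value in flat.items():
        set_element(nested, path, value)
    return nested


if __name__ == "__main__":
    config = {"key1": {"key1_1": 1}, "key2": 2}
    flat = get_path_dict(config)
    assert flat == {"key1.key1_1": 1, "key2": 2}
    assert extract_dict(flat) == config
```
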
diff --git a/docs/build/references/monomulti/metrics.html b/docs/build/references/monomulti/metrics.html
index 8b1ec1def397ad052ae51c1ef5f712c169f51d28..e7b7360b01f2a09553af3a2ae1cd44170d352c46 100644
--- a/docs/build/references/monomulti/metrics.html
+++ b/docs/build/references/monomulti/metrics.html
@@ -1,9 +1,11 @@
 
-<!DOCTYPE html>
+<!DOCTYPE html PUBLIC "-//W3C//DTD XHTML 1.0 Transitional//EN"
+  "http://www.w3.org/TR/xhtml1/DTD/xhtml1-transitional.dtd">
 
-<html>
+<html xmlns="http://www.w3.org/1999/xhtml">
   <head>
-    <meta charset="utf-8" />
+    <meta http-equiv="X-UA-Compatible" content="IE=Edge" />
+    <meta http-equiv="Content-Type" content="text/html; charset=utf-8" />
     <title>Metrics framework &#8212; SuMMIT 0 documentation</title>
     <link rel="stylesheet" href="../../_static/classic.css" type="text/css" />
     <link rel="stylesheet" href="../../_static/pygments.css" type="text/css" />
@@ -36,72 +38,8 @@
         <div class="bodywrapper">
           <div class="body" role="main">
             
-  <div class="section" id="module-multiview_platform.mono_multi_view_classifiers.metrics.framework">
-<span id="metrics-framework"></span><h1>Metrics framework<a class="headerlink" href="#module-multiview_platform.mono_multi_view_classifiers.metrics.framework" title="Permalink to this headline">¶</a></h1>
-<p>In this file, we explain how to add a metric to the platform.</p>
-<p>In order to do that, one needs to add a file with the following functions,
-which are mandatory for the metric to work with the platform.</p>
-<dl class="py function">
-<dt id="multiview_platform.mono_multi_view_classifiers.metrics.framework.get_config">
-<code class="sig-name descname">get_config</code><span class="sig-paren">(</span><em class="sig-param"><span class="o">**</span><span class="n">kwargs</span></em><span class="sig-paren">)</span><a class="headerlink" href="#multiview_platform.mono_multi_view_classifiers.metrics.framework.get_config" title="Permalink to this definition">¶</a></dt>
-<dd><p>Get the metric’s configuration as a string.</p>
-<dl class="field-list simple">
-<dt class="field-odd">Parameters</dt>
-<dd class="field-odd"><p><strong>kwargs</strong> (<em>dict</em>) – The arguments stored in this dictionary must be keyed by string of
-integers as “0”, .., etc and decrypted in the function. These arguments
-are a configuration of the metric.</p>
-</dd>
-<dt class="field-even">Returns</dt>
-<dd class="field-even"><p><strong>configString</strong> – The string describing the metric’s configuration.</p>
-</dd>
-<dt class="field-odd">Return type</dt>
-<dd class="field-odd"><p>string</p>
-</dd>
-</dl>
-</dd></dl>
-
-<dl class="py function">
-<dt id="multiview_platform.mono_multi_view_classifiers.metrics.framework.get_scorer">
-<code class="sig-name descname">get_scorer</code><span class="sig-paren">(</span><em class="sig-param"><span class="o">**</span><span class="n">kwargs</span></em><span class="sig-paren">)</span><a class="headerlink" href="#multiview_platform.mono_multi_view_classifiers.metrics.framework.get_scorer" title="Permalink to this definition">¶</a></dt>
-<dd><p>Get the metric’s scorer as in the sklearn.metrics package.</p>
-<dl class="field-list simple">
-<dt class="field-odd">Parameters</dt>
-<dd class="field-odd"><p><strong>kwargs</strong> (<em>dict</em>) – The arguments stored in this dictionary must be keyed by string of
-integers as “0”, .., etc and decrypted in the function. These arguments
-are a configuration of the metric.</p>
-</dd>
-<dt class="field-even">Returns</dt>
-<dd class="field-even"><p><strong>scorer</strong> – Callable object that returns a scalar score; greater is better. (cf sklearn.metrics.make_scorer)</p>
-</dd>
-<dt class="field-odd">Return type</dt>
-<dd class="field-odd"><p>object</p>
-</dd>
-</dl>
-</dd></dl>
-
-<dl class="py function">
-<dt id="multiview_platform.mono_multi_view_classifiers.metrics.framework.score">
-<code class="sig-name descname">score</code><span class="sig-paren">(</span><em class="sig-param"><span class="n">y_true</span></em>, <em class="sig-param"><span class="n">y_pred</span></em>, <em class="sig-param"><span class="n">multiclass</span><span class="o">=</span><span class="default_value">False</span></em>, <em class="sig-param"><span class="o">**</span><span class="n">kwargs</span></em><span class="sig-paren">)</span><a class="headerlink" href="#multiview_platform.mono_multi_view_classifiers.metrics.framework.score" title="Permalink to this definition">¶</a></dt>
-<dd><p>Get the metric’s score from the ground truth (<code class="docutils literal notranslate"><span class="pre">y_true</span></code>) and predictions (<code class="docutils literal notranslate"><span class="pre">y_pred</span></code>).</p>
-<dl class="field-list simple">
-<dt class="field-odd">Parameters</dt>
-<dd class="field-odd"><ul class="simple">
-<li><p><strong>y_true</strong> (<em>array-like</em><em>, </em><em>shape =</em><em> (</em><em>n_samples</em><em>,</em><em>)</em>) – Target values (class labels).</p></li>
-<li><p><strong>y_pred</strong> (<em>array-like</em><em>, </em><em>shape =</em><em> (</em><em>n_samples</em><em>,</em><em>)</em>) – Predicted target values (class labels).</p></li>
-<li><p><strong>multiclass</strong> (<em>boolean</em><em> (</em><em>default=False</em><em>)</em>) – Parameter specifying whether the target values are multiclass or not.</p></li>
-<li><p><strong>kwargs</strong> (<em>dict</em>) – The arguments stored in this dictionary must be keyed by strings of
-integers such as “0”, etc., and decoded in the function</p></li>
-</ul>
-</dd>
-<dt class="field-even">Returns</dt>
-<dd class="field-even"><p><strong>score</strong> – Returns the score of the prediction.</p>
-</dd>
-<dt class="field-odd">Return type</dt>
-<dd class="field-odd"><p>float</p>
-</dd>
-</dl>
-</dd></dl>
-
+  <div class="section" id="metrics-framework">
+<h1>Metrics framework<a class="headerlink" href="#metrics-framework" title="Permalink to this headline">¶</a></h1>
 </div>
 
 
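For reference, the framework description removed above lists three functions a metric module must provide. A minimal conforming sketch follows, using accuracy as an arbitrary stand-in; it is illustrative only, not the platform's actual metric code.

```python
# Illustrative metric module following the framework described above:
# get_config, get_scorer, and score are the functions it lists as mandatory.
# Accuracy is an arbitrary choice made for this sketch.
from sklearn.metrics import accuracy_score, make_scorer


def score(y_true, y_pred, multiclass=False, **kwargs):
    """Return the metric's score from ground truth and predictions."""
    return accuracy_score(y_true, y_pred)


def get_scorer(**kwargs):
    """Return a callable scorer, greater is better (cf. make_scorer)."""
    return make_scorer(accuracy_score, greater_is_better=True)


def get_config(**kwargs):
    """Return a string describing the metric's configuration."""
    return "Accuracy score (no parameters)"
```
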
diff --git a/docs/build/references/monomulti/multiview_classifiers/diversity_fusion.html b/docs/build/references/monomulti/multiview_classifiers/diversity_fusion.html
index cdbe29d79581d156e02b7cfe04193ee7c28d146b..5cbb63047e40b18818cdd52986f6ba43f2ef9e22 100644
--- a/docs/build/references/monomulti/multiview_classifiers/diversity_fusion.html
+++ b/docs/build/references/monomulti/multiview_classifiers/diversity_fusion.html
@@ -1,9 +1,11 @@
 
-<!DOCTYPE html>
+<!DOCTYPE html PUBLIC "-//W3C//DTD XHTML 1.0 Transitional//EN"
+  "http://www.w3.org/TR/xhtml1/DTD/xhtml1-transitional.dtd">
 
-<html>
+<html xmlns="http://www.w3.org/1999/xhtml">
   <head>
-    <meta charset="utf-8" />
+    <meta http-equiv="X-UA-Compatible" content="IE=Edge" />
+    <meta http-equiv="Content-Type" content="text/html; charset=utf-8" />
     <title>Diversity Fusion Classifiers &#8212; SuMMIT 0 documentation</title>
     <link rel="stylesheet" href="../../../_static/classic.css" type="text/css" />
     <link rel="stylesheet" href="../../../_static/pygments.css" type="text/css" />
diff --git a/docs/build/references/monomulti/utils/execution.html b/docs/build/references/monomulti/utils/execution.html
index eceb655cad8f01602b242256b1c60d5bd3852ec2..4b278a5259e5a269f5d89853e8b877f8c6adceef 100644
--- a/docs/build/references/monomulti/utils/execution.html
+++ b/docs/build/references/monomulti/utils/execution.html
@@ -1,9 +1,11 @@
 
-<!DOCTYPE html>
+<!DOCTYPE html PUBLIC "-//W3C//DTD XHTML 1.0 Transitional//EN"
+  "http://www.w3.org/TR/xhtml1/DTD/xhtml1-transitional.dtd">
 
-<html>
+<html xmlns="http://www.w3.org/1999/xhtml">
   <head>
-    <meta charset="utf-8" />
+    <meta http-equiv="X-UA-Compatible" content="IE=Edge" />
+    <meta http-equiv="Content-Type" content="text/html; charset=utf-8" />
     <title>Utils execution module &#8212; SuMMIT 0 documentation</title>
     <link rel="stylesheet" href="../../../_static/classic.css" type="text/css" />
     <link rel="stylesheet" href="../../../_static/pygments.css" type="text/css" />
@@ -38,14 +40,23 @@
             
   <div class="section" id="utils-execution-module">
 <h1>Utils execution module<a class="headerlink" href="#utils-execution-module" title="Permalink to this headline">¶</a></h1>
-<dl class="field-list simple">
-<dt class="field-odd">members</dt>
-<dd class="field-odd"><dl class="field-list simple">
-<dt class="field-odd">inherited-members</dt>
-<dd class="field-odd"><p></p></dd>
-</dl>
-</dd>
-</dl>
+<table class="docutils field-list" frame="void" rules="none">
+<col class="field-name" />
+<col class="field-body" />
+<tbody valign="top">
+<tr class="field-odd field"><th class="field-name">members:</th><td class="field-body"><table class="first last docutils field-list" frame="void" rules="none">
+<col class="field-name" />
+<col class="field-body" />
+<tbody valign="top">
+<tr class="field-odd field"><th class="field-name" colspan="2">inherited-members:</th></tr>
+<tr class="field-odd field"><td>&#160;</td><td class="field-body"></td>
+</tr>
+</tbody>
+</table>
+</td>
+</tr>
+</tbody>
+</table>
 </div>
 
 
diff --git a/docs/build/references/monomulti/utils/multiclass.html b/docs/build/references/monomulti/utils/multiclass.html
index 1c76e067c88f12346c67dcc5a0255093cd068fb3..dfb3f73ab76f5692e4a1efbde04458814defa969 100644
--- a/docs/build/references/monomulti/utils/multiclass.html
+++ b/docs/build/references/monomulti/utils/multiclass.html
@@ -1,9 +1,11 @@
 
-<!DOCTYPE html>
+<!DOCTYPE html PUBLIC "-//W3C//DTD XHTML 1.0 Transitional//EN"
+  "http://www.w3.org/TR/xhtml1/DTD/xhtml1-transitional.dtd">
 
-<html>
+<html xmlns="http://www.w3.org/1999/xhtml">
   <head>
-    <meta charset="utf-8" />
+    <meta http-equiv="X-UA-Compatible" content="IE=Edge" />
+    <meta http-equiv="Content-Type" content="text/html; charset=utf-8" />
     <title>Utils Multiclass module &#8212; SuMMIT 0 documentation</title>
     <link rel="stylesheet" href="../../../_static/classic.css" type="text/css" />
     <link rel="stylesheet" href="../../../_static/pygments.css" type="text/css" />
@@ -36,16 +38,25 @@
         <div class="bodywrapper">
           <div class="body" role="main">
             
-  <div class="section" id="module-multiview_platform.mono_multi_view_classifiers.utils.multiclass">
-<span id="utils-multiclass-module"></span><h1>Utils Multiclass module<a class="headerlink" href="#module-multiview_platform.mono_multi_view_classifiers.utils.multiclass" title="Permalink to this headline">¶</a></h1>
-<dl class="field-list simple">
-<dt class="field-odd">members</dt>
-<dd class="field-odd"><dl class="field-list simple">
-<dt class="field-odd">inherited-members</dt>
-<dd class="field-odd"><p></p></dd>
-</dl>
-</dd>
-</dl>
+  <div class="section" id="utils-multiclass-module">
+<h1>Utils Multiclass module<a class="headerlink" href="#utils-multiclass-module" title="Permalink to this headline">¶</a></h1>
+<table class="docutils field-list" frame="void" rules="none">
+<col class="field-name" />
+<col class="field-body" />
+<tbody valign="top">
+<tr class="field-odd field"><th class="field-name">members:</th><td class="field-body"><table class="first last docutils field-list" frame="void" rules="none">
+<col class="field-name" />
+<col class="field-body" />
+<tbody valign="top">
+<tr class="field-odd field"><th class="field-name" colspan="2">inherited-members:</th></tr>
+<tr class="field-odd field"><td>&#160;</td><td class="field-body"></td>
+</tr>
+</tbody>
+</table>
+</td>
+</tr>
+</tbody>
+</table>
 </div>
 
 
diff --git a/docs/build/references/monomultidoc.html b/docs/build/references/monomultidoc.html
index cdfbc92798201d9dab14e56a97745a8053a94e66..a0585a88643273b89bdaa814e4d7f20ed6ebd765 100644
--- a/docs/build/references/monomultidoc.html
+++ b/docs/build/references/monomultidoc.html
@@ -1,9 +1,11 @@
 
-<!DOCTYPE html>
+<!DOCTYPE html PUBLIC "-//W3C//DTD XHTML 1.0 Transitional//EN"
+  "http://www.w3.org/TR/xhtml1/DTD/xhtml1-transitional.dtd">
 
-<html>
+<html xmlns="http://www.w3.org/1999/xhtml">
   <head>
-    <meta charset="utf-8" />
+    <meta http-equiv="X-UA-Compatible" content="IE=Edge" />
+    <meta http-equiv="Content-Type" content="text/html; charset=utf-8" />
     <title>Mono and mutliview classification &#8212; SuMMIT 0 documentation</title>
     <link rel="stylesheet" href="../_static/classic.css" type="text/css" />
     <link rel="stylesheet" href="../_static/pygments.css" type="text/css" />
diff --git a/docs/build/references/multiview_platform.html b/docs/build/references/multiview_platform.html
index 140e78e828608beb1a2947e9780ced1b63e5b243..4c85b8c91552d5c16029b59ec75b51d551d49667 100644
--- a/docs/build/references/multiview_platform.html
+++ b/docs/build/references/multiview_platform.html
@@ -1,9 +1,11 @@
 
-<!DOCTYPE html>
+<!DOCTYPE html PUBLIC "-//W3C//DTD XHTML 1.0 Transitional//EN"
+  "http://www.w3.org/TR/xhtml1/DTD/xhtml1-transitional.dtd">
 
-<html>
+<html xmlns="http://www.w3.org/1999/xhtml">
   <head>
-    <meta charset="utf-8" />
+    <meta http-equiv="X-UA-Compatible" content="IE=Edge" />
+    <meta http-equiv="Content-Type" content="text/html; charset=utf-8" />
     <title>multiview_platform references &#8212; SuMMIT 0 documentation</title>
     <link rel="stylesheet" href="../_static/classic.css" type="text/css" />
     <link rel="stylesheet" href="../_static/pygments.css" type="text/css" />
@@ -58,28 +60,28 @@
 <li class="toctree-l6"><a class="reference internal" href="multiview_platform.mono_multi_view_classifiers.multiview_classifiers.difficulty_fusion.html#submodules">Submodules</a></li>
 <li class="toctree-l6"><a class="reference internal" href="multiview_platform.mono_multi_view_classifiers.multiview_classifiers.difficulty_fusion.html#multiview-platform-mono-multi-view-classifiers-multiview-classifiers-difficulty-fusion-analyze-results-module">multiview_platform.mono_multi_view_classifiers.multiview_classifiers.difficulty_fusion.analyze_results module</a></li>
 <li class="toctree-l6"><a class="reference internal" href="multiview_platform.mono_multi_view_classifiers.multiview_classifiers.difficulty_fusion.html#multiview-platform-mono-multi-view-classifiers-multiview-classifiers-difficulty-fusion-difficulty-fusion-module">multiview_platform.mono_multi_view_classifiers.multiview_classifiers.difficulty_fusion.difficulty_fusion module</a></li>
-<li class="toctree-l6"><a class="reference internal" href="multiview_platform.mono_multi_view_classifiers.multiview_classifiers.difficulty_fusion.html#module-multiview_platform.mono_multi_view_classifiers.multiview_classifiers.difficulty_fusion">Module contents</a></li>
+<li class="toctree-l6"><a class="reference internal" href="multiview_platform.mono_multi_view_classifiers.multiview_classifiers.difficulty_fusion.html#module-contents">Module contents</a></li>
 </ul>
 </li>
 <li class="toctree-l5"><a class="reference internal" href="multiview_platform.mono_multi_view_classifiers.multiview_classifiers.disagree_fusion.html">multiview_platform.mono_multi_view_classifiers.multiview_classifiers.disagree_fusion package</a><ul>
 <li class="toctree-l6"><a class="reference internal" href="multiview_platform.mono_multi_view_classifiers.multiview_classifiers.disagree_fusion.html#submodules">Submodules</a></li>
 <li class="toctree-l6"><a class="reference internal" href="multiview_platform.mono_multi_view_classifiers.multiview_classifiers.disagree_fusion.html#multiview-platform-mono-multi-view-classifiers-multiview-classifiers-disagree-fusion-analyze-results-module">multiview_platform.mono_multi_view_classifiers.multiview_classifiers.disagree_fusion.analyze_results module</a></li>
 <li class="toctree-l6"><a class="reference internal" href="multiview_platform.mono_multi_view_classifiers.multiview_classifiers.disagree_fusion.html#multiview-platform-mono-multi-view-classifiers-multiview-classifiers-disagree-fusion-disagree-fusion-module">multiview_platform.mono_multi_view_classifiers.multiview_classifiers.disagree_fusion.disagree_fusion module</a></li>
-<li class="toctree-l6"><a class="reference internal" href="multiview_platform.mono_multi_view_classifiers.multiview_classifiers.disagree_fusion.html#module-multiview_platform.mono_multi_view_classifiers.multiview_classifiers.disagree_fusion">Module contents</a></li>
+<li class="toctree-l6"><a class="reference internal" href="multiview_platform.mono_multi_view_classifiers.multiview_classifiers.disagree_fusion.html#module-contents">Module contents</a></li>
 </ul>
 </li>
 <li class="toctree-l5"><a class="reference internal" href="multiview_platform.mono_multi_view_classifiers.multiview_classifiers.double_fault_fusion.html">multiview_platform.mono_multi_view_classifiers.multiview_classifiers.double_fault_fusion package</a><ul>
 <li class="toctree-l6"><a class="reference internal" href="multiview_platform.mono_multi_view_classifiers.multiview_classifiers.double_fault_fusion.html#submodules">Submodules</a></li>
 <li class="toctree-l6"><a class="reference internal" href="multiview_platform.mono_multi_view_classifiers.multiview_classifiers.double_fault_fusion.html#multiview-platform-mono-multi-view-classifiers-multiview-classifiers-double-fault-fusion-analyze-results-module">multiview_platform.mono_multi_view_classifiers.multiview_classifiers.double_fault_fusion.analyze_results module</a></li>
 <li class="toctree-l6"><a class="reference internal" href="multiview_platform.mono_multi_view_classifiers.multiview_classifiers.double_fault_fusion.html#multiview-platform-mono-multi-view-classifiers-multiview-classifiers-double-fault-fusion-double-fault-fusion-module">multiview_platform.mono_multi_view_classifiers.multiview_classifiers.double_fault_fusion.double_fault_fusion module</a></li>
-<li class="toctree-l6"><a class="reference internal" href="multiview_platform.mono_multi_view_classifiers.multiview_classifiers.double_fault_fusion.html#module-multiview_platform.mono_multi_view_classifiers.multiview_classifiers.double_fault_fusion">Module contents</a></li>
+<li class="toctree-l6"><a class="reference internal" href="multiview_platform.mono_multi_view_classifiers.multiview_classifiers.double_fault_fusion.html#module-contents">Module contents</a></li>
 </ul>
 </li>
 <li class="toctree-l5"><a class="reference internal" href="multiview_platform.mono_multi_view_classifiers.multiview_classifiers.entropy_fusion.html">multiview_platform.mono_multi_view_classifiers.multiview_classifiers.entropy_fusion package</a><ul>
 <li class="toctree-l6"><a class="reference internal" href="multiview_platform.mono_multi_view_classifiers.multiview_classifiers.entropy_fusion.html#submodules">Submodules</a></li>
 <li class="toctree-l6"><a class="reference internal" href="multiview_platform.mono_multi_view_classifiers.multiview_classifiers.entropy_fusion.html#multiview-platform-mono-multi-view-classifiers-multiview-classifiers-entropy-fusion-analyze-results-module">multiview_platform.mono_multi_view_classifiers.multiview_classifiers.entropy_fusion.analyze_results module</a></li>
 <li class="toctree-l6"><a class="reference internal" href="multiview_platform.mono_multi_view_classifiers.multiview_classifiers.entropy_fusion.html#multiview-platform-mono-multi-view-classifiers-multiview-classifiers-entropy-fusion-entropy-fusion-module">multiview_platform.mono_multi_view_classifiers.multiview_classifiers.entropy_fusion.entropy_fusion module</a></li>
-<li class="toctree-l6"><a class="reference internal" href="multiview_platform.mono_multi_view_classifiers.multiview_classifiers.entropy_fusion.html#module-multiview_platform.mono_multi_view_classifiers.multiview_classifiers.entropy_fusion">Module contents</a></li>
+<li class="toctree-l6"><a class="reference internal" href="multiview_platform.mono_multi_view_classifiers.multiview_classifiers.entropy_fusion.html#module-contents">Module contents</a></li>
 </ul>
 </li>
 <li class="toctree-l5"><a class="reference internal" href="multiview_platform.mono_multi_view_classifiers.multiview_classifiers.fat_late_fusion.html">multiview_platform.mono_multi_view_classifiers.multiview_classifiers.fat_late_fusion package</a><ul>
@@ -141,51 +143,51 @@
 </li>
 </ul>
 </li>
-<li class="toctree-l4"><a class="reference internal" href="multiview_platform.mono_multi_view_classifiers.multiview_classifiers.html#module-multiview_platform.mono_multi_view_classifiers.multiview_classifiers">Module contents</a></li>
+<li class="toctree-l4"><a class="reference internal" href="multiview_platform.mono_multi_view_classifiers.multiview_classifiers.html#module-contents">Module contents</a></li>
 </ul>
 </li>
 <li class="toctree-l3"><a class="reference internal" href="multiview_platform.mono_multi_view_classifiers.utils.html">multiview_platform.mono_multi_view_classifiers.utils package</a><ul>
 <li class="toctree-l4"><a class="reference internal" href="multiview_platform.mono_multi_view_classifiers.utils.html#submodules">Submodules</a></li>
-<li class="toctree-l4"><a class="reference internal" href="multiview_platform.mono_multi_view_classifiers.utils.html#module-multiview_platform.mono_multi_view_classifiers.utils.configuration">multiview_platform.mono_multi_view_classifiers.utils.configuration module</a></li>
-<li class="toctree-l4"><a class="reference internal" href="multiview_platform.mono_multi_view_classifiers.utils.html#module-multiview_platform.mono_multi_view_classifiers.utils.dataset">multiview_platform.mono_multi_view_classifiers.utils.dataset module</a></li>
-<li class="toctree-l4"><a class="reference internal" href="multiview_platform.mono_multi_view_classifiers.utils.html#module-multiview_platform.mono_multi_view_classifiers.utils.execution">multiview_platform.mono_multi_view_classifiers.utils.execution module</a></li>
-<li class="toctree-l4"><a class="reference internal" href="multiview_platform.mono_multi_view_classifiers.utils.html#module-multiview_platform.mono_multi_view_classifiers.utils.get_multiview_db">multiview_platform.mono_multi_view_classifiers.utils.get_multiview_db module</a></li>
-<li class="toctree-l4"><a class="reference internal" href="multiview_platform.mono_multi_view_classifiers.utils.html#module-multiview_platform.mono_multi_view_classifiers.utils.hyper_parameter_search">multiview_platform.mono_multi_view_classifiers.utils.hyper_parameter_search module</a></li>
-<li class="toctree-l4"><a class="reference internal" href="multiview_platform.mono_multi_view_classifiers.utils.html#module-multiview_platform.mono_multi_view_classifiers.utils.make_file_config">multiview_platform.mono_multi_view_classifiers.utils.make_file_config module</a></li>
-<li class="toctree-l4"><a class="reference internal" href="multiview_platform.mono_multi_view_classifiers.utils.html#module-multiview_platform.mono_multi_view_classifiers.utils.multiclass">multiview_platform.mono_multi_view_classifiers.utils.multiclass module</a></li>
-<li class="toctree-l4"><a class="reference internal" href="multiview_platform.mono_multi_view_classifiers.utils.html#module-multiview_platform.mono_multi_view_classifiers.utils.multiview_result_analysis">multiview_platform.mono_multi_view_classifiers.utils.multiview_result_analysis module</a></li>
-<li class="toctree-l4"><a class="reference internal" href="multiview_platform.mono_multi_view_classifiers.utils.html#module-multiview_platform.mono_multi_view_classifiers.utils.transformations">multiview_platform.mono_multi_view_classifiers.utils.transformations module</a></li>
-<li class="toctree-l4"><a class="reference internal" href="multiview_platform.mono_multi_view_classifiers.utils.html#module-multiview_platform.mono_multi_view_classifiers.utils">Module contents</a></li>
+<li class="toctree-l4"><a class="reference internal" href="multiview_platform.mono_multi_view_classifiers.utils.html#multiview-platform-mono-multi-view-classifiers-utils-configuration-module">multiview_platform.mono_multi_view_classifiers.utils.configuration module</a></li>
+<li class="toctree-l4"><a class="reference internal" href="multiview_platform.mono_multi_view_classifiers.utils.html#multiview-platform-mono-multi-view-classifiers-utils-dataset-module">multiview_platform.mono_multi_view_classifiers.utils.dataset module</a></li>
+<li class="toctree-l4"><a class="reference internal" href="multiview_platform.mono_multi_view_classifiers.utils.html#multiview-platform-mono-multi-view-classifiers-utils-execution-module">multiview_platform.mono_multi_view_classifiers.utils.execution module</a></li>
+<li class="toctree-l4"><a class="reference internal" href="multiview_platform.mono_multi_view_classifiers.utils.html#multiview-platform-mono-multi-view-classifiers-utils-get-multiview-db-module">multiview_platform.mono_multi_view_classifiers.utils.get_multiview_db module</a></li>
+<li class="toctree-l4"><a class="reference internal" href="multiview_platform.mono_multi_view_classifiers.utils.html#multiview-platform-mono-multi-view-classifiers-utils-hyper-parameter-search-module">multiview_platform.mono_multi_view_classifiers.utils.hyper_parameter_search module</a></li>
+<li class="toctree-l4"><a class="reference internal" href="multiview_platform.mono_multi_view_classifiers.utils.html#multiview-platform-mono-multi-view-classifiers-utils-make-file-config-module">multiview_platform.mono_multi_view_classifiers.utils.make_file_config module</a></li>
+<li class="toctree-l4"><a class="reference internal" href="multiview_platform.mono_multi_view_classifiers.utils.html#multiview-platform-mono-multi-view-classifiers-utils-multiclass-module">multiview_platform.mono_multi_view_classifiers.utils.multiclass module</a></li>
+<li class="toctree-l4"><a class="reference internal" href="multiview_platform.mono_multi_view_classifiers.utils.html#multiview-platform-mono-multi-view-classifiers-utils-multiview-result-analysis-module">multiview_platform.mono_multi_view_classifiers.utils.multiview_result_analysis module</a></li>
+<li class="toctree-l4"><a class="reference internal" href="multiview_platform.mono_multi_view_classifiers.utils.html#multiview-platform-mono-multi-view-classifiers-utils-transformations-module">multiview_platform.mono_multi_view_classifiers.utils.transformations module</a></li>
+<li class="toctree-l4"><a class="reference internal" href="multiview_platform.mono_multi_view_classifiers.utils.html#module-contents">Module contents</a></li>
 </ul>
 </li>
 </ul>
 </li>
 <li class="toctree-l2"><a class="reference internal" href="multiview_platform.mono_multi_view_classifiers.html#submodules">Submodules</a></li>
-<li class="toctree-l2"><a class="reference internal" href="multiview_platform.mono_multi_view_classifiers.html#module-multiview_platform.mono_multi_view_classifiers.exec_classif">multiview_platform.mono_multi_view_classifiers.exec_classif module</a></li>
-<li class="toctree-l2"><a class="reference internal" href="multiview_platform.mono_multi_view_classifiers.html#module-multiview_platform.mono_multi_view_classifiers.result_analysis">multiview_platform.mono_multi_view_classifiers.result_analysis module</a></li>
-<li class="toctree-l2"><a class="reference internal" href="multiview_platform.mono_multi_view_classifiers.html#module-multiview_platform.mono_multi_view_classifiers">Module contents</a></li>
+<li class="toctree-l2"><a class="reference internal" href="multiview_platform.mono_multi_view_classifiers.html#multiview-platform-mono-multi-view-classifiers-exec-classif-module">multiview_platform.mono_multi_view_classifiers.exec_classif module</a></li>
+<li class="toctree-l2"><a class="reference internal" href="multiview_platform.mono_multi_view_classifiers.html#multiview-platform-mono-multi-view-classifiers-result-analysis-module">multiview_platform.mono_multi_view_classifiers.result_analysis module</a></li>
+<li class="toctree-l2"><a class="reference internal" href="multiview_platform.mono_multi_view_classifiers.html#module-contents">Module contents</a></li>
 </ul>
 </li>
 <li class="toctree-l1"><a class="reference internal" href="multiview_platform.tests.html">multiview_platform.tests package</a><ul>
 <li class="toctree-l2"><a class="reference internal" href="multiview_platform.tests.html#subpackages">Subpackages</a><ul>
 <li class="toctree-l3"><a class="reference internal" href="multiview_platform.tests.test_metrics.html">multiview_platform.tests.test_metrics package</a><ul>
 <li class="toctree-l4"><a class="reference internal" href="multiview_platform.tests.test_metrics.html#submodules">Submodules</a></li>
-<li class="toctree-l4"><a class="reference internal" href="multiview_platform.tests.test_metrics.html#module-multiview_platform.tests.test_metrics.test_accuracy_score">multiview_platform.tests.test_metrics.test_accuracy_score module</a></li>
-<li class="toctree-l4"><a class="reference internal" href="multiview_platform.tests.test_metrics.html#module-multiview_platform.tests.test_metrics">Module contents</a></li>
+<li class="toctree-l4"><a class="reference internal" href="multiview_platform.tests.test_metrics.html#multiview-platform-tests-test-metrics-test-accuracy-score-module">multiview_platform.tests.test_metrics.test_accuracy_score module</a></li>
+<li class="toctree-l4"><a class="reference internal" href="multiview_platform.tests.test_metrics.html#module-contents">Module contents</a></li>
 </ul>
 </li>
 <li class="toctree-l3"><a class="reference internal" href="multiview_platform.tests.test_mono_view.html">multiview_platform.tests.test_mono_view package</a><ul>
 <li class="toctree-l4"><a class="reference internal" href="multiview_platform.tests.test_mono_view.html#submodules">Submodules</a></li>
-<li class="toctree-l4"><a class="reference internal" href="multiview_platform.tests.test_mono_view.html#module-multiview_platform.tests.test_mono_view.test_ExecClassifMonoView">multiview_platform.tests.test_mono_view.test_ExecClassifMonoView module</a></li>
-<li class="toctree-l4"><a class="reference internal" href="multiview_platform.tests.test_mono_view.html#module-multiview_platform.tests.test_mono_view.test_MonoviewUtils">multiview_platform.tests.test_mono_view.test_MonoviewUtils module</a></li>
-<li class="toctree-l4"><a class="reference internal" href="multiview_platform.tests.test_mono_view.html#module-multiview_platform.tests.test_mono_view">Module contents</a></li>
+<li class="toctree-l4"><a class="reference internal" href="multiview_platform.tests.test_mono_view.html#multiview-platform-tests-test-mono-view-test-execclassifmonoview-module">multiview_platform.tests.test_mono_view.test_ExecClassifMonoView module</a></li>
+<li class="toctree-l4"><a class="reference internal" href="multiview_platform.tests.test_mono_view.html#multiview-platform-tests-test-mono-view-test-monoviewutils-module">multiview_platform.tests.test_mono_view.test_MonoviewUtils module</a></li>
+<li class="toctree-l4"><a class="reference internal" href="multiview_platform.tests.test_mono_view.html#module-contents">Module contents</a></li>
 </ul>
 </li>
 <li class="toctree-l3"><a class="reference internal" href="multiview_platform.tests.test_monoview_classifiers.html">multiview_platform.tests.test_monoview_classifiers package</a><ul>
 <li class="toctree-l4"><a class="reference internal" href="multiview_platform.tests.test_monoview_classifiers.html#submodules">Submodules</a></li>
-<li class="toctree-l4"><a class="reference internal" href="multiview_platform.tests.test_monoview_classifiers.html#module-multiview_platform.tests.test_monoview_classifiers.test_adaboost">multiview_platform.tests.test_monoview_classifiers.test_adaboost module</a></li>
-<li class="toctree-l4"><a class="reference internal" href="multiview_platform.tests.test_monoview_classifiers.html#module-multiview_platform.tests.test_monoview_classifiers.test_compatibility">multiview_platform.tests.test_monoview_classifiers.test_compatibility module</a></li>
-<li class="toctree-l4"><a class="reference internal" href="multiview_platform.tests.test_monoview_classifiers.html#module-multiview_platform.tests.test_monoview_classifiers">Module contents</a></li>
+<li class="toctree-l4"><a class="reference internal" href="multiview_platform.tests.test_monoview_classifiers.html#multiview-platform-tests-test-monoview-classifiers-test-adaboost-module">multiview_platform.tests.test_monoview_classifiers.test_adaboost module</a></li>
+<li class="toctree-l4"><a class="reference internal" href="multiview_platform.tests.test_monoview_classifiers.html#multiview-platform-tests-test-monoview-classifiers-test-compatibility-module">multiview_platform.tests.test_monoview_classifiers.test_compatibility module</a></li>
+<li class="toctree-l4"><a class="reference internal" href="multiview_platform.tests.test_monoview_classifiers.html#module-contents">Module contents</a></li>
 </ul>
 </li>
 <li class="toctree-l3"><a class="reference internal" href="multiview_platform.tests.test_multiview_classifiers.html">multiview_platform.tests.test_multiview_classifiers package</a><ul>
@@ -225,24 +227,24 @@
 </li>
 <li class="toctree-l5"><a class="reference internal" href="multiview_platform.tests.test_multiview_classifiers.Test_PseudoCQMeasure.html">multiview_platform.tests.test_multiview_classifiers.Test_PseudoCQMeasure package</a><ul>
 <li class="toctree-l6"><a class="reference internal" href="multiview_platform.tests.test_multiview_classifiers.Test_PseudoCQMeasure.html#submodules">Submodules</a></li>
-<li class="toctree-l6"><a class="reference internal" href="multiview_platform.tests.test_multiview_classifiers.Test_PseudoCQMeasure.html#module-multiview_platform.tests.test_multiview_classifiers.Test_PseudoCQMeasure.test_PseudoCQFusionModule">multiview_platform.tests.test_multiview_classifiers.Test_PseudoCQMeasure.test_PseudoCQFusionModule module</a></li>
-<li class="toctree-l6"><a class="reference internal" href="multiview_platform.tests.test_multiview_classifiers.Test_PseudoCQMeasure.html#module-multiview_platform.tests.test_multiview_classifiers.Test_PseudoCQMeasure">Module contents</a></li>
+<li class="toctree-l6"><a class="reference internal" href="multiview_platform.tests.test_multiview_classifiers.Test_PseudoCQMeasure.html#multiview-platform-tests-test-multiview-classifiers-test-pseudocqmeasure-test-pseudocqfusionmodule-module">multiview_platform.tests.test_multiview_classifiers.Test_PseudoCQMeasure.test_PseudoCQFusionModule module</a></li>
+<li class="toctree-l6"><a class="reference internal" href="multiview_platform.tests.test_multiview_classifiers.Test_PseudoCQMeasure.html#module-contents">Module contents</a></li>
 </ul>
 </li>
 </ul>
 </li>
 <li class="toctree-l4"><a class="reference internal" href="multiview_platform.tests.test_multiview_classifiers.html#submodules">Submodules</a></li>
-<li class="toctree-l4"><a class="reference internal" href="multiview_platform.tests.test_multiview_classifiers.html#module-multiview_platform.tests.test_multiview_classifiers.test_diversity_utils">multiview_platform.tests.test_multiview_classifiers.test_diversity_utils module</a></li>
-<li class="toctree-l4"><a class="reference internal" href="multiview_platform.tests.test_multiview_classifiers.html#module-multiview_platform.tests.test_multiview_classifiers">Module contents</a></li>
+<li class="toctree-l4"><a class="reference internal" href="multiview_platform.tests.test_multiview_classifiers.html#multiview-platform-tests-test-multiview-classifiers-test-diversity-utils-module">multiview_platform.tests.test_multiview_classifiers.test_diversity_utils module</a></li>
+<li class="toctree-l4"><a class="reference internal" href="multiview_platform.tests.test_multiview_classifiers.html#module-contents">Module contents</a></li>
 </ul>
 </li>
 <li class="toctree-l3"><a class="reference internal" href="multiview_platform.tests.test_utils.html">multiview_platform.tests.test_utils package</a><ul>
 <li class="toctree-l4"><a class="reference internal" href="multiview_platform.tests.test_utils.html#submodules">Submodules</a></li>
-<li class="toctree-l4"><a class="reference internal" href="multiview_platform.tests.test_utils.html#module-multiview_platform.tests.test_utils.test_GetMultiviewDB">multiview_platform.tests.test_utils.test_GetMultiviewDB module</a></li>
-<li class="toctree-l4"><a class="reference internal" href="multiview_platform.tests.test_utils.html#module-multiview_platform.tests.test_utils.test_configuration">multiview_platform.tests.test_utils.test_configuration module</a></li>
-<li class="toctree-l4"><a class="reference internal" href="multiview_platform.tests.test_utils.html#module-multiview_platform.tests.test_utils.test_execution">multiview_platform.tests.test_utils.test_execution module</a></li>
-<li class="toctree-l4"><a class="reference internal" href="multiview_platform.tests.test_utils.html#module-multiview_platform.tests.test_utils.test_multiclass">multiview_platform.tests.test_utils.test_multiclass module</a></li>
-<li class="toctree-l4"><a class="reference internal" href="multiview_platform.tests.test_utils.html#module-multiview_platform.tests.test_utils">Module contents</a></li>
+<li class="toctree-l4"><a class="reference internal" href="multiview_platform.tests.test_utils.html#multiview-platform-tests-test-utils-test-getmultiviewdb-module">multiview_platform.tests.test_utils.test_GetMultiviewDB module</a></li>
+<li class="toctree-l4"><a class="reference internal" href="multiview_platform.tests.test_utils.html#multiview-platform-tests-test-utils-test-configuration-module">multiview_platform.tests.test_utils.test_configuration module</a></li>
+<li class="toctree-l4"><a class="reference internal" href="multiview_platform.tests.test_utils.html#multiview-platform-tests-test-utils-test-execution-module">multiview_platform.tests.test_utils.test_execution module</a></li>
+<li class="toctree-l4"><a class="reference internal" href="multiview_platform.tests.test_utils.html#multiview-platform-tests-test-utils-test-multiclass-module">multiview_platform.tests.test_utils.test_multiclass module</a></li>
+<li class="toctree-l4"><a class="reference internal" href="multiview_platform.tests.test_utils.html#module-contents">Module contents</a></li>
 </ul>
 </li>
 </ul>
@@ -250,7 +252,7 @@
 <li class="toctree-l2"><a class="reference internal" href="multiview_platform.tests.html#submodules">Submodules</a></li>
 <li class="toctree-l2"><a class="reference internal" href="multiview_platform.tests.html#multiview-platform-tests-test-execclassif-module">multiview_platform.tests.test_ExecClassif module</a></li>
 <li class="toctree-l2"><a class="reference internal" href="multiview_platform.tests.html#multiview-platform-tests-test-resultanalysis-module">multiview_platform.tests.test_ResultAnalysis module</a></li>
-<li class="toctree-l2"><a class="reference internal" href="multiview_platform.tests.html#module-multiview_platform.tests">Module contents</a></li>
+<li class="toctree-l2"><a class="reference internal" href="multiview_platform.tests.html#module-contents">Module contents</a></li>
 </ul>
 </li>
 </ul>
@@ -259,30 +261,17 @@
 <div class="section" id="submodules">
 <h2>Submodules<a class="headerlink" href="#submodules" title="Permalink to this headline">¶</a></h2>
 </div>
-<div class="section" id="module-multiview_platform.execute">
-<span id="multiview-platform-execute-module"></span><h2>multiview_platform.execute module<a class="headerlink" href="#module-multiview_platform.execute" title="Permalink to this headline">¶</a></h2>
-<p>This is the execution module, used to execute the code</p>
-<dl class="py function">
-<dt id="multiview_platform.execute.execute">
-<code class="sig-name descname">execute</code><span class="sig-paren">(</span><em class="sig-param"><span class="n">config_path</span><span class="o">=</span><span class="default_value">None</span></em><span class="sig-paren">)</span><a class="headerlink" href="#multiview_platform.execute.execute" title="Permalink to this definition">¶</a></dt>
-<dd></dd></dl>
-
+<div class="section" id="multiview-platform-execute-module">
+<h2>multiview_platform.execute module<a class="headerlink" href="#multiview-platform-execute-module" title="Permalink to this headline">¶</a></h2>
 </div>
-<div class="section" id="module-multiview_platform.tests">
-<span id="multiview-platform-tests-module"></span><h2>multiview_platform.tests module<a class="headerlink" href="#module-multiview_platform.tests" title="Permalink to this headline">¶</a></h2>
+<div class="section" id="multiview-platform-tests-module">
+<h2>multiview_platform.tests module<a class="headerlink" href="#multiview-platform-tests-module" title="Permalink to this headline">¶</a></h2>
 </div>
-<div class="section" id="module-multiview_platform.versions">
-<span id="multiview-platform-versions-module"></span><h2>multiview_platform.versions module<a class="headerlink" href="#module-multiview_platform.versions" title="Permalink to this headline">¶</a></h2>
-<dl class="py function">
-<dt id="multiview_platform.versions.test_versions">
-<code class="sig-name descname">test_versions</code><span class="sig-paren">(</span><span class="sig-paren">)</span><a class="headerlink" href="#multiview_platform.versions.test_versions" title="Permalink to this definition">¶</a></dt>
-<dd><p>Used to test if all prerequisites are installed</p>
-</dd></dl>
-
+<div class="section" id="multiview-platform-versions-module">
+<h2>multiview_platform.versions module<a class="headerlink" href="#multiview-platform-versions-module" title="Permalink to this headline">¶</a></h2>
 </div>
 <div class="section" id="module-multiview_platform">
 <span id="module-contents"></span><h2>Module contents<a class="headerlink" href="#module-multiview_platform" title="Permalink to this headline">¶</a></h2>
-<p>This is a test docstring to test stuff</p>
 </div>
 </div>
 
@@ -297,9 +286,9 @@
 <li><a class="reference internal" href="#">multiview_platform references</a><ul>
 <li><a class="reference internal" href="#subpackages">Subpackages</a></li>
 <li><a class="reference internal" href="#submodules">Submodules</a></li>
-<li><a class="reference internal" href="#module-multiview_platform.execute">multiview_platform.execute module</a></li>
-<li><a class="reference internal" href="#module-multiview_platform.tests">multiview_platform.tests module</a></li>
-<li><a class="reference internal" href="#module-multiview_platform.versions">multiview_platform.versions module</a></li>
+<li><a class="reference internal" href="#multiview-platform-execute-module">multiview_platform.execute module</a></li>
+<li><a class="reference internal" href="#multiview-platform-tests-module">multiview_platform.tests module</a></li>
+<li><a class="reference internal" href="#multiview-platform-versions-module">multiview_platform.versions module</a></li>
 <li><a class="reference internal" href="#module-multiview_platform">Module contents</a></li>
 </ul>
 </li>
diff --git a/docs/build/references/multiview_platform.mono_multi_view_classifiers.html b/docs/build/references/multiview_platform.mono_multi_view_classifiers.html
index 6c72f5b3728c7fb78a23b29808ec82fde7a1c5eb..079fedb0e9a599aa94f6aab5483f83e296169732 100644
--- a/docs/build/references/multiview_platform.mono_multi_view_classifiers.html
+++ b/docs/build/references/multiview_platform.mono_multi_view_classifiers.html
@@ -1,9 +1,11 @@
 
-<!DOCTYPE html>
+<!DOCTYPE html PUBLIC "-//W3C//DTD XHTML 1.0 Transitional//EN"
+  "http://www.w3.org/TR/xhtml1/DTD/xhtml1-transitional.dtd">
 
-<html>
+<html xmlns="http://www.w3.org/1999/xhtml">
   <head>
-    <meta charset="utf-8" />
+    <meta http-equiv="X-UA-Compatible" content="IE=Edge" />
+    <meta http-equiv="Content-Type" content="text/html; charset=utf-8" />
     <title>multiview_platform.mono_multi_view_classifiers package &#8212; SuMMIT 0 documentation</title>
     <link rel="stylesheet" href="../_static/classic.css" type="text/css" />
     <link rel="stylesheet" href="../_static/pygments.css" type="text/css" />
@@ -57,28 +59,28 @@
 <li class="toctree-l4"><a class="reference internal" href="multiview_platform.mono_multi_view_classifiers.multiview_classifiers.difficulty_fusion.html#submodules">Submodules</a></li>
 <li class="toctree-l4"><a class="reference internal" href="multiview_platform.mono_multi_view_classifiers.multiview_classifiers.difficulty_fusion.html#multiview-platform-mono-multi-view-classifiers-multiview-classifiers-difficulty-fusion-analyze-results-module">multiview_platform.mono_multi_view_classifiers.multiview_classifiers.difficulty_fusion.analyze_results module</a></li>
 <li class="toctree-l4"><a class="reference internal" href="multiview_platform.mono_multi_view_classifiers.multiview_classifiers.difficulty_fusion.html#multiview-platform-mono-multi-view-classifiers-multiview-classifiers-difficulty-fusion-difficulty-fusion-module">multiview_platform.mono_multi_view_classifiers.multiview_classifiers.difficulty_fusion.difficulty_fusion module</a></li>
-<li class="toctree-l4"><a class="reference internal" href="multiview_platform.mono_multi_view_classifiers.multiview_classifiers.difficulty_fusion.html#module-multiview_platform.mono_multi_view_classifiers.multiview_classifiers.difficulty_fusion">Module contents</a></li>
+<li class="toctree-l4"><a class="reference internal" href="multiview_platform.mono_multi_view_classifiers.multiview_classifiers.difficulty_fusion.html#module-contents">Module contents</a></li>
 </ul>
 </li>
 <li class="toctree-l3"><a class="reference internal" href="multiview_platform.mono_multi_view_classifiers.multiview_classifiers.disagree_fusion.html">multiview_platform.mono_multi_view_classifiers.multiview_classifiers.disagree_fusion package</a><ul>
 <li class="toctree-l4"><a class="reference internal" href="multiview_platform.mono_multi_view_classifiers.multiview_classifiers.disagree_fusion.html#submodules">Submodules</a></li>
 <li class="toctree-l4"><a class="reference internal" href="multiview_platform.mono_multi_view_classifiers.multiview_classifiers.disagree_fusion.html#multiview-platform-mono-multi-view-classifiers-multiview-classifiers-disagree-fusion-analyze-results-module">multiview_platform.mono_multi_view_classifiers.multiview_classifiers.disagree_fusion.analyze_results module</a></li>
 <li class="toctree-l4"><a class="reference internal" href="multiview_platform.mono_multi_view_classifiers.multiview_classifiers.disagree_fusion.html#multiview-platform-mono-multi-view-classifiers-multiview-classifiers-disagree-fusion-disagree-fusion-module">multiview_platform.mono_multi_view_classifiers.multiview_classifiers.disagree_fusion.disagree_fusion module</a></li>
-<li class="toctree-l4"><a class="reference internal" href="multiview_platform.mono_multi_view_classifiers.multiview_classifiers.disagree_fusion.html#module-multiview_platform.mono_multi_view_classifiers.multiview_classifiers.disagree_fusion">Module contents</a></li>
+<li class="toctree-l4"><a class="reference internal" href="multiview_platform.mono_multi_view_classifiers.multiview_classifiers.disagree_fusion.html#module-contents">Module contents</a></li>
 </ul>
 </li>
 <li class="toctree-l3"><a class="reference internal" href="multiview_platform.mono_multi_view_classifiers.multiview_classifiers.double_fault_fusion.html">multiview_platform.mono_multi_view_classifiers.multiview_classifiers.double_fault_fusion package</a><ul>
 <li class="toctree-l4"><a class="reference internal" href="multiview_platform.mono_multi_view_classifiers.multiview_classifiers.double_fault_fusion.html#submodules">Submodules</a></li>
 <li class="toctree-l4"><a class="reference internal" href="multiview_platform.mono_multi_view_classifiers.multiview_classifiers.double_fault_fusion.html#multiview-platform-mono-multi-view-classifiers-multiview-classifiers-double-fault-fusion-analyze-results-module">multiview_platform.mono_multi_view_classifiers.multiview_classifiers.double_fault_fusion.analyze_results module</a></li>
 <li class="toctree-l4"><a class="reference internal" href="multiview_platform.mono_multi_view_classifiers.multiview_classifiers.double_fault_fusion.html#multiview-platform-mono-multi-view-classifiers-multiview-classifiers-double-fault-fusion-double-fault-fusion-module">multiview_platform.mono_multi_view_classifiers.multiview_classifiers.double_fault_fusion.double_fault_fusion module</a></li>
-<li class="toctree-l4"><a class="reference internal" href="multiview_platform.mono_multi_view_classifiers.multiview_classifiers.double_fault_fusion.html#module-multiview_platform.mono_multi_view_classifiers.multiview_classifiers.double_fault_fusion">Module contents</a></li>
+<li class="toctree-l4"><a class="reference internal" href="multiview_platform.mono_multi_view_classifiers.multiview_classifiers.double_fault_fusion.html#module-contents">Module contents</a></li>
 </ul>
 </li>
 <li class="toctree-l3"><a class="reference internal" href="multiview_platform.mono_multi_view_classifiers.multiview_classifiers.entropy_fusion.html">multiview_platform.mono_multi_view_classifiers.multiview_classifiers.entropy_fusion package</a><ul>
 <li class="toctree-l4"><a class="reference internal" href="multiview_platform.mono_multi_view_classifiers.multiview_classifiers.entropy_fusion.html#submodules">Submodules</a></li>
 <li class="toctree-l4"><a class="reference internal" href="multiview_platform.mono_multi_view_classifiers.multiview_classifiers.entropy_fusion.html#multiview-platform-mono-multi-view-classifiers-multiview-classifiers-entropy-fusion-analyze-results-module">multiview_platform.mono_multi_view_classifiers.multiview_classifiers.entropy_fusion.analyze_results module</a></li>
 <li class="toctree-l4"><a class="reference internal" href="multiview_platform.mono_multi_view_classifiers.multiview_classifiers.entropy_fusion.html#multiview-platform-mono-multi-view-classifiers-multiview-classifiers-entropy-fusion-entropy-fusion-module">multiview_platform.mono_multi_view_classifiers.multiview_classifiers.entropy_fusion.entropy_fusion module</a></li>
-<li class="toctree-l4"><a class="reference internal" href="multiview_platform.mono_multi_view_classifiers.multiview_classifiers.entropy_fusion.html#module-multiview_platform.mono_multi_view_classifiers.multiview_classifiers.entropy_fusion">Module contents</a></li>
+<li class="toctree-l4"><a class="reference internal" href="multiview_platform.mono_multi_view_classifiers.multiview_classifiers.entropy_fusion.html#module-contents">Module contents</a></li>
 </ul>
 </li>
 <li class="toctree-l3"><a class="reference internal" href="multiview_platform.mono_multi_view_classifiers.multiview_classifiers.fat_late_fusion.html">multiview_platform.mono_multi_view_classifiers.multiview_classifiers.fat_late_fusion package</a><ul>
@@ -140,21 +142,21 @@
 </li>
 </ul>
 </li>
-<li class="toctree-l2"><a class="reference internal" href="multiview_platform.mono_multi_view_classifiers.multiview_classifiers.html#module-multiview_platform.mono_multi_view_classifiers.multiview_classifiers">Module contents</a></li>
+<li class="toctree-l2"><a class="reference internal" href="multiview_platform.mono_multi_view_classifiers.multiview_classifiers.html#module-contents">Module contents</a></li>
 </ul>
 </li>
 <li class="toctree-l1"><a class="reference internal" href="multiview_platform.mono_multi_view_classifiers.utils.html">multiview_platform.mono_multi_view_classifiers.utils package</a><ul>
 <li class="toctree-l2"><a class="reference internal" href="multiview_platform.mono_multi_view_classifiers.utils.html#submodules">Submodules</a></li>
-<li class="toctree-l2"><a class="reference internal" href="multiview_platform.mono_multi_view_classifiers.utils.html#module-multiview_platform.mono_multi_view_classifiers.utils.configuration">multiview_platform.mono_multi_view_classifiers.utils.configuration module</a></li>
-<li class="toctree-l2"><a class="reference internal" href="multiview_platform.mono_multi_view_classifiers.utils.html#module-multiview_platform.mono_multi_view_classifiers.utils.dataset">multiview_platform.mono_multi_view_classifiers.utils.dataset module</a></li>
-<li class="toctree-l2"><a class="reference internal" href="multiview_platform.mono_multi_view_classifiers.utils.html#module-multiview_platform.mono_multi_view_classifiers.utils.execution">multiview_platform.mono_multi_view_classifiers.utils.execution module</a></li>
-<li class="toctree-l2"><a class="reference internal" href="multiview_platform.mono_multi_view_classifiers.utils.html#module-multiview_platform.mono_multi_view_classifiers.utils.get_multiview_db">multiview_platform.mono_multi_view_classifiers.utils.get_multiview_db module</a></li>
-<li class="toctree-l2"><a class="reference internal" href="multiview_platform.mono_multi_view_classifiers.utils.html#module-multiview_platform.mono_multi_view_classifiers.utils.hyper_parameter_search">multiview_platform.mono_multi_view_classifiers.utils.hyper_parameter_search module</a></li>
-<li class="toctree-l2"><a class="reference internal" href="multiview_platform.mono_multi_view_classifiers.utils.html#module-multiview_platform.mono_multi_view_classifiers.utils.make_file_config">multiview_platform.mono_multi_view_classifiers.utils.make_file_config module</a></li>
-<li class="toctree-l2"><a class="reference internal" href="multiview_platform.mono_multi_view_classifiers.utils.html#module-multiview_platform.mono_multi_view_classifiers.utils.multiclass">multiview_platform.mono_multi_view_classifiers.utils.multiclass module</a></li>
-<li class="toctree-l2"><a class="reference internal" href="multiview_platform.mono_multi_view_classifiers.utils.html#module-multiview_platform.mono_multi_view_classifiers.utils.multiview_result_analysis">multiview_platform.mono_multi_view_classifiers.utils.multiview_result_analysis module</a></li>
-<li class="toctree-l2"><a class="reference internal" href="multiview_platform.mono_multi_view_classifiers.utils.html#module-multiview_platform.mono_multi_view_classifiers.utils.transformations">multiview_platform.mono_multi_view_classifiers.utils.transformations module</a></li>
-<li class="toctree-l2"><a class="reference internal" href="multiview_platform.mono_multi_view_classifiers.utils.html#module-multiview_platform.mono_multi_view_classifiers.utils">Module contents</a></li>
+<li class="toctree-l2"><a class="reference internal" href="multiview_platform.mono_multi_view_classifiers.utils.html#multiview-platform-mono-multi-view-classifiers-utils-configuration-module">multiview_platform.mono_multi_view_classifiers.utils.configuration module</a></li>
+<li class="toctree-l2"><a class="reference internal" href="multiview_platform.mono_multi_view_classifiers.utils.html#multiview-platform-mono-multi-view-classifiers-utils-dataset-module">multiview_platform.mono_multi_view_classifiers.utils.dataset module</a></li>
+<li class="toctree-l2"><a class="reference internal" href="multiview_platform.mono_multi_view_classifiers.utils.html#multiview-platform-mono-multi-view-classifiers-utils-execution-module">multiview_platform.mono_multi_view_classifiers.utils.execution module</a></li>
+<li class="toctree-l2"><a class="reference internal" href="multiview_platform.mono_multi_view_classifiers.utils.html#multiview-platform-mono-multi-view-classifiers-utils-get-multiview-db-module">multiview_platform.mono_multi_view_classifiers.utils.get_multiview_db module</a></li>
+<li class="toctree-l2"><a class="reference internal" href="multiview_platform.mono_multi_view_classifiers.utils.html#multiview-platform-mono-multi-view-classifiers-utils-hyper-parameter-search-module">multiview_platform.mono_multi_view_classifiers.utils.hyper_parameter_search module</a></li>
+<li class="toctree-l2"><a class="reference internal" href="multiview_platform.mono_multi_view_classifiers.utils.html#multiview-platform-mono-multi-view-classifiers-utils-make-file-config-module">multiview_platform.mono_multi_view_classifiers.utils.make_file_config module</a></li>
+<li class="toctree-l2"><a class="reference internal" href="multiview_platform.mono_multi_view_classifiers.utils.html#multiview-platform-mono-multi-view-classifiers-utils-multiclass-module">multiview_platform.mono_multi_view_classifiers.utils.multiclass module</a></li>
+<li class="toctree-l2"><a class="reference internal" href="multiview_platform.mono_multi_view_classifiers.utils.html#multiview-platform-mono-multi-view-classifiers-utils-multiview-result-analysis-module">multiview_platform.mono_multi_view_classifiers.utils.multiview_result_analysis module</a></li>
+<li class="toctree-l2"><a class="reference internal" href="multiview_platform.mono_multi_view_classifiers.utils.html#multiview-platform-mono-multi-view-classifiers-utils-transformations-module">multiview_platform.mono_multi_view_classifiers.utils.transformations module</a></li>
+<li class="toctree-l2"><a class="reference internal" href="multiview_platform.mono_multi_view_classifiers.utils.html#module-contents">Module contents</a></li>
 </ul>
 </li>
 </ul>
@@ -163,257 +165,14 @@
 <div class="section" id="submodules">
 <h2>Submodules<a class="headerlink" href="#submodules" title="Permalink to this headline">¶</a></h2>
 </div>
-<div class="section" id="module-multiview_platform.mono_multi_view_classifiers.exec_classif">
-<span id="multiview-platform-mono-multi-view-classifiers-exec-classif-module"></span><h2>multiview_platform.mono_multi_view_classifiers.exec_classif module<a class="headerlink" href="#module-multiview_platform.mono_multi_view_classifiers.exec_classif" title="Permalink to this headline">¶</a></h2>
-<dl class="py function">
-<dt id="multiview_platform.mono_multi_view_classifiers.exec_classif.arange_metrics">
-<code class="sig-name descname">arange_metrics</code><span class="sig-paren">(</span><em class="sig-param"><span class="n">metrics</span></em>, <em class="sig-param"><span class="n">metric_princ</span></em><span class="sig-paren">)</span><a class="headerlink" href="#multiview_platform.mono_multi_view_classifiers.exec_classif.arange_metrics" title="Permalink to this definition">¶</a></dt>
-<dd><p>Used to get the metrics list in the right order so that
-the first one is the principal metric specified in args</p>
-<dl class="field-list simple">
-<dt class="field-odd">Parameters</dt>
-<dd class="field-odd"><ul class="simple">
-<li><p><strong>metrics</strong> (<em>dict</em>) – The metrics that will be used in the benchmark</p></li>
-<li><p><strong>metric_princ</strong> (<em>str</em>) – The name of the metric that need to be used for the hyper-parameter
-optimization process</p></li>
-</ul>
-</dd>
-<dt class="field-even">Returns</dt>
-<dd class="field-even"><p><strong>metrics</strong> – The metrics list, but arranged  so the first one is the principal one.</p>
-</dd>
-<dt class="field-odd">Return type</dt>
-<dd class="field-odd"><p>list of lists</p>
-</dd>
-</dl>
-</dd></dl>
-
-<dl class="py function">
-<dt id="multiview_platform.mono_multi_view_classifiers.exec_classif.benchmark_init">
-<code class="sig-name descname">benchmark_init</code><span class="sig-paren">(</span><em class="sig-param"><span class="n">directory</span></em>, <em class="sig-param"><span class="n">classification_indices</span></em>, <em class="sig-param"><span class="n">labels</span></em>, <em class="sig-param"><span class="n">labels_dictionary</span></em>, <em class="sig-param"><span class="n">k_folds</span></em>, <em class="sig-param"><span class="n">dataset_var</span></em><span class="sig-paren">)</span><a class="headerlink" href="#multiview_platform.mono_multi_view_classifiers.exec_classif.benchmark_init" title="Permalink to this definition">¶</a></dt>
-<dd><p>Initializes the benchmark, by saving the indices of the train
-examples and the cross validation folds.</p>
-<dl class="field-list simple">
-<dt class="field-odd">Parameters</dt>
-<dd class="field-odd"><ul class="simple">
-<li><p><strong>directory</strong> (<em>str</em>) – The benchmark’s result directory</p></li>
-<li><p><strong>classification_indices</strong> (<em>numpy array</em>) – The indices of the examples, splitted for the train/test split</p></li>
-<li><p><strong>labels</strong> (<em>numpy array</em>) – The labels of the dataset</p></li>
-<li><p><strong>labels_dictionary</strong> (<em>dict</em>) – The dictionary with labels as keys and their names as values</p></li>
-<li><p><strong>k_folds</strong> (<em>sklearn.model_selection.Folds object</em>) – The folds for the cross validation process</p></li>
-</ul>
-</dd>
-</dl>
-</dd></dl>
-
-<dl class="py function">
-<dt id="multiview_platform.mono_multi_view_classifiers.exec_classif.exec_benchmark">
-<code class="sig-name descname">exec_benchmark</code><span class="sig-paren">(</span><em class="sig-param">nb_cores</em>, <em class="sig-param">stats_iter</em>, <em class="sig-param">benchmark_arguments_dictionaries</em>, <em class="sig-param">directory</em>, <em class="sig-param">metrics</em>, <em class="sig-param">dataset_var</em>, <em class="sig-param">track_tracebacks</em>, <em class="sig-param">exec_one_benchmark_mono_core=&lt;function exec_one_benchmark_mono_core&gt;</em>, <em class="sig-param">analyze=&lt;function analyze&gt;</em>, <em class="sig-param">delete=&lt;function delete_HDF5&gt;</em>, <em class="sig-param">analyze_iterations=&lt;function analyze_iterations&gt;</em><span class="sig-paren">)</span><a class="headerlink" href="#multiview_platform.mono_multi_view_classifiers.exec_classif.exec_benchmark" title="Permalink to this definition">¶</a></dt>
-<dd><p>Used to execute the needed benchmark(s) on multicore or mono-core functions.</p>
-<dl class="field-list simple">
-<dt class="field-odd">Parameters</dt>
-<dd class="field-odd"><ul class="simple">
-<li><p><strong>nb_cores</strong> (<em>int</em>) – Number of threads that the benchmarks can use.</p></li>
-<li><p><strong>stats_iter</strong> (<em>int</em>) – Number of statistical iterations that have to be done.</p></li>
-<li><p><strong>benchmark_arguments_dictionaries</strong> (<em>list of dictionaries</em>) – All the needed arguments for the benchmarks.</p></li>
-<li><p><strong>classification_indices</strong> (<em>list of lists of numpy.ndarray</em>) – For each statistical iteration a couple of numpy.ndarrays is stored with the indices for the training set and
-the ones of the testing set.</p></li>
-<li><p><strong>directories</strong> (<em>list of strings</em>) – List of the paths to the result directories for each statistical iteration.</p></li>
-<li><p><strong>directory</strong> (<em>string</em>) – Path to the main results directory.</p></li>
-<li><p><strong>multi_class_labels</strong> (<em>ist of lists of numpy.ndarray</em>) – For each label couple, for each statistical iteration a triplet of numpy.ndarrays is stored with the
-indices for the biclass training set, the ones for the biclass testing set and the ones for the
-multiclass testing set.</p></li>
-<li><p><strong>metrics</strong> (<em>list of lists</em>) – metrics that will be used to evaluate the algorithms performance.</p></li>
-<li><p><strong>labels_dictionary</strong> (<em>dictionary</em>) – Dictionary mapping labels indices to labels names.</p></li>
-<li><p><strong>nb_labels</strong> (<em>int</em>) – Total number of different labels in the dataset.</p></li>
-<li><p><strong>dataset_var</strong> (<em>HDF5 dataset file</em>) – The full dataset that wil be used by the benchmark.</p></li>
-<li><p><strong>classifiers_names</strong> (<em>list of strings</em>) – List of the benchmarks’s monoview classifiers names.</p></li>
-<li><p><strong>rest_of_the_args</strong> – Just used for testing purposes</p></li>
-</ul>
-</dd>
-<dt class="field-even">Returns</dt>
-<dd class="field-even"><p><strong>results</strong> – The results of the benchmark.</p>
-</dd>
-<dt class="field-odd">Return type</dt>
-<dd class="field-odd"><p>list of lists</p>
-</dd>
-</dl>
-</dd></dl>
-
-<dl class="py function">
-<dt id="multiview_platform.mono_multi_view_classifiers.exec_classif.exec_classif">
-<code class="sig-name descname">exec_classif</code><span class="sig-paren">(</span><em class="sig-param"><span class="n">arguments</span></em><span class="sig-paren">)</span><a class="headerlink" href="#multiview_platform.mono_multi_view_classifiers.exec_classif.exec_classif" title="Permalink to this definition">¶</a></dt>
-<dd><p>Runs the benchmark with the given arguments</p>
-<dl class="field-list simple">
-<dt class="field-odd">Parameters</dt>
-<dd class="field-odd"><p><strong>arguments</strong> – </p>
-</dd>
-<dt class="field-even">Returns</dt>
-<dd class="field-even"><p><ul class="simple">
-<li><p><em>&gt;&gt;&gt; exec_classif([–config_path, /path/to/config/files/])</em></p></li>
-<li><p><em>&gt;&gt;&gt;</em></p></li>
-</ul>
-</p>
-</dd>
-</dl>
-</dd></dl>
-
-<dl class="py function">
-<dt id="multiview_platform.mono_multi_view_classifiers.exec_classif.exec_one_benchmark_mono_core">
-<code class="sig-name descname">exec_one_benchmark_mono_core</code><span class="sig-paren">(</span><em class="sig-param"><span class="n">dataset_var</span><span class="o">=</span><span class="default_value">None</span></em>, <em class="sig-param"><span class="n">labels_dictionary</span><span class="o">=</span><span class="default_value">None</span></em>, <em class="sig-param"><span class="n">directory</span><span class="o">=</span><span class="default_value">None</span></em>, <em class="sig-param"><span class="n">classification_indices</span><span class="o">=</span><span class="default_value">None</span></em>, <em class="sig-param"><span class="n">args</span><span class="o">=</span><span class="default_value">None</span></em>, <em class="sig-param"><span class="n">k_folds</span><span class="o">=</span><span class="default_value">None</span></em>, <em class="sig-param"><span class="n">random_state</span><span class="o">=</span><span class="default_value">None</span></em>, <em class="sig-param"><span class="n">hyper_param_search</span><span class="o">=</span><span class="default_value">None</span></em>, <em class="sig-param"><span class="n">metrics</span><span class="o">=</span><span class="default_value">None</span></em>, <em class="sig-param"><span class="n">argument_dictionaries</span><span class="o">=</span><span class="default_value">None</span></em>, <em class="sig-param"><span class="n">benchmark</span><span class="o">=</span><span class="default_value">None</span></em>, <em class="sig-param"><span class="n">views</span><span class="o">=</span><span class="default_value">None</span></em>, <em class="sig-param"><span class="n">views_indices</span><span class="o">=</span><span class="default_value">None</span></em>, <em class="sig-param"><span class="n">flag</span><span class="o">=</span><span class="default_value">None</span></em>, <em class="sig-param"><span class="n">labels</span><span class="o">=</span><span class="default_value">None</span></em>, <em class="sig-param"><span class="n">track_tracebacks</span><span class="o">=</span><span class="default_value">False</span></em><span class="sig-paren">)</span><a class="headerlink" href="#multiview_platform.mono_multi_view_classifiers.exec_classif.exec_one_benchmark_mono_core" title="Permalink to this definition">¶</a></dt>
-<dd></dd></dl>
-
-<dl class="py function">
-<dt id="multiview_platform.mono_multi_view_classifiers.exec_classif.extract_dict">
-<code class="sig-name descname">extract_dict</code><span class="sig-paren">(</span><em class="sig-param"><span class="n">classifier_config</span></em><span class="sig-paren">)</span><a class="headerlink" href="#multiview_platform.mono_multi_view_classifiers.exec_classif.extract_dict" title="Permalink to this definition">¶</a></dt>
-<dd><p>Reverse function of get_path_dict</p>
-</dd></dl>
-
-<dl class="py function">
-<dt id="multiview_platform.mono_multi_view_classifiers.exec_classif.gen_single_monoview_arg_dictionary">
-<code class="sig-name descname">gen_single_monoview_arg_dictionary</code><span class="sig-paren">(</span><em class="sig-param"><span class="n">classifier_name</span></em>, <em class="sig-param"><span class="n">arguments</span></em>, <em class="sig-param"><span class="n">nb_class</span></em>, <em class="sig-param"><span class="n">view_index</span></em>, <em class="sig-param"><span class="n">view_name</span></em>, <em class="sig-param"><span class="n">hps_kwargs</span></em><span class="sig-paren">)</span><a class="headerlink" href="#multiview_platform.mono_multi_view_classifiers.exec_classif.gen_single_monoview_arg_dictionary" title="Permalink to this definition">¶</a></dt>
-<dd></dd></dl>
-
-<dl class="py function">
-<dt id="multiview_platform.mono_multi_view_classifiers.exec_classif.gen_single_multiview_arg_dictionary">
-<code class="sig-name descname">gen_single_multiview_arg_dictionary</code><span class="sig-paren">(</span><em class="sig-param"><span class="n">classifier_name</span></em>, <em class="sig-param"><span class="n">arguments</span></em>, <em class="sig-param"><span class="n">nb_class</span></em>, <em class="sig-param"><span class="n">hps_kwargs</span></em>, <em class="sig-param"><span class="n">views_dictionary</span><span class="o">=</span><span class="default_value">None</span></em><span class="sig-paren">)</span><a class="headerlink" href="#multiview_platform.mono_multi_view_classifiers.exec_classif.gen_single_multiview_arg_dictionary" title="Permalink to this definition">¶</a></dt>
-<dd></dd></dl>
-
-<dl class="py function">
-<dt id="multiview_platform.mono_multi_view_classifiers.exec_classif.get_path_dict">
-<code class="sig-name descname">get_path_dict</code><span class="sig-paren">(</span><em class="sig-param"><span class="n">multiview_classifier_args</span></em><span class="sig-paren">)</span><a class="headerlink" href="#multiview_platform.mono_multi_view_classifiers.exec_classif.get_path_dict" title="Permalink to this definition">¶</a></dt>
-<dd><p>This function is used to generate a dictionary with each key being
-the path to the value.
-If given {“key1”:{“key1_1”:value1}, “key2”:value2}, it will return
-{“key1.key1_1”:value1, “key2”:value2}</p>
-</dd></dl>
-
-<dl class="py function">
-<dt id="multiview_platform.mono_multi_view_classifiers.exec_classif.init_argument_dictionaries">
-<code class="sig-name descname">init_argument_dictionaries</code><span class="sig-paren">(</span><em class="sig-param"><span class="n">benchmark</span></em>, <em class="sig-param"><span class="n">views_dictionary</span></em>, <em class="sig-param"><span class="n">nb_class</span></em>, <em class="sig-param"><span class="n">init_kwargs</span></em>, <em class="sig-param"><span class="n">hps_method</span></em>, <em class="sig-param"><span class="n">hps_kwargs</span></em><span class="sig-paren">)</span><a class="headerlink" href="#multiview_platform.mono_multi_view_classifiers.exec_classif.init_argument_dictionaries" title="Permalink to this definition">¶</a></dt>
-<dd></dd></dl>
-
-<dl class="py function">
-<dt id="multiview_platform.mono_multi_view_classifiers.exec_classif.init_benchmark">
-<code class="sig-name descname">init_benchmark</code><span class="sig-paren">(</span><em class="sig-param"><span class="n">cl_type</span></em>, <em class="sig-param"><span class="n">monoview_algos</span></em>, <em class="sig-param"><span class="n">multiview_algos</span></em>, <em class="sig-param"><span class="n">args</span></em><span class="sig-paren">)</span><a class="headerlink" href="#multiview_platform.mono_multi_view_classifiers.exec_classif.init_benchmark" title="Permalink to this definition">¶</a></dt>
-<dd><p>Used to create a list of all the algorithm packages names used for the benchmark.</p>
-<p>First this function will check if the benchmark need mono- or/and multiview
-algorithms and adds to the right
-dictionary the asked algorithms. If none is asked by the user, all will be added.</p>
-<p>If the keyword <cite>“Benchmark”</cite> is used, all mono- and multiview algorithms will be added.</p>
-<dl class="field-list simple">
-<dt class="field-odd">Parameters</dt>
-<dd class="field-odd"><ul class="simple">
-<li><p><strong>cl_type</strong> (<em>List of string</em>) – List of types of needed benchmark</p></li>
-<li><p><strong>multiview_algos</strong> (<em>List of strings</em>) – List of multiview algorithms needed for the benchmark</p></li>
-<li><p><strong>monoview_algos</strong> (<em>Listof strings</em>) – List of monoview algorithms needed for the benchmark</p></li>
-<li><p><strong>args</strong> (<em>ParsedArgumentParser args</em>) – All the input args (used to tune the algorithms)</p></li>
-</ul>
-</dd>
-<dt class="field-even">Returns</dt>
-<dd class="field-even"><p><strong>benchmark</strong> – Dictionary resuming which mono- and multiview algorithms which will be used in the benchmark.</p>
-</dd>
-<dt class="field-odd">Return type</dt>
-<dd class="field-odd"><p>Dictionary of dictionaries</p>
-</dd>
-</dl>
-</dd></dl>
-
-<dl class="py function">
-<dt id="multiview_platform.mono_multi_view_classifiers.exec_classif.init_kwargs">
-<code class="sig-name descname">init_kwargs</code><span class="sig-paren">(</span><em class="sig-param"><span class="n">args</span></em>, <em class="sig-param"><span class="n">classifiers_names</span></em>, <em class="sig-param"><span class="n">framework</span><span class="o">=</span><span class="default_value">'monoview'</span></em><span class="sig-paren">)</span><a class="headerlink" href="#multiview_platform.mono_multi_view_classifiers.exec_classif.init_kwargs" title="Permalink to this definition">¶</a></dt>
-<dd><p>Used to init kwargs thanks to a function in each monoview classifier package.</p>
-<dl class="field-list simple">
-<dt class="field-odd">Parameters</dt>
-<dd class="field-odd"><ul class="simple">
-<li><p><strong>args</strong> (<em>parsed args objects</em>) – All the args passed by the user.</p></li>
-<li><p><strong>classifiers_names</strong> (<em>list of strings</em>) – List of the benchmarks’s monoview classifiers names.</p></li>
-</ul>
-</dd>
-<dt class="field-even">Returns</dt>
-<dd class="field-even"><p><p><strong>kwargs</strong> – Dictionary resuming all the specific arguments for the benchmark, one dictionary for each classifier.</p>
-<p>For example, for Adaboost, the KWARGS will be <cite>{“n_estimators”:&lt;value&gt;, “base_estimator”:&lt;value&gt;}</cite></p>
-</p>
-</dd>
-<dt class="field-odd">Return type</dt>
-<dd class="field-odd"><p>Dictionary</p>
-</dd>
-</dl>
-</dd></dl>
-
-<dl class="py function">
-<dt id="multiview_platform.mono_multi_view_classifiers.exec_classif.init_kwargs_func">
-<code class="sig-name descname">init_kwargs_func</code><span class="sig-paren">(</span><em class="sig-param"><span class="n">args</span></em>, <em class="sig-param"><span class="n">benchmark</span></em><span class="sig-paren">)</span><a class="headerlink" href="#multiview_platform.mono_multi_view_classifiers.exec_classif.init_kwargs_func" title="Permalink to this definition">¶</a></dt>
-<dd><p>Dispached the kwargs initialization to monoview and multiview and creates
-the kwargs variable</p>
-<dl class="field-list simple">
-<dt class="field-odd">Parameters</dt>
-<dd class="field-odd"><ul class="simple">
-<li><p><strong>args</strong> (<em>parsed args objects</em>) – All the args passed by the user.</p></li>
-<li><p><strong>benchmark</strong> (<em>dict</em>) – The name of the mono- and mutli-view classifiers to run in the benchmark</p></li>
-</ul>
-</dd>
-<dt class="field-even">Returns</dt>
-<dd class="field-even"><p><strong>kwargs</strong> – The arguments for each mono- and multiview algorithms</p>
-</dd>
-<dt class="field-odd">Return type</dt>
-<dd class="field-odd"><p>dict</p>
-</dd>
-</dl>
-</dd></dl>
-
-<dl class="py function">
-<dt id="multiview_platform.mono_multi_view_classifiers.exec_classif.init_monoview_exps">
-<code class="sig-name descname">init_monoview_exps</code><span class="sig-paren">(</span><em class="sig-param"><span class="n">classifier_names</span></em>, <em class="sig-param"><span class="n">views_dictionary</span></em>, <em class="sig-param"><span class="n">nb_class</span></em>, <em class="sig-param"><span class="n">kwargs_init</span></em>, <em class="sig-param"><span class="n">hps_method</span></em>, <em class="sig-param"><span class="n">hps_kwargs</span></em><span class="sig-paren">)</span><a class="headerlink" href="#multiview_platform.mono_multi_view_classifiers.exec_classif.init_monoview_exps" title="Permalink to this definition">¶</a></dt>
-<dd><p>Used to add each monoview exeperience args to the list of monoview experiences args.</p>
-<p>First this function will check if the benchmark need mono- or/and multiview algorithms and adds to the right
-dictionary the asked algorithms. If none is asked by the user, all will be added.</p>
-<p>If the keyword <cite>“Benchmark”</cite> is used, all mono- and multiview algorithms will be added.</p>
-<dl class="field-list simple">
-<dt class="field-odd">Parameters</dt>
-<dd class="field-odd"><ul class="simple">
-<li><p><strong>classifier_names</strong> (<em>dictionary</em>) – All types of monoview and multiview experiments that have to be benchmarked</p></li>
-<li><p><strong>argument_dictionaries</strong> (<em>dictionary</em>) – Maps monoview and multiview experiments arguments.</p></li>
-<li><p><strong>views_dictionary</strong> (<em>dictionary</em>) – Maps the view names to their index in the HDF5 dataset</p></li>
-<li><p><strong>nb_class</strong> (<em>integer</em>) – Number of different labels in the classification</p></li>
-</ul>
-</dd>
-<dt class="field-even">Returns</dt>
-<dd class="field-even"><p><strong>benchmark</strong> – Dictionary resuming which mono- and multiview algorithms which will be used in the benchmark.</p>
-</dd>
-<dt class="field-odd">Return type</dt>
-<dd class="field-odd"><p>Dictionary of dictionaries</p>
-</dd>
-</dl>
-</dd></dl>
-
-<dl class="py function">
-<dt id="multiview_platform.mono_multi_view_classifiers.exec_classif.init_multiview_exps">
-<code class="sig-name descname">init_multiview_exps</code><span class="sig-paren">(</span><em class="sig-param"><span class="n">classifier_names</span></em>, <em class="sig-param"><span class="n">views_dictionary</span></em>, <em class="sig-param"><span class="n">nb_class</span></em>, <em class="sig-param"><span class="n">kwargs_init</span></em>, <em class="sig-param"><span class="n">hps_method</span></em>, <em class="sig-param"><span class="n">hps_kwargs</span></em><span class="sig-paren">)</span><a class="headerlink" href="#multiview_platform.mono_multi_view_classifiers.exec_classif.init_multiview_exps" title="Permalink to this definition">¶</a></dt>
-<dd></dd></dl>
-
-<dl class="py function">
-<dt id="multiview_platform.mono_multi_view_classifiers.exec_classif.is_dict_in">
-<code class="sig-name descname">is_dict_in</code><span class="sig-paren">(</span><em class="sig-param"><span class="n">dictionary</span></em><span class="sig-paren">)</span><a class="headerlink" href="#multiview_platform.mono_multi_view_classifiers.exec_classif.is_dict_in" title="Permalink to this definition">¶</a></dt>
-<dd><p>Returns True if any of the dictionary value is a dictionary itself.</p>
-<dl class="field-list simple">
-<dt class="field-odd">Parameters</dt>
-<dd class="field-odd"><p><strong>dictionary</strong> – </p>
-</dd>
-</dl>
-</dd></dl>
-
-<dl class="py function">
-<dt id="multiview_platform.mono_multi_view_classifiers.exec_classif.set_element">
-<code class="sig-name descname">set_element</code><span class="sig-paren">(</span><em class="sig-param"><span class="n">dictionary</span></em>, <em class="sig-param"><span class="n">path</span></em>, <em class="sig-param"><span class="n">value</span></em><span class="sig-paren">)</span><a class="headerlink" href="#multiview_platform.mono_multi_view_classifiers.exec_classif.set_element" title="Permalink to this definition">¶</a></dt>
-<dd><p>Set value in dictionary at the location indicated by path</p>
-</dd></dl>
-
+<div class="section" id="multiview-platform-mono-multi-view-classifiers-exec-classif-module">
+<h2>multiview_platform.mono_multi_view_classifiers.exec_classif module<a class="headerlink" href="#multiview-platform-mono-multi-view-classifiers-exec-classif-module" title="Permalink to this headline">¶</a></h2>
 </div>
-<div class="section" id="module-multiview_platform.mono_multi_view_classifiers.result_analysis">
-<span id="multiview-platform-mono-multi-view-classifiers-result-analysis-module"></span><h2>multiview_platform.mono_multi_view_classifiers.result_analysis module<a class="headerlink" href="#module-multiview_platform.mono_multi_view_classifiers.result_analysis" title="Permalink to this headline">¶</a></h2>
+<div class="section" id="multiview-platform-mono-multi-view-classifiers-result-analysis-module">
+<h2>multiview_platform.mono_multi_view_classifiers.result_analysis module<a class="headerlink" href="#multiview-platform-mono-multi-view-classifiers-result-analysis-module" title="Permalink to this headline">¶</a></h2>
 </div>
-<div class="section" id="module-multiview_platform.mono_multi_view_classifiers">
-<span id="module-contents"></span><h2>Module contents<a class="headerlink" href="#module-multiview_platform.mono_multi_view_classifiers" title="Permalink to this headline">¶</a></h2>
+<div class="section" id="module-contents">
+<h2>Module contents<a class="headerlink" href="#module-contents" title="Permalink to this headline">¶</a></h2>
 </div>
 </div>
 
@@ -428,9 +187,9 @@ dictionary the asked algorithms. If none is asked by the user, all will be added
 <li><a class="reference internal" href="#">multiview_platform.mono_multi_view_classifiers package</a><ul>
 <li><a class="reference internal" href="#subpackages">Subpackages</a></li>
 <li><a class="reference internal" href="#submodules">Submodules</a></li>
-<li><a class="reference internal" href="#module-multiview_platform.mono_multi_view_classifiers.exec_classif">multiview_platform.mono_multi_view_classifiers.exec_classif module</a></li>
-<li><a class="reference internal" href="#module-multiview_platform.mono_multi_view_classifiers.result_analysis">multiview_platform.mono_multi_view_classifiers.result_analysis module</a></li>
-<li><a class="reference internal" href="#module-multiview_platform.mono_multi_view_classifiers">Module contents</a></li>
+<li><a class="reference internal" href="#multiview-platform-mono-multi-view-classifiers-exec-classif-module">multiview_platform.mono_multi_view_classifiers.exec_classif module</a></li>
+<li><a class="reference internal" href="#multiview-platform-mono-multi-view-classifiers-result-analysis-module">multiview_platform.mono_multi_view_classifiers.result_analysis module</a></li>
+<li><a class="reference internal" href="#module-contents">Module contents</a></li>
 </ul>
 </li>
 </ul>
diff --git a/docs/build/references/multiview_platform.mono_multi_view_classifiers.multiview_classifiers.difficulty_fusion.html b/docs/build/references/multiview_platform.mono_multi_view_classifiers.multiview_classifiers.difficulty_fusion.html
index a712ec5fc1722a7e2e0fd986b33a615c5d16bf74..8713b261a5e41331d4b2b1cabf73db1949fd12b1 100644
--- a/docs/build/references/multiview_platform.mono_multi_view_classifiers.multiview_classifiers.difficulty_fusion.html
+++ b/docs/build/references/multiview_platform.mono_multi_view_classifiers.multiview_classifiers.difficulty_fusion.html
@@ -1,9 +1,11 @@
 
-<!DOCTYPE html>
+<!DOCTYPE html PUBLIC "-//W3C//DTD XHTML 1.0 Transitional//EN"
+  "http://www.w3.org/TR/xhtml1/DTD/xhtml1-transitional.dtd">
 
-<html>
+<html xmlns="http://www.w3.org/1999/xhtml">
   <head>
-    <meta charset="utf-8" />
+    <meta http-equiv="X-UA-Compatible" content="IE=Edge" />
+    <meta http-equiv="Content-Type" content="text/html; charset=utf-8" />
     <title>multiview_platform.mono_multi_view_classifiers.multiview_classifiers.difficulty_fusion package &#8212; SuMMIT 0 documentation</title>
     <link rel="stylesheet" href="../_static/classic.css" type="text/css" />
     <link rel="stylesheet" href="../_static/pygments.css" type="text/css" />
@@ -58,19 +60,8 @@
 <div class="section" id="multiview-platform-mono-multi-view-classifiers-multiview-classifiers-difficulty-fusion-difficulty-fusion-module">
 <h2>multiview_platform.mono_multi_view_classifiers.multiview_classifiers.difficulty_fusion.difficulty_fusion module<a class="headerlink" href="#multiview-platform-mono-multi-view-classifiers-multiview-classifiers-difficulty-fusion-difficulty-fusion-module" title="Permalink to this headline">¶</a></h2>
 </div>
-<div class="section" id="module-multiview_platform.mono_multi_view_classifiers.multiview_classifiers.difficulty_fusion">
-<span id="module-contents"></span><h2>Module contents<a class="headerlink" href="#module-multiview_platform.mono_multi_view_classifiers.multiview_classifiers.difficulty_fusion" title="Permalink to this headline">¶</a></h2>
-<dl class="py class">
-<dt id="multiview_platform.mono_multi_view_classifiers.multiview_classifiers.difficulty_fusion.DifficultyFusion">
-<em class="property">class </em><code class="sig-name descname">DifficultyFusion</code><span class="sig-paren">(</span><em class="sig-param"><span class="n">random_state</span><span class="o">=</span><span class="default_value">None</span></em>, <em class="sig-param"><span class="n">classifier_names</span><span class="o">=</span><span class="default_value">None</span></em>, <em class="sig-param"><span class="n">monoview_estimators</span><span class="o">=</span><span class="default_value">None</span></em>, <em class="sig-param"><span class="n">classifier_configs</span><span class="o">=</span><span class="default_value">None</span></em><span class="sig-paren">)</span><a class="headerlink" href="#multiview_platform.mono_multi_view_classifiers.multiview_classifiers.difficulty_fusion.DifficultyFusion" title="Permalink to this definition">¶</a></dt>
-<dd><p>Bases: <code class="xref py py-class docutils literal notranslate"><span class="pre">multiview_platform.mono_multi_view_classifiers.multiview_classifiers.additions.diversity_utils.GlobalDiversityFusionClassifier</span></code></p>
-<dl class="py method">
-<dt id="multiview_platform.mono_multi_view_classifiers.multiview_classifiers.difficulty_fusion.DifficultyFusion.diversity_measure">
-<code class="sig-name descname">diversity_measure</code><span class="sig-paren">(</span><em class="sig-param"><span class="n">classifiers_decisions</span></em>, <em class="sig-param"><span class="n">combination</span></em>, <em class="sig-param"><span class="n">y</span></em><span class="sig-paren">)</span><a class="headerlink" href="#multiview_platform.mono_multi_view_classifiers.multiview_classifiers.difficulty_fusion.DifficultyFusion.diversity_measure" title="Permalink to this definition">¶</a></dt>
-<dd></dd></dl>
-
-</dd></dl>
-
+<div class="section" id="module-contents">
+<h2>Module contents<a class="headerlink" href="#module-contents" title="Permalink to this headline">¶</a></h2>
 </div>
 </div>
 
@@ -86,7 +77,7 @@
 <li><a class="reference internal" href="#submodules">Submodules</a></li>
 <li><a class="reference internal" href="#multiview-platform-mono-multi-view-classifiers-multiview-classifiers-difficulty-fusion-analyze-results-module">multiview_platform.mono_multi_view_classifiers.multiview_classifiers.difficulty_fusion.analyze_results module</a></li>
 <li><a class="reference internal" href="#multiview-platform-mono-multi-view-classifiers-multiview-classifiers-difficulty-fusion-difficulty-fusion-module">multiview_platform.mono_multi_view_classifiers.multiview_classifiers.difficulty_fusion.difficulty_fusion module</a></li>
-<li><a class="reference internal" href="#module-multiview_platform.mono_multi_view_classifiers.multiview_classifiers.difficulty_fusion">Module contents</a></li>
+<li><a class="reference internal" href="#module-contents">Module contents</a></li>
 </ul>
 </li>
 </ul>
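The removed reference entry above documents only the signature `diversity_measure(classifiers_decisions, combination, y)` of `DifficultyFusion`. For orientation, here is a minimal sketch of Kuncheva's "difficulty" diversity measure that a method with this signature conventionally computes; the layout of `classifiers_decisions` (an array of per-classifier prediction vectors) is an assumption for illustration, not the platform's actual indexing:

```python
import numpy as np

# Sketch of Kuncheva's "difficulty" measure (assumed semantics, not the
# platform's code): classifiers_decisions is taken to be indexable by
# classifier, and combination selects which classifiers to evaluate.
def difficulty_measure(classifiers_decisions, combination, y):
    decisions = np.array([classifiers_decisions[idx] for idx in combination])
    # Fraction of the selected classifiers that are correct on each example.
    correct_fraction = np.mean(decisions == y, axis=0)
    # Low variance means the examples are uniformly easy or hard for the
    # ensemble; high variance means some examples are "difficult".
    return np.var(correct_fraction)
```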
diff --git a/docs/build/references/multiview_platform.mono_multi_view_classifiers.multiview_classifiers.disagree_fusion.html b/docs/build/references/multiview_platform.mono_multi_view_classifiers.multiview_classifiers.disagree_fusion.html
index 9c3d2ab287721120bfaf00aad444ab592f79337b..5e97ff34f525498aaf1e0d4d6d7064b530af1c56 100644
--- a/docs/build/references/multiview_platform.mono_multi_view_classifiers.multiview_classifiers.disagree_fusion.html
+++ b/docs/build/references/multiview_platform.mono_multi_view_classifiers.multiview_classifiers.disagree_fusion.html
@@ -1,9 +1,11 @@
 
-<!DOCTYPE html>
+<!DOCTYPE html PUBLIC "-//W3C//DTD XHTML 1.0 Transitional//EN"
+  "http://www.w3.org/TR/xhtml1/DTD/xhtml1-transitional.dtd">
 
-<html>
+<html xmlns="http://www.w3.org/1999/xhtml">
   <head>
-    <meta charset="utf-8" />
+    <meta http-equiv="X-UA-Compatible" content="IE=Edge" />
+    <meta http-equiv="Content-Type" content="text/html; charset=utf-8" />
     <title>multiview_platform.mono_multi_view_classifiers.multiview_classifiers.disagree_fusion package &#8212; SuMMIT 0 documentation</title>
     <link rel="stylesheet" href="../_static/classic.css" type="text/css" />
     <link rel="stylesheet" href="../_static/pygments.css" type="text/css" />
@@ -58,19 +60,8 @@
 <div class="section" id="multiview-platform-mono-multi-view-classifiers-multiview-classifiers-disagree-fusion-disagree-fusion-module">
 <h2>multiview_platform.mono_multi_view_classifiers.multiview_classifiers.disagree_fusion.disagree_fusion module<a class="headerlink" href="#multiview-platform-mono-multi-view-classifiers-multiview-classifiers-disagree-fusion-disagree-fusion-module" title="Permalink to this headline">¶</a></h2>
 </div>
-<div class="section" id="module-multiview_platform.mono_multi_view_classifiers.multiview_classifiers.disagree_fusion">
-<span id="module-contents"></span><h2>Module contents<a class="headerlink" href="#module-multiview_platform.mono_multi_view_classifiers.multiview_classifiers.disagree_fusion" title="Permalink to this headline">¶</a></h2>
-<dl class="py class">
-<dt id="multiview_platform.mono_multi_view_classifiers.multiview_classifiers.disagree_fusion.DisagreeFusion">
-<em class="property">class </em><code class="sig-name descname">DisagreeFusion</code><span class="sig-paren">(</span><em class="sig-param"><span class="n">random_state</span><span class="o">=</span><span class="default_value">None</span></em>, <em class="sig-param"><span class="n">classifier_names</span><span class="o">=</span><span class="default_value">None</span></em>, <em class="sig-param"><span class="n">monoview_estimators</span><span class="o">=</span><span class="default_value">None</span></em>, <em class="sig-param"><span class="n">classifier_configs</span><span class="o">=</span><span class="default_value">None</span></em><span class="sig-paren">)</span><a class="headerlink" href="#multiview_platform.mono_multi_view_classifiers.multiview_classifiers.disagree_fusion.DisagreeFusion" title="Permalink to this definition">¶</a></dt>
-<dd><p>Bases: <code class="xref py py-class docutils literal notranslate"><span class="pre">multiview_platform.mono_multi_view_classifiers.multiview_classifiers.additions.diversity_utils.CoupleDiversityFusionClassifier</span></code></p>
-<dl class="py method">
-<dt id="multiview_platform.mono_multi_view_classifiers.multiview_classifiers.disagree_fusion.DisagreeFusion.diversity_measure">
-<code class="sig-name descname">diversity_measure</code><span class="sig-paren">(</span><em class="sig-param"><span class="n">first_classifier_decision</span></em>, <em class="sig-param"><span class="n">second_classifier_decision</span></em>, <em class="sig-param"><span class="n">_</span></em><span class="sig-paren">)</span><a class="headerlink" href="#multiview_platform.mono_multi_view_classifiers.multiview_classifiers.disagree_fusion.DisagreeFusion.diversity_measure" title="Permalink to this definition">¶</a></dt>
-<dd></dd></dl>
-
-</dd></dl>
-
+<div class="section" id="module-contents">
+<h2>Module contents<a class="headerlink" href="#module-contents" title="Permalink to this headline">¶</a></h2>
 </div>
 </div>
 
@@ -86,7 +77,7 @@
 <li><a class="reference internal" href="#submodules">Submodules</a></li>
 <li><a class="reference internal" href="#multiview-platform-mono-multi-view-classifiers-multiview-classifiers-disagree-fusion-analyze-results-module">multiview_platform.mono_multi_view_classifiers.multiview_classifiers.disagree_fusion.analyze_results module</a></li>
 <li><a class="reference internal" href="#multiview-platform-mono-multi-view-classifiers-multiview-classifiers-disagree-fusion-disagree-fusion-module">multiview_platform.mono_multi_view_classifiers.multiview_classifiers.disagree_fusion.disagree_fusion module</a></li>
-<li><a class="reference internal" href="#module-multiview_platform.mono_multi_view_classifiers.multiview_classifiers.disagree_fusion">Module contents</a></li>
+<li><a class="reference internal" href="#module-contents">Module contents</a></li>
 </ul>
 </li>
 </ul>
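Similarly, `DisagreeFusion.diversity_measure(first_classifier_decision, second_classifier_decision, _)` is documented above without a body. The classic pairwise disagreement measure it is named after can be sketched as follows; returning the mean rather than the element-wise comparison is an assumption, since the platform may aggregate pairs elsewhere:

```python
import numpy as np

# Classic pairwise disagreement measure (illustrative sketch): the
# fraction of examples on which the two classifiers' decisions differ.
# The unused third argument mirrors the documented signature.
def disagreement_measure(first_classifier_decision, second_classifier_decision, _):
    return np.mean(first_classifier_decision != second_classifier_decision)
```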
diff --git a/docs/build/references/multiview_platform.mono_multi_view_classifiers.multiview_classifiers.double_fault_fusion.html b/docs/build/references/multiview_platform.mono_multi_view_classifiers.multiview_classifiers.double_fault_fusion.html
index a533b3833782a5a840ce0f22ac312ba6c33ff47c..e14428ed6f2830a1bb42346e990c7607a57649d9 100644
--- a/docs/build/references/multiview_platform.mono_multi_view_classifiers.multiview_classifiers.double_fault_fusion.html
+++ b/docs/build/references/multiview_platform.mono_multi_view_classifiers.multiview_classifiers.double_fault_fusion.html
@@ -1,9 +1,11 @@
 
-<!DOCTYPE html>
+<!DOCTYPE html PUBLIC "-//W3C//DTD XHTML 1.0 Transitional//EN"
+  "http://www.w3.org/TR/xhtml1/DTD/xhtml1-transitional.dtd">
 
-<html>
+<html xmlns="http://www.w3.org/1999/xhtml">
   <head>
-    <meta charset="utf-8" />
+    <meta http-equiv="X-UA-Compatible" content="IE=Edge" />
+    <meta http-equiv="Content-Type" content="text/html; charset=utf-8" />
     <title>multiview_platform.mono_multi_view_classifiers.multiview_classifiers.double_fault_fusion package &#8212; SuMMIT 0 documentation</title>
     <link rel="stylesheet" href="../_static/classic.css" type="text/css" />
     <link rel="stylesheet" href="../_static/pygments.css" type="text/css" />
@@ -58,19 +60,8 @@
 <div class="section" id="multiview-platform-mono-multi-view-classifiers-multiview-classifiers-double-fault-fusion-double-fault-fusion-module">
 <h2>multiview_platform.mono_multi_view_classifiers.multiview_classifiers.double_fault_fusion.double_fault_fusion module<a class="headerlink" href="#multiview-platform-mono-multi-view-classifiers-multiview-classifiers-double-fault-fusion-double-fault-fusion-module" title="Permalink to this headline">¶</a></h2>
 </div>
-<div class="section" id="module-multiview_platform.mono_multi_view_classifiers.multiview_classifiers.double_fault_fusion">
-<span id="module-contents"></span><h2>Module contents<a class="headerlink" href="#module-multiview_platform.mono_multi_view_classifiers.multiview_classifiers.double_fault_fusion" title="Permalink to this headline">¶</a></h2>
-<dl class="py class">
-<dt id="multiview_platform.mono_multi_view_classifiers.multiview_classifiers.double_fault_fusion.DoubleFaultFusion">
-<em class="property">class </em><code class="sig-name descname">DoubleFaultFusion</code><span class="sig-paren">(</span><em class="sig-param"><span class="n">random_state</span><span class="o">=</span><span class="default_value">None</span></em>, <em class="sig-param"><span class="n">classifier_names</span><span class="o">=</span><span class="default_value">None</span></em>, <em class="sig-param"><span class="n">monoview_estimators</span><span class="o">=</span><span class="default_value">None</span></em>, <em class="sig-param"><span class="n">classifier_configs</span><span class="o">=</span><span class="default_value">None</span></em><span class="sig-paren">)</span><a class="headerlink" href="#multiview_platform.mono_multi_view_classifiers.multiview_classifiers.double_fault_fusion.DoubleFaultFusion" title="Permalink to this definition">¶</a></dt>
-<dd><p>Bases: <code class="xref py py-class docutils literal notranslate"><span class="pre">multiview_platform.mono_multi_view_classifiers.multiview_classifiers.additions.diversity_utils.CoupleDiversityFusionClassifier</span></code></p>
-<dl class="py method">
-<dt id="multiview_platform.mono_multi_view_classifiers.multiview_classifiers.double_fault_fusion.DoubleFaultFusion.diversity_measure">
-<code class="sig-name descname">diversity_measure</code><span class="sig-paren">(</span><em class="sig-param"><span class="n">first_classifier_decision</span></em>, <em class="sig-param"><span class="n">second_classifier_decision</span></em>, <em class="sig-param"><span class="n">y</span></em><span class="sig-paren">)</span><a class="headerlink" href="#multiview_platform.mono_multi_view_classifiers.multiview_classifiers.double_fault_fusion.DoubleFaultFusion.diversity_measure" title="Permalink to this definition">¶</a></dt>
-<dd></dd></dl>
-
-</dd></dl>
-
+<div class="section" id="module-contents">
+<h2>Module contents<a class="headerlink" href="#module-contents" title="Permalink to this headline">¶</a></h2>
 </div>
 </div>
 
@@ -86,7 +77,7 @@
 <li><a class="reference internal" href="#submodules">Submodules</a></li>
 <li><a class="reference internal" href="#multiview-platform-mono-multi-view-classifiers-multiview-classifiers-double-fault-fusion-analyze-results-module">multiview_platform.mono_multi_view_classifiers.multiview_classifiers.double_fault_fusion.analyze_results module</a></li>
 <li><a class="reference internal" href="#multiview-platform-mono-multi-view-classifiers-multiview-classifiers-double-fault-fusion-double-fault-fusion-module">multiview_platform.mono_multi_view_classifiers.multiview_classifiers.double_fault_fusion.double_fault_fusion module</a></li>
-<li><a class="reference internal" href="#module-multiview_platform.mono_multi_view_classifiers.multiview_classifiers.double_fault_fusion">Module contents</a></li>
+<li><a class="reference internal" href="#module-contents">Module contents</a></li>
 </ul>
 </li>
 </ul>
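The `DoubleFaultFusion.diversity_measure` entry removed above takes the true labels `y` in addition to the two decision vectors, which matches the textbook double-fault measure; a hedged sketch under that assumption:

```python
import numpy as np

# Double-fault measure (illustrative sketch, not the platform's code):
# the fraction of examples that *both* classifiers misclassify.
def double_fault_measure(first_classifier_decision, second_classifier_decision, y):
    both_wrong = (first_classifier_decision != y) & (second_classifier_decision != y)
    return np.mean(both_wrong)
```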
diff --git a/docs/build/references/multiview_platform.mono_multi_view_classifiers.multiview_classifiers.entropy_fusion.html b/docs/build/references/multiview_platform.mono_multi_view_classifiers.multiview_classifiers.entropy_fusion.html
index 287a22d32890ef33aac3a5832a4203d9f6faa369..250f427e69583c392f263af2cea926a29ee643cc 100644
--- a/docs/build/references/multiview_platform.mono_multi_view_classifiers.multiview_classifiers.entropy_fusion.html
+++ b/docs/build/references/multiview_platform.mono_multi_view_classifiers.multiview_classifiers.entropy_fusion.html
@@ -1,9 +1,11 @@
 
-<!DOCTYPE html>
+<!DOCTYPE html PUBLIC "-//W3C//DTD XHTML 1.0 Transitional//EN"
+  "http://www.w3.org/TR/xhtml1/DTD/xhtml1-transitional.dtd">
 
-<html>
+<html xmlns="http://www.w3.org/1999/xhtml">
   <head>
-    <meta charset="utf-8" />
+    <meta http-equiv="X-UA-Compatible" content="IE=Edge" />
+    <meta http-equiv="Content-Type" content="text/html; charset=utf-8" />
     <title>multiview_platform.mono_multi_view_classifiers.multiview_classifiers.entropy_fusion package &#8212; SuMMIT 0 documentation</title>
     <link rel="stylesheet" href="../_static/classic.css" type="text/css" />
     <link rel="stylesheet" href="../_static/pygments.css" type="text/css" />
@@ -58,19 +60,8 @@
 <div class="section" id="multiview-platform-mono-multi-view-classifiers-multiview-classifiers-entropy-fusion-entropy-fusion-module">
 <h2>multiview_platform.mono_multi_view_classifiers.multiview_classifiers.entropy_fusion.entropy_fusion module<a class="headerlink" href="#multiview-platform-mono-multi-view-classifiers-multiview-classifiers-entropy-fusion-entropy-fusion-module" title="Permalink to this headline">¶</a></h2>
 </div>
-<div class="section" id="module-multiview_platform.mono_multi_view_classifiers.multiview_classifiers.entropy_fusion">
-<span id="module-contents"></span><h2>Module contents<a class="headerlink" href="#module-multiview_platform.mono_multi_view_classifiers.multiview_classifiers.entropy_fusion" title="Permalink to this headline">¶</a></h2>
-<dl class="py class">
-<dt id="multiview_platform.mono_multi_view_classifiers.multiview_classifiers.entropy_fusion.EntropyFusion">
-<em class="property">class </em><code class="sig-name descname">EntropyFusion</code><span class="sig-paren">(</span><em class="sig-param"><span class="n">random_state</span><span class="o">=</span><span class="default_value">None</span></em>, <em class="sig-param"><span class="n">classifier_names</span><span class="o">=</span><span class="default_value">None</span></em>, <em class="sig-param"><span class="n">monoview_estimators</span><span class="o">=</span><span class="default_value">None</span></em>, <em class="sig-param"><span class="n">classifier_configs</span><span class="o">=</span><span class="default_value">None</span></em><span class="sig-paren">)</span><a class="headerlink" href="#multiview_platform.mono_multi_view_classifiers.multiview_classifiers.entropy_fusion.EntropyFusion" title="Permalink to this definition">¶</a></dt>
-<dd><p>Bases: <code class="xref py py-class docutils literal notranslate"><span class="pre">multiview_platform.mono_multi_view_classifiers.multiview_classifiers.additions.diversity_utils.GlobalDiversityFusionClassifier</span></code></p>
-<dl class="py method">
-<dt id="multiview_platform.mono_multi_view_classifiers.multiview_classifiers.entropy_fusion.EntropyFusion.diversity_measure">
-<code class="sig-name descname">diversity_measure</code><span class="sig-paren">(</span><em class="sig-param"><span class="n">classifiers_decisions</span></em>, <em class="sig-param"><span class="n">combination</span></em>, <em class="sig-param"><span class="n">y</span></em><span class="sig-paren">)</span><a class="headerlink" href="#multiview_platform.mono_multi_view_classifiers.multiview_classifiers.entropy_fusion.EntropyFusion.diversity_measure" title="Permalink to this definition">¶</a></dt>
-<dd></dd></dl>
-
-</dd></dl>
-
+<div class="section" id="module-contents">
+<h2>Module contents<a class="headerlink" href="#module-contents" title="Permalink to this headline">¶</a></h2>
 </div>
 </div>
 
@@ -86,7 +77,7 @@
 <li><a class="reference internal" href="#submodules">Submodules</a></li>
 <li><a class="reference internal" href="#multiview-platform-mono-multi-view-classifiers-multiview-classifiers-entropy-fusion-analyze-results-module">multiview_platform.mono_multi_view_classifiers.multiview_classifiers.entropy_fusion.analyze_results module</a></li>
 <li><a class="reference internal" href="#multiview-platform-mono-multi-view-classifiers-multiview-classifiers-entropy-fusion-entropy-fusion-module">multiview_platform.mono_multi_view_classifiers.multiview_classifiers.entropy_fusion.entropy_fusion module</a></li>
-<li><a class="reference internal" href="#module-multiview_platform.mono_multi_view_classifiers.multiview_classifiers.entropy_fusion">Module contents</a></li>
+<li><a class="reference internal" href="#module-contents">Module contents</a></li>
 </ul>
 </li>
 </ul>
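For the `EntropyFusion.diversity_measure(classifiers_decisions, combination, y)` entry removed above, the measure conventionally associated with that name is Kuncheva's entropy measure E; the sketch below assumes the same per-classifier indexing as the difficulty sketch earlier:

```python
import numpy as np

# Kuncheva's entropy measure E (illustrative sketch, assumed indexing):
# maximal when the selected classifiers split evenly on each example.
def entropy_measure(classifiers_decisions, combination, y):
    decisions = np.array([classifiers_decisions[idx] for idx in combination])
    nb_clfs = len(combination)
    nb_correct = np.sum(decisions == y, axis=0)  # per-example correct count
    return np.mean(np.minimum(nb_correct, nb_clfs - nb_correct)
                   / (nb_clfs - np.ceil(nb_clfs / 2)))
```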
diff --git a/docs/build/references/multiview_platform.mono_multi_view_classifiers.multiview_classifiers.fat_late_fusion.html b/docs/build/references/multiview_platform.mono_multi_view_classifiers.multiview_classifiers.fat_late_fusion.html
index ed0df15f8c93ee5ed3361c178d4fdb906f266c64..879518351c835a305f3c02737d223ba772bfdefd 100644
--- a/docs/build/references/multiview_platform.mono_multi_view_classifiers.multiview_classifiers.fat_late_fusion.html
+++ b/docs/build/references/multiview_platform.mono_multi_view_classifiers.multiview_classifiers.fat_late_fusion.html
@@ -1,9 +1,11 @@
 
-<!DOCTYPE html>
+<!DOCTYPE html PUBLIC "-//W3C//DTD XHTML 1.0 Transitional//EN"
+  "http://www.w3.org/TR/xhtml1/DTD/xhtml1-transitional.dtd">
 
-<html>
+<html xmlns="http://www.w3.org/1999/xhtml">
   <head>
-    <meta charset="utf-8" />
+    <meta http-equiv="X-UA-Compatible" content="IE=Edge" />
+    <meta http-equiv="Content-Type" content="text/html; charset=utf-8" />
     <title>multiview_platform.mono_multi_view_classifiers.multiview_classifiers.fat_late_fusion package &#8212; SuMMIT 0 documentation</title>
     <link rel="stylesheet" href="../_static/classic.css" type="text/css" />
     <link rel="stylesheet" href="../_static/pygments.css" type="text/css" />
diff --git a/docs/build/references/multiview_platform.mono_multi_view_classifiers.multiview_classifiers.fat_scm_late_fusion.html b/docs/build/references/multiview_platform.mono_multi_view_classifiers.multiview_classifiers.fat_scm_late_fusion.html
index cb8c77b6151bba682bbd510510f1bd902112472e..f462fcdc01440192ae20da27be8111ff280b54e6 100644
--- a/docs/build/references/multiview_platform.mono_multi_view_classifiers.multiview_classifiers.fat_scm_late_fusion.html
+++ b/docs/build/references/multiview_platform.mono_multi_view_classifiers.multiview_classifiers.fat_scm_late_fusion.html
@@ -1,9 +1,11 @@
 
-<!DOCTYPE html>
+<!DOCTYPE html PUBLIC "-//W3C//DTD XHTML 1.0 Transitional//EN"
+  "http://www.w3.org/TR/xhtml1/DTD/xhtml1-transitional.dtd">
 
-<html>
+<html xmlns="http://www.w3.org/1999/xhtml">
   <head>
-    <meta charset="utf-8" />
+    <meta http-equiv="X-UA-Compatible" content="IE=Edge" />
+    <meta http-equiv="Content-Type" content="text/html; charset=utf-8" />
     <title>multiview_platform.mono_multi_view_classifiers.multiview_classifiers.fat_scm_late_fusion package &#8212; SuMMIT 0 documentation</title>
     <link rel="stylesheet" href="../_static/classic.css" type="text/css" />
     <link rel="stylesheet" href="../_static/pygments.css" type="text/css" />
diff --git a/docs/build/references/multiview_platform.mono_multi_view_classifiers.multiview_classifiers.fusion.Methods.EarlyFusionPackage.html b/docs/build/references/multiview_platform.mono_multi_view_classifiers.multiview_classifiers.fusion.Methods.EarlyFusionPackage.html
index b9f2c2991052abcc1df8c23e11ee2cefe64e9014..efe4897a656d6489b58eecaa7d9fab1b7ae97b21 100644
--- a/docs/build/references/multiview_platform.mono_multi_view_classifiers.multiview_classifiers.fusion.Methods.EarlyFusionPackage.html
+++ b/docs/build/references/multiview_platform.mono_multi_view_classifiers.multiview_classifiers.fusion.Methods.EarlyFusionPackage.html
@@ -1,9 +1,11 @@
 
-<!DOCTYPE html>
+<!DOCTYPE html PUBLIC "-//W3C//DTD XHTML 1.0 Transitional//EN"
+  "http://www.w3.org/TR/xhtml1/DTD/xhtml1-transitional.dtd">
 
-<html>
+<html xmlns="http://www.w3.org/1999/xhtml">
   <head>
-    <meta charset="utf-8" />
+    <meta http-equiv="X-UA-Compatible" content="IE=Edge" />
+    <meta http-equiv="Content-Type" content="text/html; charset=utf-8" />
     <title>multiview_platform.mono_multi_view_classifiers.multiview_classifiers.fusion.Methods.EarlyFusionPackage package &#8212; SuMMIT 0 documentation</title>
     <link rel="stylesheet" href="../_static/classic.css" type="text/css" />
     <link rel="stylesheet" href="../_static/pygments.css" type="text/css" />
diff --git a/docs/build/references/multiview_platform.mono_multi_view_classifiers.multiview_classifiers.fusion.Methods.LateFusionPackage.html b/docs/build/references/multiview_platform.mono_multi_view_classifiers.multiview_classifiers.fusion.Methods.LateFusionPackage.html
index 1bcfc2e24df209e2dd2cf6539f99fc21b44e1c17..dffb0678f982c5dc2005d41d3848c2c2c2629363 100644
--- a/docs/build/references/multiview_platform.mono_multi_view_classifiers.multiview_classifiers.fusion.Methods.LateFusionPackage.html
+++ b/docs/build/references/multiview_platform.mono_multi_view_classifiers.multiview_classifiers.fusion.Methods.LateFusionPackage.html
@@ -1,9 +1,11 @@
 
-<!DOCTYPE html>
+<!DOCTYPE html PUBLIC "-//W3C//DTD XHTML 1.0 Transitional//EN"
+  "http://www.w3.org/TR/xhtml1/DTD/xhtml1-transitional.dtd">
 
-<html>
+<html xmlns="http://www.w3.org/1999/xhtml">
   <head>
-    <meta charset="utf-8" />
+    <meta http-equiv="X-UA-Compatible" content="IE=Edge" />
+    <meta http-equiv="Content-Type" content="text/html; charset=utf-8" />
     <title>multiview_platform.mono_multi_view_classifiers.multiview_classifiers.fusion.Methods.LateFusionPackage package &#8212; SuMMIT 0 documentation</title>
     <link rel="stylesheet" href="../_static/classic.css" type="text/css" />
     <link rel="stylesheet" href="../_static/pygments.css" type="text/css" />
diff --git a/docs/build/references/multiview_platform.mono_multi_view_classifiers.multiview_classifiers.fusion.Methods.html b/docs/build/references/multiview_platform.mono_multi_view_classifiers.multiview_classifiers.fusion.Methods.html
index 76271406284069fdec95f8bb25d1597776ac3ac0..e4fdffd46bdf7e8ac2447c87c5d7baf43c787aac 100644
--- a/docs/build/references/multiview_platform.mono_multi_view_classifiers.multiview_classifiers.fusion.Methods.html
+++ b/docs/build/references/multiview_platform.mono_multi_view_classifiers.multiview_classifiers.fusion.Methods.html
@@ -1,9 +1,11 @@
 
-<!DOCTYPE html>
+<!DOCTYPE html PUBLIC "-//W3C//DTD XHTML 1.0 Transitional//EN"
+  "http://www.w3.org/TR/xhtml1/DTD/xhtml1-transitional.dtd">
 
-<html>
+<html xmlns="http://www.w3.org/1999/xhtml">
   <head>
-    <meta charset="utf-8" />
+    <meta http-equiv="X-UA-Compatible" content="IE=Edge" />
+    <meta http-equiv="Content-Type" content="text/html; charset=utf-8" />
     <title>multiview_platform.mono_multi_view_classifiers.multiview_classifiers.fusion.Methods package &#8212; SuMMIT 0 documentation</title>
     <link rel="stylesheet" href="../_static/classic.css" type="text/css" />
     <link rel="stylesheet" href="../_static/pygments.css" type="text/css" />
diff --git a/docs/build/references/multiview_platform.mono_multi_view_classifiers.multiview_classifiers.fusion.html b/docs/build/references/multiview_platform.mono_multi_view_classifiers.multiview_classifiers.fusion.html
index 81c878e85a22f7e053230b9b357f775f55553dc4..3a83012602e8bd620b89ce7eec839018335975fa 100644
--- a/docs/build/references/multiview_platform.mono_multi_view_classifiers.multiview_classifiers.fusion.html
+++ b/docs/build/references/multiview_platform.mono_multi_view_classifiers.multiview_classifiers.fusion.html
@@ -1,9 +1,11 @@
 
-<!DOCTYPE html>
+<!DOCTYPE html PUBLIC "-//W3C//DTD XHTML 1.0 Transitional//EN"
+  "http://www.w3.org/TR/xhtml1/DTD/xhtml1-transitional.dtd">
 
-<html>
+<html xmlns="http://www.w3.org/1999/xhtml">
   <head>
-    <meta charset="utf-8" />
+    <meta http-equiv="X-UA-Compatible" content="IE=Edge" />
+    <meta http-equiv="Content-Type" content="text/html; charset=utf-8" />
     <title>multiview_platform.mono_multi_view_classifiers.multiview_classifiers.fusion package &#8212; SuMMIT 0 documentation</title>
     <link rel="stylesheet" href="../_static/classic.css" type="text/css" />
     <link rel="stylesheet" href="../_static/pygments.css" type="text/css" />
diff --git a/docs/build/references/multiview_platform.mono_multi_view_classifiers.multiview_classifiers.html b/docs/build/references/multiview_platform.mono_multi_view_classifiers.multiview_classifiers.html
index b638e9748e25bf247e7110f1f50fdd72c264c7f5..b4ca993a150deb66223c9a8d62326a3460a5da5c 100644
--- a/docs/build/references/multiview_platform.mono_multi_view_classifiers.multiview_classifiers.html
+++ b/docs/build/references/multiview_platform.mono_multi_view_classifiers.multiview_classifiers.html
@@ -1,9 +1,11 @@
 
-<!DOCTYPE html>
+<!DOCTYPE html PUBLIC "-//W3C//DTD XHTML 1.0 Transitional//EN"
+  "http://www.w3.org/TR/xhtml1/DTD/xhtml1-transitional.dtd">
 
-<html>
+<html xmlns="http://www.w3.org/1999/xhtml">
   <head>
-    <meta charset="utf-8" />
+    <meta http-equiv="X-UA-Compatible" content="IE=Edge" />
+    <meta http-equiv="Content-Type" content="text/html; charset=utf-8" />
     <title>multiview_platform.mono_multi_view_classifiers.multiview_classifiers package &#8212; SuMMIT 0 documentation</title>
     <link rel="stylesheet" href="../_static/classic.css" type="text/css" />
     <link rel="stylesheet" href="../_static/pygments.css" type="text/css" />
@@ -56,28 +58,28 @@
 <li class="toctree-l2"><a class="reference internal" href="multiview_platform.mono_multi_view_classifiers.multiview_classifiers.difficulty_fusion.html#submodules">Submodules</a></li>
 <li class="toctree-l2"><a class="reference internal" href="multiview_platform.mono_multi_view_classifiers.multiview_classifiers.difficulty_fusion.html#multiview-platform-mono-multi-view-classifiers-multiview-classifiers-difficulty-fusion-analyze-results-module">multiview_platform.mono_multi_view_classifiers.multiview_classifiers.difficulty_fusion.analyze_results module</a></li>
 <li class="toctree-l2"><a class="reference internal" href="multiview_platform.mono_multi_view_classifiers.multiview_classifiers.difficulty_fusion.html#multiview-platform-mono-multi-view-classifiers-multiview-classifiers-difficulty-fusion-difficulty-fusion-module">multiview_platform.mono_multi_view_classifiers.multiview_classifiers.difficulty_fusion.difficulty_fusion module</a></li>
-<li class="toctree-l2"><a class="reference internal" href="multiview_platform.mono_multi_view_classifiers.multiview_classifiers.difficulty_fusion.html#module-multiview_platform.mono_multi_view_classifiers.multiview_classifiers.difficulty_fusion">Module contents</a></li>
+<li class="toctree-l2"><a class="reference internal" href="multiview_platform.mono_multi_view_classifiers.multiview_classifiers.difficulty_fusion.html#module-contents">Module contents</a></li>
 </ul>
 </li>
 <li class="toctree-l1"><a class="reference internal" href="multiview_platform.mono_multi_view_classifiers.multiview_classifiers.disagree_fusion.html">multiview_platform.mono_multi_view_classifiers.multiview_classifiers.disagree_fusion package</a><ul>
 <li class="toctree-l2"><a class="reference internal" href="multiview_platform.mono_multi_view_classifiers.multiview_classifiers.disagree_fusion.html#submodules">Submodules</a></li>
 <li class="toctree-l2"><a class="reference internal" href="multiview_platform.mono_multi_view_classifiers.multiview_classifiers.disagree_fusion.html#multiview-platform-mono-multi-view-classifiers-multiview-classifiers-disagree-fusion-analyze-results-module">multiview_platform.mono_multi_view_classifiers.multiview_classifiers.disagree_fusion.analyze_results module</a></li>
 <li class="toctree-l2"><a class="reference internal" href="multiview_platform.mono_multi_view_classifiers.multiview_classifiers.disagree_fusion.html#multiview-platform-mono-multi-view-classifiers-multiview-classifiers-disagree-fusion-disagree-fusion-module">multiview_platform.mono_multi_view_classifiers.multiview_classifiers.disagree_fusion.disagree_fusion module</a></li>
-<li class="toctree-l2"><a class="reference internal" href="multiview_platform.mono_multi_view_classifiers.multiview_classifiers.disagree_fusion.html#module-multiview_platform.mono_multi_view_classifiers.multiview_classifiers.disagree_fusion">Module contents</a></li>
+<li class="toctree-l2"><a class="reference internal" href="multiview_platform.mono_multi_view_classifiers.multiview_classifiers.disagree_fusion.html#module-contents">Module contents</a></li>
 </ul>
 </li>
 <li class="toctree-l1"><a class="reference internal" href="multiview_platform.mono_multi_view_classifiers.multiview_classifiers.double_fault_fusion.html">multiview_platform.mono_multi_view_classifiers.multiview_classifiers.double_fault_fusion package</a><ul>
 <li class="toctree-l2"><a class="reference internal" href="multiview_platform.mono_multi_view_classifiers.multiview_classifiers.double_fault_fusion.html#submodules">Submodules</a></li>
 <li class="toctree-l2"><a class="reference internal" href="multiview_platform.mono_multi_view_classifiers.multiview_classifiers.double_fault_fusion.html#multiview-platform-mono-multi-view-classifiers-multiview-classifiers-double-fault-fusion-analyze-results-module">multiview_platform.mono_multi_view_classifiers.multiview_classifiers.double_fault_fusion.analyze_results module</a></li>
 <li class="toctree-l2"><a class="reference internal" href="multiview_platform.mono_multi_view_classifiers.multiview_classifiers.double_fault_fusion.html#multiview-platform-mono-multi-view-classifiers-multiview-classifiers-double-fault-fusion-double-fault-fusion-module">multiview_platform.mono_multi_view_classifiers.multiview_classifiers.double_fault_fusion.double_fault_fusion module</a></li>
-<li class="toctree-l2"><a class="reference internal" href="multiview_platform.mono_multi_view_classifiers.multiview_classifiers.double_fault_fusion.html#module-multiview_platform.mono_multi_view_classifiers.multiview_classifiers.double_fault_fusion">Module contents</a></li>
+<li class="toctree-l2"><a class="reference internal" href="multiview_platform.mono_multi_view_classifiers.multiview_classifiers.double_fault_fusion.html#module-contents">Module contents</a></li>
 </ul>
 </li>
 <li class="toctree-l1"><a class="reference internal" href="multiview_platform.mono_multi_view_classifiers.multiview_classifiers.entropy_fusion.html">multiview_platform.mono_multi_view_classifiers.multiview_classifiers.entropy_fusion package</a><ul>
 <li class="toctree-l2"><a class="reference internal" href="multiview_platform.mono_multi_view_classifiers.multiview_classifiers.entropy_fusion.html#submodules">Submodules</a></li>
 <li class="toctree-l2"><a class="reference internal" href="multiview_platform.mono_multi_view_classifiers.multiview_classifiers.entropy_fusion.html#multiview-platform-mono-multi-view-classifiers-multiview-classifiers-entropy-fusion-analyze-results-module">multiview_platform.mono_multi_view_classifiers.multiview_classifiers.entropy_fusion.analyze_results module</a></li>
 <li class="toctree-l2"><a class="reference internal" href="multiview_platform.mono_multi_view_classifiers.multiview_classifiers.entropy_fusion.html#multiview-platform-mono-multi-view-classifiers-multiview-classifiers-entropy-fusion-entropy-fusion-module">multiview_platform.mono_multi_view_classifiers.multiview_classifiers.entropy_fusion.entropy_fusion module</a></li>
-<li class="toctree-l2"><a class="reference internal" href="multiview_platform.mono_multi_view_classifiers.multiview_classifiers.entropy_fusion.html#module-multiview_platform.mono_multi_view_classifiers.multiview_classifiers.entropy_fusion">Module contents</a></li>
+<li class="toctree-l2"><a class="reference internal" href="multiview_platform.mono_multi_view_classifiers.multiview_classifiers.entropy_fusion.html#module-contents">Module contents</a></li>
 </ul>
 </li>
 <li class="toctree-l1"><a class="reference internal" href="multiview_platform.mono_multi_view_classifiers.multiview_classifiers.fat_late_fusion.html">multiview_platform.mono_multi_view_classifiers.multiview_classifiers.fat_late_fusion package</a><ul>
@@ -140,8 +142,8 @@
 </ul>
 </div>
 </div>
-<div class="section" id="module-multiview_platform.mono_multi_view_classifiers.multiview_classifiers">
-<span id="module-contents"></span><h2>Module contents<a class="headerlink" href="#module-multiview_platform.mono_multi_view_classifiers.multiview_classifiers" title="Permalink to this headline">¶</a></h2>
+<div class="section" id="module-contents">
+<h2>Module contents<a class="headerlink" href="#module-contents" title="Permalink to this headline">¶</a></h2>
 </div>
 </div>
 
@@ -155,7 +157,7 @@
   <ul>
 <li><a class="reference internal" href="#">multiview_platform.mono_multi_view_classifiers.multiview_classifiers package</a><ul>
 <li><a class="reference internal" href="#subpackages">Subpackages</a></li>
-<li><a class="reference internal" href="#module-multiview_platform.mono_multi_view_classifiers.multiview_classifiers">Module contents</a></li>
+<li><a class="reference internal" href="#module-contents">Module contents</a></li>
 </ul>
 </li>
 </ul>
diff --git a/docs/build/references/multiview_platform.mono_multi_view_classifiers.multiview_classifiers.pseudo_cq_fusion.html b/docs/build/references/multiview_platform.mono_multi_view_classifiers.multiview_classifiers.pseudo_cq_fusion.html
index 9c2390e46f3448602bc0cac9e9e708274c6cbcac..472d87bcb662db405e6ab7d09c0eeaad3e098e10 100644
--- a/docs/build/references/multiview_platform.mono_multi_view_classifiers.multiview_classifiers.pseudo_cq_fusion.html
+++ b/docs/build/references/multiview_platform.mono_multi_view_classifiers.multiview_classifiers.pseudo_cq_fusion.html
@@ -1,9 +1,11 @@
 
-<!DOCTYPE html>
+<!DOCTYPE html PUBLIC "-//W3C//DTD XHTML 1.0 Transitional//EN"
+  "http://www.w3.org/TR/xhtml1/DTD/xhtml1-transitional.dtd">
 
-<html>
+<html xmlns="http://www.w3.org/1999/xhtml">
   <head>
-    <meta charset="utf-8" />
+    <meta http-equiv="X-UA-Compatible" content="IE=Edge" />
+    <meta http-equiv="Content-Type" content="text/html; charset=utf-8" />
     <title>multiview_platform.mono_multi_view_classifiers.multiview_classifiers.pseudo_cq_fusion package &#8212; SuMMIT 0 documentation</title>
     <link rel="stylesheet" href="../_static/classic.css" type="text/css" />
     <link rel="stylesheet" href="../_static/pygments.css" type="text/css" />
diff --git a/docs/build/references/multiview_platform.mono_multi_view_classifiers.utils.html b/docs/build/references/multiview_platform.mono_multi_view_classifiers.utils.html
index 78ae1c63f2e5dea33e682bbfb2ba5c64c85f706b..3e77e03e79c820a960ec33b1b80c4f4715ab29d3 100644
--- a/docs/build/references/multiview_platform.mono_multi_view_classifiers.utils.html
+++ b/docs/build/references/multiview_platform.mono_multi_view_classifiers.utils.html
@@ -1,9 +1,11 @@
 
-<!DOCTYPE html>
+<!DOCTYPE html PUBLIC "-//W3C//DTD XHTML 1.0 Transitional//EN"
+  "http://www.w3.org/TR/xhtml1/DTD/xhtml1-transitional.dtd">
 
-<html>
+<html xmlns="http://www.w3.org/1999/xhtml">
   <head>
-    <meta charset="utf-8" />
+    <meta http-equiv="X-UA-Compatible" content="IE=Edge" />
+    <meta http-equiv="Content-Type" content="text/html; charset=utf-8" />
     <title>multiview_platform.mono_multi_view_classifiers.utils package &#8212; SuMMIT 0 documentation</title>
     <link rel="stylesheet" href="../_static/classic.css" type="text/css" />
     <link rel="stylesheet" href="../_static/pygments.css" type="text/css" />
@@ -51,1281 +53,35 @@
 <div class="section" id="submodules">
 <h2>Submodules<a class="headerlink" href="#submodules" title="Permalink to this headline">¶</a></h2>
 </div>
-<div class="section" id="module-multiview_platform.mono_multi_view_classifiers.utils.configuration">
-<span id="multiview-platform-mono-multi-view-classifiers-utils-configuration-module"></span><h2>multiview_platform.mono_multi_view_classifiers.utils.configuration module<a class="headerlink" href="#module-multiview_platform.mono_multi_view_classifiers.utils.configuration" title="Permalink to this headline">¶</a></h2>
-<dl class="py function">
-<dt id="multiview_platform.mono_multi_view_classifiers.utils.configuration.get_the_args">
-<code class="sig-name descname">get_the_args</code><span class="sig-paren">(</span><em class="sig-param"><span class="n">path_to_config_file</span><span class="o">=</span><span class="default_value">'../config_files/config.yml'</span></em><span class="sig-paren">)</span><a class="headerlink" href="#multiview_platform.mono_multi_view_classifiers.utils.configuration.get_the_args" title="Permalink to this definition">¶</a></dt>
-<dd><p>Extracts the arguments from a ‘.yml’ configuration file.</p>
-<dl class="field-list simple">
-<dt class="field-odd">Parameters</dt>
-<dd class="field-odd"><p><strong>path_to_config_file</strong> (<em>str</em><em>, </em><em>path to the yml file containing the configuration</em>) – </p>
-</dd>
-<dt class="field-even">Returns</dt>
-<dd class="field-even"><p><ul class="simple">
-<li><p><strong>yaml_config</strong> (<em>dict</em>) – The dictionary containing the configuration for the benchmark.</p></li>
-</ul>
-</p>
-</dd>
-</dl>
-</dd></dl>
-
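The entry above removes the rendered docstring of `get_the_args`; for context, a minimal usage sketch of the documented signature (the module path is the old package name used throughout these pages, which the rest of this diff renames to `summit`):

```python
# Usage sketch for the configuration loader documented above; only the
# signature is taken from the docs, the call values are illustrative.
from multiview_platform.mono_multi_view_classifiers.utils.configuration import get_the_args

config = get_the_args(path_to_config_file="/absolute/path/to/config.yml")
print(type(config))  # a dict mirroring the YAML file's top-level keys
```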
-<dl class="py function">
-<dt id="multiview_platform.mono_multi_view_classifiers.utils.configuration.pass_default_config">
-<code class="sig-name descname">pass_default_config</code><span class="sig-paren">(</span><em class="sig-param"><span class="n">log</span><span class="o">=</span><span class="default_value">True</span></em>, <em class="sig-param"><span class="n">name</span><span class="o">=</span><span class="default_value">['plausible']</span></em>, <em class="sig-param"><span class="n">label</span><span class="o">=</span><span class="default_value">'_'</span></em>, <em class="sig-param"><span class="n">file_type</span><span class="o">=</span><span class="default_value">'.hdf5'</span></em>, <em class="sig-param"><span class="n">views</span><span class="o">=</span><span class="default_value">None</span></em>, <em class="sig-param"><span class="n">pathf</span><span class="o">=</span><span class="default_value">'../data/'</span></em>, <em class="sig-param"><span class="n">nice</span><span class="o">=</span><span class="default_value">0</span></em>, <em class="sig-param"><span class="n">random_state</span><span class="o">=</span><span class="default_value">42</span></em>, <em class="sig-param"><span class="n">nb_cores</span><span class="o">=</span><span class="default_value">1</span></em>, <em class="sig-param"><span class="n">full</span><span class="o">=</span><span class="default_value">True</span></em>, <em class="sig-param"><span class="n">debug</span><span class="o">=</span><span class="default_value">False</span></em>, <em class="sig-param"><span class="n">add_noise</span><span class="o">=</span><span class="default_value">False</span></em>, <em class="sig-param"><span class="n">noise_std</span><span class="o">=</span><span class="default_value">0.0</span></em>, <em class="sig-param"><span class="n">res_dir</span><span class="o">=</span><span class="default_value">'../results/'</span></em>, <em class="sig-param"><span class="n">track_tracebacks</span><span class="o">=</span><span class="default_value">True</span></em>, <em class="sig-param"><span class="n">split</span><span class="o">=</span><span class="default_value">0.49</span></em>, <em class="sig-param"><span class="n">nb_folds</span><span class="o">=</span><span class="default_value">5</span></em>, <em class="sig-param"><span class="n">nb_class</span><span class="o">=</span><span class="default_value">None</span></em>, <em class="sig-param"><span class="n">classes</span><span class="o">=</span><span class="default_value">None</span></em>, <em class="sig-param"><span class="n">type</span><span class="o">=</span><span class="default_value">['multiview']</span></em>, <em class="sig-param"><span class="n">algos_monoview</span><span class="o">=</span><span class="default_value">['all']</span></em>, <em class="sig-param"><span class="n">algos_multiview</span><span class="o">=</span><span class="default_value">['svm_jumbo_fusion']</span></em>, <em class="sig-param"><span class="n">stats_iter</span><span class="o">=</span><span class="default_value">2</span></em>, <em class="sig-param"><span class="n">metrics</span><span class="o">=</span><span class="default_value">{'accuracy_score': {}, 'f1_score': {}}</span></em>, <em class="sig-param"><span class="n">metric_princ</span><span class="o">=</span><span class="default_value">'accuracy_score'</span></em>, <em class="sig-param"><span class="n">hps_type</span><span class="o">=</span><span class="default_value">'Random'</span></em>, <em class="sig-param"><span class="n">hps_iter</span><span class="o">=</span><span class="default_value">1</span></em>, <em class="sig-param"><span class="n">hps_kwargs</span><span class="o">=</span><span class="default_value">{'equivalent_draws': True, 'n_iter': 10}</span></em>, <em class="sig-param"><span class="o">**</span><span class="n">kwargs</span></em><span class="sig-paren">)</span><a class="headerlink" href="#multiview_platform.mono_multi_view_classifiers.utils.configuration.pass_default_config" title="Permalink to this definition">¶</a></dt>
-<dd><dl class="field-list simple">
-<dt class="field-odd">Parameters</dt>
-<dd class="field-odd"><ul class="simple">
-<li><p><strong>log</strong> – </p></li>
-<li><p><strong>name</strong> – </p></li>
-<li><p><strong>label</strong> – </p></li>
-<li><p><strong>file_type</strong> – </p></li>
-<li><p><strong>views</strong> – </p></li>
-<li><p><strong>pathf</strong> – </p></li>
-<li><p><strong>nice</strong> – </p></li>
-<li><p><strong>random_state</strong> – </p></li>
-<li><p><strong>nb_cores</strong> – </p></li>
-<li><p><strong>full</strong> – </p></li>
-<li><p><strong>debug</strong> – </p></li>
-<li><p><strong>add_noise</strong> – </p></li>
-<li><p><strong>noise_std</strong> – </p></li>
-<li><p><strong>res_dir</strong> – </p></li>
-<li><p><strong>track_tracebacks</strong> – </p></li>
-<li><p><strong>split</strong> – </p></li>
-<li><p><strong>nb_folds</strong> – </p></li>
-<li><p><strong>nb_class</strong> – </p></li>
-<li><p><strong>classes</strong> – </p></li>
-<li><p><strong>type</strong> – </p></li>
-<li><p><strong>algos_monoview</strong> – </p></li>
-<li><p><strong>algos_multiview</strong> – </p></li>
-<li><p><strong>stats_iter</strong> – </p></li>
-<li><p><strong>metrics</strong> – </p></li>
-<li><p><strong>metric_princ</strong> – </p></li>
-<li><p><strong>hps_type</strong> – </p></li>
-<li><p><strong>hps_iter</strong> – </p></li>
-</ul>
-</dd>
-<dt class="field-even">Returns</dt>
-<dd class="field-even"><p></p>
-</dd>
-</dl>
-</dd></dl>
-
-<dl class="py function">
-<dt id="multiview_platform.mono_multi_view_classifiers.utils.configuration.save_config">
-<code class="sig-name descname">save_config</code><span class="sig-paren">(</span><em class="sig-param"><span class="n">directory</span></em>, <em class="sig-param"><span class="n">arguments</span></em><span class="sig-paren">)</span><a class="headerlink" href="#multiview_platform.mono_multi_view_classifiers.utils.configuration.save_config" title="Permalink to this definition">¶</a></dt>
-<dd><p>Saves the config file in the result directory.</p>
-</dd></dl>
-
+<div class="section" id="multiview-platform-mono-multi-view-classifiers-utils-configuration-module">
+<h2>multiview_platform.mono_multi_view_classifiers.utils.configuration module<a class="headerlink" href="#multiview-platform-mono-multi-view-classifiers-utils-configuration-module" title="Permalink to this headline">¶</a></h2>
 </div>
-<div class="section" id="module-multiview_platform.mono_multi_view_classifiers.utils.dataset">
-<span id="multiview-platform-mono-multi-view-classifiers-utils-dataset-module"></span><h2>multiview_platform.mono_multi_view_classifiers.utils.dataset module<a class="headerlink" href="#module-multiview_platform.mono_multi_view_classifiers.utils.dataset" title="Permalink to this headline">¶</a></h2>
-<dl class="py class">
-<dt id="multiview_platform.mono_multi_view_classifiers.utils.dataset.Dataset">
-<em class="property">class </em><code class="sig-name descname">Dataset</code><a class="headerlink" href="#multiview_platform.mono_multi_view_classifiers.utils.dataset.Dataset" title="Permalink to this definition">¶</a></dt>
-<dd><p>Bases: <code class="xref py py-class docutils literal notranslate"><span class="pre">object</span></code></p>
-<dl class="py method">
-<dt id="multiview_platform.mono_multi_view_classifiers.utils.dataset.Dataset.check_selected_label_names">
-<code class="sig-name descname">check_selected_label_names</code><span class="sig-paren">(</span><em class="sig-param">nb_labels=None</em>, <em class="sig-param">selected_label_names=None</em>, <em class="sig-param">random_state=RandomState(MT19937) at 0x7FC3CBFFD678</em><span class="sig-paren">)</span><a class="headerlink" href="#multiview_platform.mono_multi_view_classifiers.utils.dataset.Dataset.check_selected_label_names" title="Permalink to this definition">¶</a></dt>
-<dd></dd></dl>
-
-<dl class="py method">
-<dt id="multiview_platform.mono_multi_view_classifiers.utils.dataset.Dataset.filter">
-<em class="property">abstract </em><code class="sig-name descname">filter</code><span class="sig-paren">(</span><em class="sig-param"><span class="n">labels</span></em>, <em class="sig-param"><span class="n">label_names</span></em>, <em class="sig-param"><span class="n">example_indices</span></em>, <em class="sig-param"><span class="n">view_names</span></em>, <em class="sig-param"><span class="n">path</span><span class="o">=</span><span class="default_value">None</span></em><span class="sig-paren">)</span><a class="headerlink" href="#multiview_platform.mono_multi_view_classifiers.utils.dataset.Dataset.filter" title="Permalink to this definition">¶</a></dt>
-<dd></dd></dl>
-
-<dl class="py method">
-<dt id="multiview_platform.mono_multi_view_classifiers.utils.dataset.Dataset.get_label_names">
-<em class="property">abstract </em><code class="sig-name descname">get_label_names</code><span class="sig-paren">(</span><em class="sig-param"><span class="n">example_indices</span><span class="o">=</span><span class="default_value">None</span></em><span class="sig-paren">)</span><a class="headerlink" href="#multiview_platform.mono_multi_view_classifiers.utils.dataset.Dataset.get_label_names" title="Permalink to this definition">¶</a></dt>
-<dd></dd></dl>
-
-<dl class="py method">
-<dt id="multiview_platform.mono_multi_view_classifiers.utils.dataset.Dataset.get_labels">
-<em class="property">abstract </em><code class="sig-name descname">get_labels</code><span class="sig-paren">(</span><em class="sig-param"><span class="n">example_indices</span><span class="o">=</span><span class="default_value">None</span></em><span class="sig-paren">)</span><a class="headerlink" href="#multiview_platform.mono_multi_view_classifiers.utils.dataset.Dataset.get_labels" title="Permalink to this definition">¶</a></dt>
-<dd></dd></dl>
-
-<dl class="py method">
-<dt id="multiview_platform.mono_multi_view_classifiers.utils.dataset.Dataset.get_nb_examples">
-<em class="property">abstract </em><code class="sig-name descname">get_nb_examples</code><span class="sig-paren">(</span><span class="sig-paren">)</span><a class="headerlink" href="#multiview_platform.mono_multi_view_classifiers.utils.dataset.Dataset.get_nb_examples" title="Permalink to this definition">¶</a></dt>
-<dd></dd></dl>
-
-<dl class="py method">
-<dt id="multiview_platform.mono_multi_view_classifiers.utils.dataset.Dataset.get_shape">
-<code class="sig-name descname">get_shape</code><span class="sig-paren">(</span><em class="sig-param"><span class="n">view_index</span><span class="o">=</span><span class="default_value">0</span></em>, <em class="sig-param"><span class="n">example_indices</span><span class="o">=</span><span class="default_value">None</span></em><span class="sig-paren">)</span><a class="headerlink" href="#multiview_platform.mono_multi_view_classifiers.utils.dataset.Dataset.get_shape" title="Permalink to this definition">¶</a></dt>
-<dd><p>Gets the shape of the requested view for the selected examples.</p>
-<dl class="field-list simple">
-<dt class="field-odd">Parameters</dt>
-<dd class="field-odd"><ul class="simple">
-<li><p><strong>view_index</strong> (<em>int</em>) – The index of the view to extract</p></li>
-<li><p><strong>example_indices</strong> (<em>numpy.ndarray</em>) – The array containing the indices of the examples to extract.</p></li>
-</ul>
-</dd>
-<dt class="field-even">Returns</dt>
-<dd class="field-even"><p></p>
-</dd>
-<dt class="field-odd">Return type</dt>
-<dd class="field-odd"><p>Tuple containing the shape</p>
-</dd>
-</dl>
-</dd></dl>
-
-<dl class="py method">
-<dt id="multiview_platform.mono_multi_view_classifiers.utils.dataset.Dataset.get_v">
-<em class="property">abstract </em><code class="sig-name descname">get_v</code><span class="sig-paren">(</span><em class="sig-param"><span class="n">view_index</span></em>, <em class="sig-param"><span class="n">example_indices</span><span class="o">=</span><span class="default_value">None</span></em><span class="sig-paren">)</span><a class="headerlink" href="#multiview_platform.mono_multi_view_classifiers.utils.dataset.Dataset.get_v" title="Permalink to this definition">¶</a></dt>
-<dd></dd></dl>
-
-<dl class="py method">
-<dt id="multiview_platform.mono_multi_view_classifiers.utils.dataset.Dataset.init_example_indces">
-<code class="sig-name descname">init_example_indces</code><span class="sig-paren">(</span><em class="sig-param"><span class="n">example_indices</span><span class="o">=</span><span class="default_value">None</span></em><span class="sig-paren">)</span><a class="headerlink" href="#multiview_platform.mono_multi_view_classifiers.utils.dataset.Dataset.init_example_indces" title="Permalink to this definition">¶</a></dt>
-<dd><p>If no example indices are provided, selects all the examples.</p>
-</dd></dl>
-
-<dl class="py method">
-<dt id="multiview_platform.mono_multi_view_classifiers.utils.dataset.Dataset.select_labels">
-<code class="sig-name descname">select_labels</code><span class="sig-paren">(</span><em class="sig-param"><span class="n">selected_label_names</span></em><span class="sig-paren">)</span><a class="headerlink" href="#multiview_platform.mono_multi_view_classifiers.utils.dataset.Dataset.select_labels" title="Permalink to this definition">¶</a></dt>
-<dd></dd></dl>
-
-<dl class="py method">
-<dt id="multiview_platform.mono_multi_view_classifiers.utils.dataset.Dataset.select_views_and_labels">
-<code class="sig-name descname">select_views_and_labels</code><span class="sig-paren">(</span><em class="sig-param"><span class="n">nb_labels</span><span class="o">=</span><span class="default_value">None</span></em>, <em class="sig-param"><span class="n">selected_label_names</span><span class="o">=</span><span class="default_value">None</span></em>, <em class="sig-param"><span class="n">random_state</span><span class="o">=</span><span class="default_value">None</span></em>, <em class="sig-param"><span class="n">view_names</span><span class="o">=</span><span class="default_value">None</span></em>, <em class="sig-param"><span class="n">path_for_new</span><span class="o">=</span><span class="default_value">'../data/'</span></em><span class="sig-paren">)</span><a class="headerlink" href="#multiview_platform.mono_multi_view_classifiers.utils.dataset.Dataset.select_views_and_labels" title="Permalink to this definition">¶</a></dt>
-<dd></dd></dl>
-
-<dl class="py method">
-<dt id="multiview_platform.mono_multi_view_classifiers.utils.dataset.Dataset.to_numpy_array">
-<code class="sig-name descname">to_numpy_array</code><span class="sig-paren">(</span><em class="sig-param"><span class="n">example_indices</span><span class="o">=</span><span class="default_value">None</span></em>, <em class="sig-param"><span class="n">view_indices</span><span class="o">=</span><span class="default_value">None</span></em><span class="sig-paren">)</span><a class="headerlink" href="#multiview_platform.mono_multi_view_classifiers.utils.dataset.Dataset.to_numpy_array" title="Permalink to this definition">¶</a></dt>
-<dd><p>Concatenates the needed views into one numpy array, while saving the
-limits of each view in a list so they can be retrieved later.</p>
-<dl class="field-list simple">
-<dt class="field-odd">Parameters</dt>
-<dd class="field-odd"><ul class="simple">
-<li><p><strong>example_indices</strong> (<em>array like</em>) – The indices of the examples to extract from the dataset.</p></li>
-<li><p><strong>view_indices</strong> (<em>array like</em>) – The indices of the views to concatenate in the numpy array.</p></li>
-</ul>
-</dd>
-<dt class="field-even">Returns</dt>
-<dd class="field-even"><p><ul class="simple">
-<li><p><strong>concat_views</strong> (<em>numpy array</em>) – The numpy array containing all the needed views.</p></li>
-<li><p><strong>view_limits</strong> (<em>list of int</em>) – The limits of each slice used to extract the views.</p></li>
-</ul>
-</p>
-</dd>
-</dl>
-</dd></dl>
-
-</dd></dl>
-
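The `to_numpy_array` entry removed above describes concatenating views while recording per-view column limits. A standalone sketch of that bookkeeping, assuming `views` is a list of 2-D arrays (this is not the platform's code):

```python
import numpy as np

# Illustrative sketch of the behaviour described for to_numpy_array:
# concatenate the selected views column-wise and record the boundaries
# so each view can be sliced back out of the big array later.
def concat_views(views, view_indices):
    limits = [0]
    for idx in view_indices:
        limits.append(limits[-1] + views[idx].shape[1])
    concatenated = np.concatenate([views[idx] for idx in view_indices], axis=1)
    return concatenated, limits
```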
-<dl class="py class">
-<dt id="multiview_platform.mono_multi_view_classifiers.utils.dataset.HDF5Dataset">
-<em class="property">class </em><code class="sig-name descname">HDF5Dataset</code><span class="sig-paren">(</span><em class="sig-param"><span class="n">views</span><span class="o">=</span><span class="default_value">None</span></em>, <em class="sig-param"><span class="n">labels</span><span class="o">=</span><span class="default_value">None</span></em>, <em class="sig-param"><span class="n">are_sparse</span><span class="o">=</span><span class="default_value">False</span></em>, <em class="sig-param"><span class="n">file_name</span><span class="o">=</span><span class="default_value">'dataset.hdf5'</span></em>, <em class="sig-param"><span class="n">view_names</span><span class="o">=</span><span class="default_value">None</span></em>, <em class="sig-param"><span class="n">path</span><span class="o">=</span><span class="default_value">''</span></em>, <em class="sig-param"><span class="n">hdf5_file</span><span class="o">=</span><span class="default_value">None</span></em>, <em class="sig-param"><span class="n">labels_names</span><span class="o">=</span><span class="default_value">None</span></em>, <em class="sig-param"><span class="n">is_temp</span><span class="o">=</span><span class="default_value">False</span></em>, <em class="sig-param"><span class="n">example_ids</span><span class="o">=</span><span class="default_value">None</span></em><span class="sig-paren">)</span><a class="headerlink" href="#multiview_platform.mono_multi_view_classifiers.utils.dataset.HDF5Dataset" title="Permalink to this definition">¶</a></dt>
-<dd><p>Bases: <a class="reference internal" href="#multiview_platform.mono_multi_view_classifiers.utils.dataset.Dataset" title="multiview_platform.mono_multi_view_classifiers.utils.dataset.Dataset"><code class="xref py py-class docutils literal notranslate"><span class="pre">multiview_platform.mono_multi_view_classifiers.utils.dataset.Dataset</span></code></a></p>
-<p>HDF5-backed Dataset class.</p>
-<p>This class is used to encapsulate the multiview dataset while keeping it stored on the disk instead of in RAM.</p>
-<dl class="field-list simple">
-<dt class="field-odd">Parameters</dt>
-<dd class="field-odd"><ul class="simple">
-<li><p><strong>views</strong> (<em>list of numpy arrays</em><em> or </em><em>None</em>) – The list containing each view of the dataset as a numpy array of shape
-(nb examples, nb features).</p></li>
-<li><p><strong>labels</strong> (<em>numpy array</em><em> or </em><em>None</em>) – The labels for the multiview dataset, of shape (nb examples, ).</p></li>
-<li><p><strong>are_sparse</strong> (<em>list of bool</em><em>, or </em><em>None</em>) – The list of booleans telling whether each view is sparse.</p></li>
-<li><p><strong>file_name</strong> (<em>str</em><em>, or </em><em>None</em>) – The name of the hdf5 file that will be created to store the multiview
-dataset.</p></li>
-<li><p><strong>view_names</strong> (<em>list of str</em><em>, or </em><em>None</em>) – The name of each view.</p></li>
-<li><p><strong>path</strong> (<em>str</em><em>, or </em><em>None</em>) – The path where the hdf5 dataset file will be stored.</p></li>
-<li><p><strong>hdf5_file</strong> (<em>h5py.File object</em><em>, or </em><em>None</em>) – If not None, the dataset will be imported directly from this file.</p></li>
-<li><p><strong>labels_names</strong> (<em>list of str</em><em>, or </em><em>None</em>) – The name for each unique value of the labels given in labels.</p></li>
-<li><p><strong>is_temp</strong> (<em>bool</em>) – Set to True if the benchmark has to use a temporary dataset.</p></li>
-</ul>
-</dd>
-</dl>
-<dl class="py attribute">
-<dt id="multiview_platform.mono_multi_view_classifiers.utils.dataset.HDF5Dataset.dataset">
-<code class="sig-name descname">dataset</code><a class="headerlink" href="#multiview_platform.mono_multi_view_classifiers.utils.dataset.HDF5Dataset.dataset" title="Permalink to this definition">¶</a></dt>
-<dd><p>The h5py file object that points to the hdf5 dataset on the disk.</p>
-<dl class="field-list simple">
-<dt class="field-odd">Type</dt>
-<dd class="field-odd"><p>h5py.File object</p>
-</dd>
-</dl>
-</dd></dl>
-
-<dl class="py attribute">
-<dt id="multiview_platform.mono_multi_view_classifiers.utils.dataset.HDF5Dataset.nb_view">
-<code class="sig-name descname">nb_view</code><a class="headerlink" href="#multiview_platform.mono_multi_view_classifiers.utils.dataset.HDF5Dataset.nb_view" title="Permalink to this definition">¶</a></dt>
-<dd><p>The number of views in the dataset.</p>
-<dl class="field-list simple">
-<dt class="field-odd">Type</dt>
-<dd class="field-odd"><p>int</p>
-</dd>
-</dl>
-</dd></dl>
-
-<dl class="py attribute">
-<dt id="multiview_platform.mono_multi_view_classifiers.utils.dataset.HDF5Dataset.view_dict">
-<code class="sig-name descname">view_dict</code><a class="headerlink" href="#multiview_platform.mono_multi_view_classifiers.utils.dataset.HDF5Dataset.view_dict" title="Permalink to this definition">¶</a></dt>
-<dd><p>The dictionary with the name of each view as the keys and their indices as values.</p>
-<dl class="field-list simple">
-<dt class="field-odd">Type</dt>
-<dd class="field-odd"><p>dict</p>
-</dd>
-</dl>
-</dd></dl>
-
-<dl class="py method">
-<dt id="multiview_platform.mono_multi_view_classifiers.utils.dataset.HDF5Dataset.add_gaussian_noise">
-<code class="sig-name descname">add_gaussian_noise</code><span class="sig-paren">(</span><em class="sig-param"><span class="n">random_state</span></em>, <em class="sig-param"><span class="n">path</span></em>, <em class="sig-param"><span class="n">noise_std</span><span class="o">=</span><span class="default_value">0.15</span></em><span class="sig-paren">)</span><a class="headerlink" href="#multiview_platform.mono_multi_view_classifiers.utils.dataset.HDF5Dataset.add_gaussian_noise" title="Permalink to this definition">¶</a></dt>
-<dd><p>In this function, we add Gaussian noise, centered at 0 and with the specified
-std, to each view, scaled by the view’s range (the noise is
-multiplied by this range), and we crop the noisy signal to the
-view’s attribute limits.
-This is done by creating a new dataset, to keep the clean data.</p>
-</dd></dl>
-
-<dl class="py method">
-<dt id="multiview_platform.mono_multi_view_classifiers.utils.dataset.HDF5Dataset.copy_view">
-<code class="sig-name descname">copy_view</code><span class="sig-paren">(</span><em class="sig-param"><span class="n">target_dataset</span><span class="o">=</span><span class="default_value">None</span></em>, <em class="sig-param"><span class="n">source_view_name</span><span class="o">=</span><span class="default_value">None</span></em>, <em class="sig-param"><span class="n">target_view_index</span><span class="o">=</span><span class="default_value">None</span></em>, <em class="sig-param"><span class="n">example_indices</span><span class="o">=</span><span class="default_value">None</span></em><span class="sig-paren">)</span><a class="headerlink" href="#multiview_platform.mono_multi_view_classifiers.utils.dataset.HDF5Dataset.copy_view" title="Permalink to this definition">¶</a></dt>
-<dd></dd></dl>
-
-<dl class="py method">
-<dt id="multiview_platform.mono_multi_view_classifiers.utils.dataset.HDF5Dataset.filter">
-<code class="sig-name descname">filter</code><span class="sig-paren">(</span><em class="sig-param"><span class="n">labels</span></em>, <em class="sig-param"><span class="n">label_names</span></em>, <em class="sig-param"><span class="n">example_indices</span></em>, <em class="sig-param"><span class="n">view_names</span></em>, <em class="sig-param"><span class="n">path</span><span class="o">=</span><span class="default_value">None</span></em><span class="sig-paren">)</span><a class="headerlink" href="#multiview_platform.mono_multi_view_classifiers.utils.dataset.HDF5Dataset.filter" title="Permalink to this definition">¶</a></dt>
-<dd></dd></dl>
-
-<dl class="py method">
-<dt id="multiview_platform.mono_multi_view_classifiers.utils.dataset.HDF5Dataset.get_label_names">
-<code class="sig-name descname">get_label_names</code><span class="sig-paren">(</span><em class="sig-param"><span class="n">decode</span><span class="o">=</span><span class="default_value">True</span></em>, <em class="sig-param"><span class="n">example_indices</span><span class="o">=</span><span class="default_value">None</span></em><span class="sig-paren">)</span><a class="headerlink" href="#multiview_platform.mono_multi_view_classifiers.utils.dataset.HDF5Dataset.get_label_names" title="Permalink to this definition">¶</a></dt>
-<dd><p>Used to get the list of the label names for the given set of examples</p>
-<dl class="field-list simple">
-<dt class="field-odd">Parameters</dt>
-<dd class="field-odd"><ul class="simple">
-<li><p><strong>decode</strong> (<em>bool</em>) – If True, will decode the label names before listing them</p></li>
-<li><p><strong>example_indices</strong> (<em>numpy.ndarray</em>) – The array containing the indices of the needed examples</p></li>
-</ul>
-</dd>
-</dl>
-</dd></dl>
-
-<dl class="py method">
-<dt id="multiview_platform.mono_multi_view_classifiers.utils.dataset.HDF5Dataset.get_labels">
-<code class="sig-name descname">get_labels</code><span class="sig-paren">(</span><em class="sig-param"><span class="n">example_indices</span><span class="o">=</span><span class="default_value">None</span></em><span class="sig-paren">)</span><a class="headerlink" href="#multiview_platform.mono_multi_view_classifiers.utils.dataset.HDF5Dataset.get_labels" title="Permalink to this definition">¶</a></dt>
-<dd><p>Gets the label array for the requested examples.</p>
-<dl class="field-list simple">
-<dt class="field-odd">Parameters</dt>
-<dd class="field-odd"><p><strong>example_indices</strong> (<em>numpy.ndarray</em>) – The array containing the indices of the examples to extract.</p>
-</dd>
-<dt class="field-even">Returns</dt>
-<dd class="field-even"><p>The labels of the requested examples.</p>
-</dd>
-<dt class="field-odd">Return type</dt>
-<dd class="field-odd"><p>numpy.ndarray</p>
-</dd>
-</dl>
-</dd></dl>
-
-<dl class="py method">
-<dt id="multiview_platform.mono_multi_view_classifiers.utils.dataset.HDF5Dataset.get_name">
-<code class="sig-name descname">get_name</code><span class="sig-paren">(</span><span class="sig-paren">)</span><a class="headerlink" href="#multiview_platform.mono_multi_view_classifiers.utils.dataset.HDF5Dataset.get_name" title="Permalink to this definition">¶</a></dt>
-<dd><p>Only works if the file name does not contain multiple dots.</p>
-</dd></dl>
-
-<dl class="py method">
-<dt id="multiview_platform.mono_multi_view_classifiers.utils.dataset.HDF5Dataset.get_nb_class">
-<code class="sig-name descname">get_nb_class</code><span class="sig-paren">(</span><em class="sig-param"><span class="n">example_indices</span><span class="o">=</span><span class="default_value">None</span></em><span class="sig-paren">)</span><a class="headerlink" href="#multiview_platform.mono_multi_view_classifiers.utils.dataset.HDF5Dataset.get_nb_class" title="Permalink to this definition">¶</a></dt>
-<dd><p>Gets the number of classes of the dataset for the requested examples.</p>
-<dl class="field-list simple">
-<dt class="field-odd">Parameters</dt>
-<dd class="field-odd"><p><strong>example_indices</strong> (<em>numpy.ndarray</em>) – The array containing the indices of the examples to extract.</p>
-</dd>
-<dt class="field-even">Returns</dt>
-<dd class="field-even"><p>The number of classes.</p>
-</dd>
-<dt class="field-odd">Return type</dt>
-<dd class="field-odd"><p>int</p>
-</dd>
-</dl>
-</dd></dl>
-
-<dl class="py method">
-<dt id="multiview_platform.mono_multi_view_classifiers.utils.dataset.HDF5Dataset.get_nb_examples">
-<code class="sig-name descname">get_nb_examples</code><span class="sig-paren">(</span><span class="sig-paren">)</span><a class="headerlink" href="#multiview_platform.mono_multi_view_classifiers.utils.dataset.HDF5Dataset.get_nb_examples" title="Permalink to this definition">¶</a></dt>
-<dd><p>Used to get the number of examples available in the dataset.</p>
-</dd></dl>
-
-<dl class="py method">
-<dt id="multiview_platform.mono_multi_view_classifiers.utils.dataset.HDF5Dataset.get_v">
-<code class="sig-name descname">get_v</code><span class="sig-paren">(</span><em class="sig-param"><span class="n">view_index</span></em>, <em class="sig-param"><span class="n">example_indices</span><span class="o">=</span><span class="default_value">None</span></em><span class="sig-paren">)</span><a class="headerlink" href="#multiview_platform.mono_multi_view_classifiers.utils.dataset.HDF5Dataset.get_v" title="Permalink to this definition">¶</a></dt>
-<dd><p>Extracts the view and returns a numpy.ndarray containing the description
-of the examples specified in example_indices.</p>
-<dl class="field-list simple">
-<dt class="field-odd">Parameters</dt>
-<dd class="field-odd"><ul class="simple">
-<li><p><strong>view_index</strong> (<em>int</em>) – The index of the view to extract</p></li>
-<li><p><strong>example_indices</strong> (<em>numpy.ndarray</em>) – The array containing the indices of the examples to extract.</p></li>
-</ul>
-</dd>
-<dt class="field-even">Returns</dt>
-<dd class="field-even"><p></p>
-</dd>
-<dt class="field-odd">Return type</dt>
-<dd class="field-odd"><p>A numpy.ndarray containing the view data for the needed examples</p>
-</dd>
-</dl>
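-<p>For instance (a hypothetical call, assuming a dataset object built as sketched above):</p>
-<div class="highlight"><pre>
-import numpy as np
-
-# Fetch the first view for three chosen examples.
-X = dataset.get_v(view_index=0, example_indices=np.array([0, 5, 42]))
-print(X.shape)  # (3, nb features of view 0)
-</pre></div>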
-</dd></dl>
-
-<dl class="py method">
-<dt id="multiview_platform.mono_multi_view_classifiers.utils.dataset.HDF5Dataset.get_view_dict">
-<code class="sig-name descname">get_view_dict</code><span class="sig-paren">(</span><span class="sig-paren">)</span><a class="headerlink" href="#multiview_platform.mono_multi_view_classifiers.utils.dataset.HDF5Dataset.get_view_dict" title="Permalink to this definition">¶</a></dt>
-<dd><p>Returns the dictionary with view indices as keys and their corresponding
-names as values</p>
-</dd></dl>
-
-<dl class="py method">
-<dt id="multiview_platform.mono_multi_view_classifiers.utils.dataset.HDF5Dataset.get_view_name">
-<code class="sig-name descname">get_view_name</code><span class="sig-paren">(</span><em class="sig-param"><span class="n">view_idx</span></em><span class="sig-paren">)</span><a class="headerlink" href="#multiview_platform.mono_multi_view_classifiers.utils.dataset.HDF5Dataset.get_view_name" title="Permalink to this definition">¶</a></dt>
-<dd><p>Method to get a view’s name from its index.</p>
-<dl class="field-list simple">
-<dt class="field-odd">Parameters</dt>
-<dd class="field-odd"><p><strong>view_idx</strong> (<em>int</em>) – The index of the view in the dataset</p>
-</dd>
-<dt class="field-even">Returns</dt>
-<dd class="field-even"><p></p>
-</dd>
-<dt class="field-odd">Return type</dt>
-<dd class="field-odd"><p>The view’s name.</p>
-</dd>
-</dl>
-</dd></dl>
-
-<dl class="py method">
-<dt id="multiview_platform.mono_multi_view_classifiers.utils.dataset.HDF5Dataset.init_attrs">
-<code class="sig-name descname">init_attrs</code><span class="sig-paren">(</span><span class="sig-paren">)</span><a class="headerlink" href="#multiview_platform.mono_multi_view_classifiers.utils.dataset.HDF5Dataset.init_attrs" title="Permalink to this definition">¶</a></dt>
-<dd><p>Used to init the attributes that are modified when self.dataset
-changes</p>
-</dd></dl>
-
-<dl class="py method">
-<dt id="multiview_platform.mono_multi_view_classifiers.utils.dataset.HDF5Dataset.init_view_names">
-<code class="sig-name descname">init_view_names</code><span class="sig-paren">(</span><em class="sig-param"><span class="n">view_names</span><span class="o">=</span><span class="default_value">None</span></em><span class="sig-paren">)</span><a class="headerlink" href="#multiview_platform.mono_multi_view_classifiers.utils.dataset.HDF5Dataset.init_view_names" title="Permalink to this definition">¶</a></dt>
-<dd></dd></dl>
-
-<dl class="py method">
-<dt id="multiview_platform.mono_multi_view_classifiers.utils.dataset.HDF5Dataset.rm">
-<code class="sig-name descname">rm</code><span class="sig-paren">(</span><span class="sig-paren">)</span><a class="headerlink" href="#multiview_platform.mono_multi_view_classifiers.utils.dataset.HDF5Dataset.rm" title="Permalink to this definition">¶</a></dt>
-<dd><p>Method used to delete the dataset file on the disk if the dataset is
-temporary.</p>
-</dd></dl>
-
-<dl class="py method">
-<dt id="multiview_platform.mono_multi_view_classifiers.utils.dataset.HDF5Dataset.update_hdf5_dataset">
-<code class="sig-name descname">update_hdf5_dataset</code><span class="sig-paren">(</span><em class="sig-param"><span class="n">path</span></em><span class="sig-paren">)</span><a class="headerlink" href="#multiview_platform.mono_multi_view_classifiers.utils.dataset.HDF5Dataset.update_hdf5_dataset" title="Permalink to this definition">¶</a></dt>
-<dd></dd></dl>
-
-</dd></dl>
-
-<dl class="py class">
-<dt id="multiview_platform.mono_multi_view_classifiers.utils.dataset.RAMDataset">
-<em class="property">class </em><code class="sig-name descname">RAMDataset</code><span class="sig-paren">(</span><em class="sig-param"><span class="n">views</span><span class="o">=</span><span class="default_value">None</span></em>, <em class="sig-param"><span class="n">labels</span><span class="o">=</span><span class="default_value">None</span></em>, <em class="sig-param"><span class="n">are_sparse</span><span class="o">=</span><span class="default_value">False</span></em>, <em class="sig-param"><span class="n">view_names</span><span class="o">=</span><span class="default_value">None</span></em>, <em class="sig-param"><span class="n">labels_names</span><span class="o">=</span><span class="default_value">None</span></em>, <em class="sig-param"><span class="n">example_ids</span><span class="o">=</span><span class="default_value">None</span></em>, <em class="sig-param"><span class="n">name</span><span class="o">=</span><span class="default_value">None</span></em><span class="sig-paren">)</span><a class="headerlink" href="#multiview_platform.mono_multi_view_classifiers.utils.dataset.RAMDataset" title="Permalink to this definition">¶</a></dt>
-<dd><p>Bases: <a class="reference internal" href="#multiview_platform.mono_multi_view_classifiers.utils.dataset.Dataset" title="multiview_platform.mono_multi_view_classifiers.utils.dataset.Dataset"><code class="xref py py-class docutils literal notranslate"><span class="pre">multiview_platform.mono_multi_view_classifiers.utils.dataset.Dataset</span></code></a></p>
-<dl class="py method">
-<dt id="multiview_platform.mono_multi_view_classifiers.utils.dataset.RAMDataset.filter">
-<code class="sig-name descname">filter</code><span class="sig-paren">(</span><em class="sig-param"><span class="n">labels</span></em>, <em class="sig-param"><span class="n">label_names</span></em>, <em class="sig-param"><span class="n">example_indices</span></em>, <em class="sig-param"><span class="n">view_names</span></em>, <em class="sig-param"><span class="n">path</span><span class="o">=</span><span class="default_value">None</span></em><span class="sig-paren">)</span><a class="headerlink" href="#multiview_platform.mono_multi_view_classifiers.utils.dataset.RAMDataset.filter" title="Permalink to this definition">¶</a></dt>
-<dd></dd></dl>
-
-<dl class="py method">
-<dt id="multiview_platform.mono_multi_view_classifiers.utils.dataset.RAMDataset.get_label_names">
-<code class="sig-name descname">get_label_names</code><span class="sig-paren">(</span><em class="sig-param"><span class="n">example_indices</span><span class="o">=</span><span class="default_value">None</span></em>, <em class="sig-param"><span class="n">decode</span><span class="o">=</span><span class="default_value">True</span></em><span class="sig-paren">)</span><a class="headerlink" href="#multiview_platform.mono_multi_view_classifiers.utils.dataset.RAMDataset.get_label_names" title="Permalink to this definition">¶</a></dt>
-<dd></dd></dl>
-
-<dl class="py method">
-<dt id="multiview_platform.mono_multi_view_classifiers.utils.dataset.RAMDataset.get_labels">
-<code class="sig-name descname">get_labels</code><span class="sig-paren">(</span><em class="sig-param"><span class="n">example_indices</span><span class="o">=</span><span class="default_value">None</span></em><span class="sig-paren">)</span><a class="headerlink" href="#multiview_platform.mono_multi_view_classifiers.utils.dataset.RAMDataset.get_labels" title="Permalink to this definition">¶</a></dt>
-<dd></dd></dl>
-
-<dl class="py method">
-<dt id="multiview_platform.mono_multi_view_classifiers.utils.dataset.RAMDataset.get_name">
-<code class="sig-name descname">get_name</code><span class="sig-paren">(</span><span class="sig-paren">)</span><a class="headerlink" href="#multiview_platform.mono_multi_view_classifiers.utils.dataset.RAMDataset.get_name" title="Permalink to this definition">¶</a></dt>
-<dd></dd></dl>
-
-<dl class="py method">
-<dt id="multiview_platform.mono_multi_view_classifiers.utils.dataset.RAMDataset.get_nb_class">
-<code class="sig-name descname">get_nb_class</code><span class="sig-paren">(</span><em class="sig-param"><span class="n">example_indices</span><span class="o">=</span><span class="default_value">None</span></em><span class="sig-paren">)</span><a class="headerlink" href="#multiview_platform.mono_multi_view_classifiers.utils.dataset.RAMDataset.get_nb_class" title="Permalink to this definition">¶</a></dt>
-<dd><p>Gets the number of classes of the dataset.</p>
-</dd></dl>
-
-<dl class="py method">
-<dt id="multiview_platform.mono_multi_view_classifiers.utils.dataset.RAMDataset.get_nb_examples">
-<code class="sig-name descname">get_nb_examples</code><span class="sig-paren">(</span><span class="sig-paren">)</span><a class="headerlink" href="#multiview_platform.mono_multi_view_classifiers.utils.dataset.RAMDataset.get_nb_examples" title="Permalink to this definition">¶</a></dt>
-<dd></dd></dl>
-
-<dl class="py method">
-<dt id="multiview_platform.mono_multi_view_classifiers.utils.dataset.RAMDataset.get_v">
-<code class="sig-name descname">get_v</code><span class="sig-paren">(</span><em class="sig-param"><span class="n">view_index</span></em>, <em class="sig-param"><span class="n">example_indices</span><span class="o">=</span><span class="default_value">None</span></em><span class="sig-paren">)</span><a class="headerlink" href="#multiview_platform.mono_multi_view_classifiers.utils.dataset.RAMDataset.get_v" title="Permalink to this definition">¶</a></dt>
-<dd></dd></dl>
-
-<dl class="py method">
-<dt id="multiview_platform.mono_multi_view_classifiers.utils.dataset.RAMDataset.get_view_dict">
-<code class="sig-name descname">get_view_dict</code><span class="sig-paren">(</span><span class="sig-paren">)</span><a class="headerlink" href="#multiview_platform.mono_multi_view_classifiers.utils.dataset.RAMDataset.get_view_dict" title="Permalink to this definition">¶</a></dt>
-<dd></dd></dl>
-
-<dl class="py method">
-<dt id="multiview_platform.mono_multi_view_classifiers.utils.dataset.RAMDataset.get_view_name">
-<code class="sig-name descname">get_view_name</code><span class="sig-paren">(</span><em class="sig-param"><span class="n">view_idx</span></em><span class="sig-paren">)</span><a class="headerlink" href="#multiview_platform.mono_multi_view_classifiers.utils.dataset.RAMDataset.get_view_name" title="Permalink to this definition">¶</a></dt>
-<dd></dd></dl>
-
-<dl class="py method">
-<dt id="multiview_platform.mono_multi_view_classifiers.utils.dataset.RAMDataset.init_attrs">
-<code class="sig-name descname">init_attrs</code><span class="sig-paren">(</span><span class="sig-paren">)</span><a class="headerlink" href="#multiview_platform.mono_multi_view_classifiers.utils.dataset.RAMDataset.init_attrs" title="Permalink to this definition">¶</a></dt>
-<dd><p>Used to init the two attributes that are modified when self.dataset
-changes</p>
-</dd></dl>
-
-</dd></dl>
-
-<dl class="py function">
-<dt id="multiview_platform.mono_multi_view_classifiers.utils.dataset.confirm">
-<code class="sig-name descname">confirm</code><span class="sig-paren">(</span><em class="sig-param"><span class="n">resp</span><span class="o">=</span><span class="default_value">True</span></em>, <em class="sig-param"><span class="n">timeout</span><span class="o">=</span><span class="default_value">15</span></em><span class="sig-paren">)</span><a class="headerlink" href="#multiview_platform.mono_multi_view_classifiers.utils.dataset.confirm" title="Permalink to this definition">¶</a></dt>
-<dd><p>Used to process the user’s answer to the confirmation prompt.</p>
-</dd></dl>
-
-<dl class="py function">
-<dt id="multiview_platform.mono_multi_view_classifiers.utils.dataset.copy_hdf5">
-<code class="sig-name descname">copy_hdf5</code><span class="sig-paren">(</span><em class="sig-param"><span class="n">pathF</span></em>, <em class="sig-param"><span class="n">name</span></em>, <em class="sig-param"><span class="n">nbCores</span></em><span class="sig-paren">)</span><a class="headerlink" href="#multiview_platform.mono_multi_view_classifiers.utils.dataset.copy_hdf5" title="Permalink to this definition">¶</a></dt>
-<dd><p>Used to copy an HDF5 database in case of multicore computing.</p>
-</dd></dl>
-
-<dl class="py function">
-<dt id="multiview_platform.mono_multi_view_classifiers.utils.dataset.datasets_already_exist">
-<code class="sig-name descname">datasets_already_exist</code><span class="sig-paren">(</span><em class="sig-param"><span class="n">pathF</span></em>, <em class="sig-param"><span class="n">name</span></em>, <em class="sig-param"><span class="n">nbCores</span></em><span class="sig-paren">)</span><a class="headerlink" href="#multiview_platform.mono_multi_view_classifiers.utils.dataset.datasets_already_exist" title="Permalink to this definition">¶</a></dt>
-<dd><p>Used to check if it’s necessary to copy datasets</p>
-</dd></dl>
-
-<dl class="py function">
-<dt id="multiview_platform.mono_multi_view_classifiers.utils.dataset.delete_HDF5">
-<code class="sig-name descname">delete_HDF5</code><span class="sig-paren">(</span><em class="sig-param"><span class="n">benchmarkArgumentsDictionaries</span></em>, <em class="sig-param"><span class="n">nbCores</span></em>, <em class="sig-param"><span class="n">dataset</span></em><span class="sig-paren">)</span><a class="headerlink" href="#multiview_platform.mono_multi_view_classifiers.utils.dataset.delete_HDF5" title="Permalink to this definition">¶</a></dt>
-<dd><p>Used to delete temporary copies at the end of the benchmark</p>
-</dd></dl>
-
-<dl class="py function">
-<dt id="multiview_platform.mono_multi_view_classifiers.utils.dataset.extract_subset">
-<code class="sig-name descname">extract_subset</code><span class="sig-paren">(</span><em class="sig-param"><span class="n">matrix</span></em>, <em class="sig-param"><span class="n">used_indices</span></em><span class="sig-paren">)</span><a class="headerlink" href="#multiview_platform.mono_multi_view_classifiers.utils.dataset.extract_subset" title="Permalink to this definition">¶</a></dt>
-<dd><p>Used to extract a subset of a matrix even if it’s sparse</p>
-</dd></dl>
-
-<dl class="py function">
-<dt id="multiview_platform.mono_multi_view_classifiers.utils.dataset.get_examples_views_indices">
-<code class="sig-name descname">get_examples_views_indices</code><span class="sig-paren">(</span><em class="sig-param"><span class="n">dataset</span></em>, <em class="sig-param"><span class="n">examples_indices</span></em>, <em class="sig-param"><span class="n">view_indices</span></em><span class="sig-paren">)</span><a class="headerlink" href="#multiview_platform.mono_multi_view_classifiers.utils.dataset.get_examples_views_indices" title="Permalink to this definition">¶</a></dt>
-<dd><p>This function is used to get all the example indices and view indices if needed.</p>
-</dd></dl>
-
-<dl class="py function">
-<dt id="multiview_platform.mono_multi_view_classifiers.utils.dataset.init_multiple_datasets">
-<code class="sig-name descname">init_multiple_datasets</code><span class="sig-paren">(</span><em class="sig-param"><span class="n">path_f</span></em>, <em class="sig-param"><span class="n">name</span></em>, <em class="sig-param"><span class="n">nb_cores</span></em><span class="sig-paren">)</span><a class="headerlink" href="#multiview_platform.mono_multi_view_classifiers.utils.dataset.init_multiple_datasets" title="Permalink to this definition">¶</a></dt>
-<dd><p>Used to create copies of the dataset if multicore computation is used.</p>
-<p>This is a temporary solution to fix the sharing memory issue with HDF5 datasets.</p>
-<dl class="field-list simple">
-<dt class="field-odd">Parameters</dt>
-<dd class="field-odd"><ul class="simple">
-<li><p><strong>path_f</strong> (<em>string</em>) – Path to the original dataset directory</p></li>
-<li><p><strong>name</strong> (<em>string</em>) – Name of the dataset</p></li>
-<li><p><strong>nb_cores</strong> (<em>int</em>) – The number of threads that the benchmark can use</p></li>
-</ul>
-</dd>
-<dt class="field-even">Returns</dt>
-<dd class="field-even"><p><strong>datasetFiles</strong> – Dictionary resuming which mono- and multiview algorithms which will be used in the benchmark.</p>
-</dd>
-<dt class="field-odd">Return type</dt>
-<dd class="field-odd"><p>None</p>
-</dd>
-</dl>
-</dd></dl>
-
-<dl class="py function">
-<dt id="multiview_platform.mono_multi_view_classifiers.utils.dataset.input_">
-<code class="sig-name descname">input_</code><span class="sig-paren">(</span><em class="sig-param"><span class="n">timeout</span><span class="o">=</span><span class="default_value">15</span></em><span class="sig-paren">)</span><a class="headerlink" href="#multiview_platform.mono_multi_view_classifiers.utils.dataset.input_" title="Permalink to this definition">¶</a></dt>
-<dd><p>Used as a UI prompt to stop the run if too much HDD space would be used.</p>
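-<p>A common way to implement such a timed prompt on POSIX systems is shown below; this is a sketch of the idea, not the platform’s code:</p>
-<div class="highlight"><pre>
-import select
-import sys
-
-# Sketch: wait up to `timeout` seconds for an answer, else assume "yes".
-def input_sketch(timeout=15):
-    print("A large amount of disk space will be used. Continue? (y/n)")
-    ready, _, _ = select.select([sys.stdin], [], [], timeout)
-    return sys.stdin.readline().strip() if ready else "y"
-</pre></div>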
-</dd></dl>
-
-<dl class="py function">
-<dt id="multiview_platform.mono_multi_view_classifiers.utils.dataset.is_just_number">
-<code class="sig-name descname">is_just_number</code><span class="sig-paren">(</span><em class="sig-param"><span class="n">string</span></em><span class="sig-paren">)</span><a class="headerlink" href="#multiview_platform.mono_multi_view_classifiers.utils.dataset.is_just_number" title="Permalink to this definition">¶</a></dt>
-<dd></dd></dl>
-
+<div class="section" id="multiview-platform-mono-multi-view-classifiers-utils-dataset-module">
+<h2>multiview_platform.mono_multi_view_classifiers.utils.dataset module<a class="headerlink" href="#multiview-platform-mono-multi-view-classifiers-utils-dataset-module" title="Permalink to this headline">¶</a></h2>
 </div>
-<div class="section" id="module-multiview_platform.mono_multi_view_classifiers.utils.execution">
-<span id="multiview-platform-mono-multi-view-classifiers-utils-execution-module"></span><h2>multiview_platform.mono_multi_view_classifiers.utils.execution module<a class="headerlink" href="#module-multiview_platform.mono_multi_view_classifiers.utils.execution" title="Permalink to this headline">¶</a></h2>
-<dl class="py function">
-<dt id="multiview_platform.mono_multi_view_classifiers.utils.execution.find_dataset_names">
-<code class="sig-name descname">find_dataset_names</code><span class="sig-paren">(</span><em class="sig-param"><span class="n">path</span></em>, <em class="sig-param"><span class="n">type</span></em>, <em class="sig-param"><span class="n">names</span></em><span class="sig-paren">)</span><a class="headerlink" href="#multiview_platform.mono_multi_view_classifiers.utils.execution.find_dataset_names" title="Permalink to this definition">¶</a></dt>
-<dd><p>This function’s goal is to browse the dataset directory and extract all
-the needed dataset names.</p>
-</dd></dl>
-
-<dl class="py function">
-<dt id="multiview_platform.mono_multi_view_classifiers.utils.execution.gen_argument_dictionaries">
-<code class="sig-name descname">gen_argument_dictionaries</code><span class="sig-paren">(</span><em class="sig-param"><span class="n">labels_dictionary</span></em>, <em class="sig-param"><span class="n">directories</span></em>, <em class="sig-param"><span class="n">splits</span></em>, <em class="sig-param"><span class="n">hyper_param_search</span></em>, <em class="sig-param"><span class="n">args</span></em>, <em class="sig-param"><span class="n">k_folds</span></em>, <em class="sig-param"><span class="n">stats_iter_random_states</span></em>, <em class="sig-param"><span class="n">metrics</span></em>, <em class="sig-param"><span class="n">argument_dictionaries</span></em>, <em class="sig-param"><span class="n">benchmark</span></em>, <em class="sig-param"><span class="n">views</span></em>, <em class="sig-param"><span class="n">views_indices</span></em><span class="sig-paren">)</span><a class="headerlink" href="#multiview_platform.mono_multi_view_classifiers.utils.execution.gen_argument_dictionaries" title="Permalink to this definition">¶</a></dt>
-<dd><p>Used to generate a dictionary for each benchmark.</p>
-<p>For each label combination (if multiclass) and for each statistical iteration, this generates a dictionary with
-all the necessary information to perform the benchmark.</p>
-<dl class="field-list simple">
-<dt class="field-odd">Parameters</dt>
-<dd class="field-odd"><ul class="simple">
-<li><p><strong>labels_dictionary</strong> (<em>dictionary</em>) – Dictionary mapping labels indices to labels names.</p></li>
-<li><p><strong>directories</strong> (<em>list of strings</em>) – List of the paths to the result directories for each statistical iteration.</p></li>
-<li><p><strong>multiclass_labels</strong> (<em>list of lists of numpy.ndarray</em>) – For each label couple, for each statistical iteration a triplet of numpy.ndarrays is stored with the
-indices for the biclass training set, the ones for the biclass testing set and the ones for the
-multiclass testing set.</p></li>
-<li><p><strong>labels_combinations</strong> (<em>list of lists of numpy.ndarray</em>) – Each original couple of different labels.</p></li>
-<li><p><strong>indices_multiclass</strong> (<em>list of lists of numpy.ndarray</em>) – For each combination, contains a biclass labels numpy.ndarray with the 0/1 labels of combination.</p></li>
-<li><p><strong>hyper_param_search</strong> (<em>string</em>) – Type of hyper parameter optimization method</p></li>
-<li><p><strong>args</strong> (<em>parsed args objects</em>) – All the args passed by the user.</p></li>
-<li><p><strong>k_folds</strong> (<em>list of list of sklearn.model_selection.StratifiedKFold</em>) – For each statistical iteration a Kfold stratified (keeping the ratio between classes in each fold).</p></li>
-<li><p><strong>stats_iter_random_states</strong> (<em>list of numpy.random.RandomState objects</em>) – Multiple random states, one for each statistical iteration of the same benchmark.</p></li>
-<li><p><strong>metrics</strong> (<em>list of lists</em>) – metrics that will be used to evaluate the algorithms performance.</p></li>
-<li><p><strong>argument_dictionaries</strong> (<em>dictionary</em>) – Dictionary summarizing all the specific arguments for the benchmark, one dictionary for each classifier.</p></li>
-<li><p><strong>benchmark</strong> (<em>dictionary</em>) – Dictionary summarizing which mono- and multiview algorithms will be used in the benchmark.</p></li>
-<li><p><strong>nb_views</strong> (<em>int</em>) – The number of views used by the benchmark.</p></li>
-<li><p><strong>views</strong> (<em>list of strings</em>) – List of the names of the used views.</p></li>
-<li><p><strong>views_indices</strong> (<em>list of ints</em>) – List of indices (according to the dataset) of the used views.</p></li>
-</ul>
-</dd>
-<dt class="field-even">Returns</dt>
-<dd class="field-even"><p><strong>benchmarkArgumentDictionaries</strong> – All the needed arguments for the benchmarks.</p>
-</dd>
-<dt class="field-odd">Return type</dt>
-<dd class="field-odd"><p>list of dicts</p>
-</dd>
-</dl>
-</dd></dl>
-
-<dl class="py function">
-<dt id="multiview_platform.mono_multi_view_classifiers.utils.execution.gen_direcorties_names">
-<code class="sig-name descname">gen_direcorties_names</code><span class="sig-paren">(</span><em class="sig-param"><span class="n">directory</span></em>, <em class="sig-param"><span class="n">stats_iter</span></em><span class="sig-paren">)</span><a class="headerlink" href="#multiview_platform.mono_multi_view_classifiers.utils.execution.gen_direcorties_names" title="Permalink to this definition">¶</a></dt>
-<dd><p>Used to generate the different directories of each iteration if needed.</p>
-<dl class="field-list simple">
-<dt class="field-odd">Parameters</dt>
-<dd class="field-odd"><ul class="simple">
-<li><p><strong>directory</strong> (<em>string</em>) – Path to the results directory.</p></li>
-<li><p><strong>stats_iter</strong> (<em>int</em>) – The number of statistical iterations.</p></li>
-</ul>
-</dd>
-<dt class="field-even">Returns</dt>
-<dd class="field-even"><p><strong>directories</strong> – Paths to each statistical iterations result directory.</p>
-</dd>
-<dt class="field-odd">Return type</dt>
-<dd class="field-odd"><p>list of strings</p>
-</dd>
-</dl>
-</dd></dl>
-
-<dl class="py function">
-<dt id="multiview_platform.mono_multi_view_classifiers.utils.execution.gen_k_folds">
-<code class="sig-name descname">gen_k_folds</code><span class="sig-paren">(</span><em class="sig-param"><span class="n">stats_iter</span></em>, <em class="sig-param"><span class="n">nb_folds</span></em>, <em class="sig-param"><span class="n">stats_iter_random_states</span></em><span class="sig-paren">)</span><a class="headerlink" href="#multiview_platform.mono_multi_view_classifiers.utils.execution.gen_k_folds" title="Permalink to this definition">¶</a></dt>
-<dd><p>Used to generate folds indices for cross validation for each statistical iteration.</p>
-<dl class="field-list simple">
-<dt class="field-odd">Parameters</dt>
-<dd class="field-odd"><ul class="simple">
-<li><p><strong>stats_iter</strong> (<em>integer</em>) – Number of statistical iterations of the benchmark.</p></li>
-<li><p><strong>nb_folds</strong> (<em>integer</em>) – The number of cross-validation folds for the benchmark.</p></li>
-<li><p><strong>stats_iter_random_states</strong> (<em>list of numpy.random.RandomState</em>) – The random states for each statistical iteration.</p></li>
-</ul>
-</dd>
-<dt class="field-even">Returns</dt>
-<dd class="field-even"><p><strong>folds_list</strong> – For each statistical iteration a Kfold stratified (keeping the ratio between classes in each fold).</p>
-</dd>
-<dt class="field-odd">Return type</dt>
-<dd class="field-odd"><p>list of list of sklearn.model_selection.StratifiedKFold</p>
-</dd>
-</dl>
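-<p>A minimal sketch matching this description (one <code class="docutils literal notranslate"><span class="pre">StratifiedKFold</span></code> per iteration; the shuffling choice is an assumption):</p>
-<div class="highlight"><pre>
-from sklearn.model_selection import StratifiedKFold
-
-# Sketch: one stratified k-fold generator per statistical iteration.
-def gen_k_folds_sketch(stats_iter, nb_folds, stats_iter_random_states):
-    return [StratifiedKFold(n_splits=nb_folds, shuffle=True, random_state=rs)
-            for rs in stats_iter_random_states]
-</pre></div>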
-</dd></dl>
-
-<dl class="py function">
-<dt id="multiview_platform.mono_multi_view_classifiers.utils.execution.gen_splits">
-<code class="sig-name descname">gen_splits</code><span class="sig-paren">(</span><em class="sig-param"><span class="n">labels</span></em>, <em class="sig-param"><span class="n">split_ratio</span></em>, <em class="sig-param"><span class="n">stats_iter_random_states</span></em><span class="sig-paren">)</span><a class="headerlink" href="#multiview_platform.mono_multi_view_classifiers.utils.execution.gen_splits" title="Permalink to this definition">¶</a></dt>
-<dd><p>Used to generate the train/test splits using one or multiple random states.</p>
-<dl class="field-list simple">
-<dt class="field-odd">Parameters</dt>
-<dd class="field-odd"><ul class="simple">
-<li><p><strong>labels</strong> (<em>numpy.ndarray</em>) – The labels of the full dataset.</p></li>
-<li><p><strong>split_ratio</strong> (<em>float</em>) – The ratio of examples between train and test set.</p></li>
-<li><p><strong>stats_iter_random_states</strong> (<em>list of numpy.random.RandomState</em>) – The random states for each statistical iteration.</p></li>
-</ul>
-</dd>
-<dt class="field-even">Returns</dt>
-<dd class="field-even"><p><strong>splits</strong> – For each statistical iteration a couple of numpy.ndarrays is stored with the indices for the training set and
-the ones of the testing set.</p>
-</dd>
-<dt class="field-odd">Return type</dt>
-<dd class="field-odd"><p>list of lists of numpy.ndarray</p>
-</dd>
-</dl>
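-<p>A minimal sketch of this behaviour using scikit-learn (the use of <code class="docutils literal notranslate"><span class="pre">StratifiedShuffleSplit</span></code> and the reading of <code class="docutils literal notranslate"><span class="pre">split_ratio</span></code> as the test-set fraction are assumptions):</p>
-<div class="highlight"><pre>
-import numpy as np
-from sklearn.model_selection import StratifiedShuffleSplit
-
-# Sketch: one stratified train/test split per statistical iteration.
-def gen_splits_sketch(labels, split_ratio, stats_iter_random_states):
-    splits = []
-    for rs in stats_iter_random_states:
-        sss = StratifiedShuffleSplit(n_splits=1, test_size=split_ratio,
-                                     random_state=rs)
-        train_idx, test_idx = next(sss.split(np.zeros((len(labels), 1)), labels))
-        splits.append([train_idx, test_idx])
-    return splits
-</pre></div>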
-</dd></dl>
-
-<dl class="py function">
-<dt id="multiview_platform.mono_multi_view_classifiers.utils.execution.get_database_function">
-<code class="sig-name descname">get_database_function</code><span class="sig-paren">(</span><em class="sig-param"><span class="n">name</span></em>, <em class="sig-param"><span class="n">type_var</span></em><span class="sig-paren">)</span><a class="headerlink" href="#multiview_platform.mono_multi_view_classifiers.utils.execution.get_database_function" title="Permalink to this definition">¶</a></dt>
-<dd><p>Used to get the right database extraction function according to the type of database and its name.</p>
-<dl class="field-list simple">
-<dt class="field-odd">Parameters</dt>
-<dd class="field-odd"><ul class="simple">
-<li><p><strong>name</strong> (<em>string</em>) – Name of the database.</p></li>
-<li><p><strong>type_var</strong> (<em>string</em>) – Type of dataset: hdf5 or csv.</p></li>
-</ul>
-</dd>
-<dt class="field-even">Returns</dt>
-<dd class="field-even"><p><strong>getDatabase</strong> – The function that will be used to extract the database</p>
-</dd>
-<dt class="field-odd">Return type</dt>
-<dd class="field-odd"><p>function</p>
-</dd>
-</dl>
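-<p>A sketch of the dispatch (the loader names come from the <code class="docutils literal notranslate"><span class="pre">get_multiview_db</span></code> module documented below; the exact selection logic is an assumption):</p>
-<div class="highlight"><pre>
-# Loaders documented in utils.get_multiview_db below.
-from multiview_platform.mono_multi_view_classifiers.utils.get_multiview_db import (
-    get_classic_db_csv, get_classic_db_hdf5)
-
-# Sketch: pick a loader according to the dataset type.
-def get_database_function_sketch(name, type_var):
-    if type_var == "hdf5":
-        return get_classic_db_hdf5
-    elif type_var == "csv":
-        return get_classic_db_csv
-    raise ValueError("Unknown dataset type: " + type_var)
-</pre></div>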
-</dd></dl>
-
-<dl class="py function">
-<dt id="multiview_platform.mono_multi_view_classifiers.utils.execution.init_log_file">
-<code class="sig-name descname">init_log_file</code><span class="sig-paren">(</span><em class="sig-param"><span class="n">name</span></em>, <em class="sig-param"><span class="n">views</span></em>, <em class="sig-param"><span class="n">cl_type</span></em>, <em class="sig-param"><span class="n">log</span></em>, <em class="sig-param"><span class="n">debug</span></em>, <em class="sig-param"><span class="n">label</span></em>, <em class="sig-param"><span class="n">result_directory</span></em>, <em class="sig-param"><span class="n">args</span></em><span class="sig-paren">)</span><a class="headerlink" href="#multiview_platform.mono_multi_view_classifiers.utils.execution.init_log_file" title="Permalink to this definition">¶</a></dt>
-<dd><p>Used to initialize the directory where the predictions will be stored, and the log file.</p>
-<p>First, this function checks whether the result directory already exists (only one per minute is allowed).</p>
-<p>If the result directory name is available, it is created and the log file is initiated.</p>
-<dl class="field-list simple">
-<dt class="field-odd">Parameters</dt>
-<dd class="field-odd"><ul class="simple">
-<li><p><strong>name</strong> (<em>string</em>) – Name of the database.</p></li>
-<li><p><strong>views</strong> (<em>list of strings</em>) – List of the view names that will be used in the benchmark.</p></li>
-<li><p><strong>cl_type</strong> (<em>list of strings</em>) – Type of benchmark that will be made.</p></li>
-<li><p><strong>log</strong> (<em>bool</em>) – Whether to show the log file in the console or hide it.</p></li>
-<li><p><strong>debug</strong> (<em>bool</em>) – Whether to run in debug mode.</p></li>
-<li><p><strong>label</strong> (<em>str</em>) – Label of the experiment.</p></li>
-<li><p><strong>result_directory</strong> (<em>str</em>) – Name of the result directory.</p></li>
-<li><p><strong>add_noise</strong> (<em>bool</em>) – Whether to add noise.</p></li>
-<li><p><strong>noise_std</strong> (<em>float</em>) – Standard deviation of the added noise.</p></li>
-</ul>
-</dd>
-<dt class="field-even">Returns</dt>
-<dd class="field-even"><p><strong>results_directory</strong> – Reference to the main results directory for the benchmark.</p>
-</dd>
-<dt class="field-odd">Return type</dt>
-<dd class="field-odd"><p>string</p>
-</dd>
-</dl>
-</dd></dl>
-
-<dl class="py function">
-<dt id="multiview_platform.mono_multi_view_classifiers.utils.execution.init_random_state">
-<code class="sig-name descname">init_random_state</code><span class="sig-paren">(</span><em class="sig-param"><span class="n">random_state_arg</span></em>, <em class="sig-param"><span class="n">directory</span></em><span class="sig-paren">)</span><a class="headerlink" href="#multiview_platform.mono_multi_view_classifiers.utils.execution.init_random_state" title="Permalink to this definition">¶</a></dt>
-<dd><p>Used to initialize a random state.
-If no random state is specified, it will generate a ‘random’ seed.
-If <cite>random_state_arg</cite> is a string containing only numbers, it will be converted to
-an int to generate a seed.
-If <cite>random_state_arg</cite> is a string with letters, it must be a path to a pickled random
-state file that will be loaded.
-The function will also pickle the new random state in a file, to be able to retrieve it later.</p>
-<dl class="field-list simple">
-<dt class="field-odd">Parameters</dt>
-<dd class="field-odd"><ul class="simple">
-<li><p><strong>random_state_arg</strong> (<em>None</em><em> or </em><em>string</em>) – See function description.</p></li>
-<li><p><strong>directory</strong> (<em>string</em>) – Path to the results directory.</p></li>
-</ul>
-</dd>
-<dt class="field-even">Returns</dt>
-<dd class="field-even"><p><strong>random_state</strong> – This random state will be used all along the benchmark .</p>
-</dd>
-<dt class="field-odd">Return type</dt>
-<dd class="field-odd"><p>numpy.random.RandomState object</p>
-</dd>
-</dl>
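-<p>A sketch of the described behaviour (the pickle file name is an assumption):</p>
-<div class="highlight"><pre>
-import pickle
-
-import numpy as np
-
-# Sketch: seed from an int-like string, load from a path, or draw at random,
-# then persist the chosen state so the run can be reproduced later.
-def init_random_state_sketch(random_state_arg, directory):
-    if random_state_arg is None:
-        random_state = np.random.RandomState(None)
-    elif random_state_arg.isdigit():
-        random_state = np.random.RandomState(int(random_state_arg))
-    else:
-        with open(random_state_arg, "rb") as handle:
-            random_state = pickle.load(handle)
-    with open(directory + "random_state.pickle", "wb") as handle:
-        pickle.dump(random_state, handle)
-    return random_state
-</pre></div>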
-</dd></dl>
-
-<dl class="py function">
-<dt id="multiview_platform.mono_multi_view_classifiers.utils.execution.init_stats_iter_random_states">
-<code class="sig-name descname">init_stats_iter_random_states</code><span class="sig-paren">(</span><em class="sig-param"><span class="n">stats_iter</span></em>, <em class="sig-param"><span class="n">random_state</span></em><span class="sig-paren">)</span><a class="headerlink" href="#multiview_platform.mono_multi_view_classifiers.utils.execution.init_stats_iter_random_states" title="Permalink to this definition">¶</a></dt>
-<dd><p>Used to initialize multiple random states if needed because of multiple statistical iterations of the same benchmark.</p>
-<dl class="field-list simple">
-<dt class="field-odd">Parameters</dt>
-<dd class="field-odd"><ul class="simple">
-<li><p><strong>stats_iter</strong> (<em>int</em>) – Number of statistical iterations of the same benchmark done (with a different random state).</p></li>
-<li><p><strong>random_state</strong> (<em>numpy.random.RandomState object</em>) – The random state of the whole experimentation, that will be used to generate the ones for each
-statistical iteration.</p></li>
-</ul>
-</dd>
-<dt class="field-even">Returns</dt>
-<dd class="field-even"><p><strong>stats_iter_random_states</strong> – Multiple random states, one for each sattistical iteration of the same benchmark.</p>
-</dd>
-<dt class="field-odd">Return type</dt>
-<dd class="field-odd"><p>list of numpy.random.RandomState objects</p>
-</dd>
-</dl>
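-<p>A minimal sketch (the seeding scheme is an assumption):</p>
-<div class="highlight"><pre>
-import numpy as np
-
-# Sketch: derive one child RandomState per statistical iteration
-# from the benchmark's master random state.
-def init_stats_iter_random_states_sketch(stats_iter, random_state):
-    if stats_iter > 1:
-        return [np.random.RandomState(random_state.randint(500))
-                for _ in range(stats_iter)]
-    return [random_state]
-</pre></div>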
-</dd></dl>
-
-<dl class="py function">
-<dt id="multiview_platform.mono_multi_view_classifiers.utils.execution.init_views">
-<code class="sig-name descname">init_views</code><span class="sig-paren">(</span><em class="sig-param"><span class="n">dataset_var</span></em>, <em class="sig-param"><span class="n">arg_views</span></em><span class="sig-paren">)</span><a class="headerlink" href="#multiview_platform.mono_multi_view_classifiers.utils.execution.init_views" title="Permalink to this definition">¶</a></dt>
-<dd><p>Used to return the names of the views that will be used by the
-benchmark, their indices, and the names of all the available views.</p>
-<dl class="field-list simple">
-<dt class="field-odd">Parameters</dt>
-<dd class="field-odd"><ul class="simple">
-<li><p><strong>dataset_var</strong> (<em>HDF5 dataset file</em>) – The full dataset that will be used by the benchmark.</p></li>
-<li><p><strong>arg_views</strong> (<em>list of strings</em>) – The views that will be used by the benchmark (arg).</p></li>
-</ul>
-</dd>
-<dt class="field-even">Returns</dt>
-<dd class="field-even"><p><ul class="simple">
-<li><p><strong>views</strong> (<em>list of strings</em>) – Names of the views that will be used by the benchmark.</p></li>
-<li><p><strong>view_indices</strong> (<em>list of ints</em>) – The list of the indices of the views that will be used in the benchmark (according to the dataset).</p></li>
-<li><p><strong>all_views</strong> (<em>list of strings</em>) – Names of all the available views in the dataset.</p></li>
-</ul>
-</p>
-</dd>
-</dl>
-</dd></dl>
-
-<dl class="py function">
-<dt id="multiview_platform.mono_multi_view_classifiers.utils.execution.parse_the_args">
-<code class="sig-name descname">parse_the_args</code><span class="sig-paren">(</span><em class="sig-param"><span class="n">arguments</span></em><span class="sig-paren">)</span><a class="headerlink" href="#multiview_platform.mono_multi_view_classifiers.utils.execution.parse_the_args" title="Permalink to this definition">¶</a></dt>
-<dd><p>Used to parse the arguments entered by the user.</p>
-</dd></dl>
-
+<div class="section" id="multiview-platform-mono-multi-view-classifiers-utils-execution-module">
+<h2>multiview_platform.mono_multi_view_classifiers.utils.execution module<a class="headerlink" href="#multiview-platform-mono-multi-view-classifiers-utils-execution-module" title="Permalink to this headline">¶</a></h2>
 </div>
-<div class="section" id="module-multiview_platform.mono_multi_view_classifiers.utils.get_multiview_db">
-<span id="multiview-platform-mono-multi-view-classifiers-utils-get-multiview-db-module"></span><h2>multiview_platform.mono_multi_view_classifiers.utils.get_multiview_db module<a class="headerlink" href="#module-multiview_platform.mono_multi_view_classifiers.utils.get_multiview_db" title="Permalink to this headline">¶</a></h2>
-<dl class="py exception">
-<dt id="multiview_platform.mono_multi_view_classifiers.utils.get_multiview_db.DatasetError">
-<em class="property">exception </em><code class="sig-name descname">DatasetError</code><span class="sig-paren">(</span><em class="sig-param"><span class="o">*</span><span class="n">args</span></em>, <em class="sig-param"><span class="o">**</span><span class="n">kwargs</span></em><span class="sig-paren">)</span><a class="headerlink" href="#multiview_platform.mono_multi_view_classifiers.utils.get_multiview_db.DatasetError" title="Permalink to this definition">¶</a></dt>
-<dd><p>Bases: <code class="xref py py-class docutils literal notranslate"><span class="pre">Exception</span></code></p>
-</dd></dl>
-
-<dl class="py function">
-<dt id="multiview_platform.mono_multi_view_classifiers.utils.get_multiview_db.get_classic_db_csv">
-<code class="sig-name descname">get_classic_db_csv</code><span class="sig-paren">(</span><em class="sig-param"><span class="n">views</span></em>, <em class="sig-param"><span class="n">pathF</span></em>, <em class="sig-param"><span class="n">nameDB</span></em>, <em class="sig-param"><span class="n">NB_CLASS</span></em>, <em class="sig-param"><span class="n">askedLabelsNames</span></em>, <em class="sig-param"><span class="n">random_state</span></em>, <em class="sig-param"><span class="n">full</span><span class="o">=</span><span class="default_value">False</span></em>, <em class="sig-param"><span class="n">add_noise</span><span class="o">=</span><span class="default_value">False</span></em>, <em class="sig-param"><span class="n">noise_std</span><span class="o">=</span><span class="default_value">0.15</span></em>, <em class="sig-param"><span class="n">delimiter</span><span class="o">=</span><span class="default_value">','</span></em>, <em class="sig-param"><span class="n">path_for_new</span><span class="o">=</span><span class="default_value">'../data/'</span></em><span class="sig-paren">)</span><a class="headerlink" href="#multiview_platform.mono_multi_view_classifiers.utils.get_multiview_db.get_classic_db_csv" title="Permalink to this definition">¶</a></dt>
-<dd></dd></dl>
-
-<dl class="py function">
-<dt id="multiview_platform.mono_multi_view_classifiers.utils.get_multiview_db.get_classic_db_hdf5">
-<code class="sig-name descname">get_classic_db_hdf5</code><span class="sig-paren">(</span><em class="sig-param"><span class="n">views</span></em>, <em class="sig-param"><span class="n">path_f</span></em>, <em class="sig-param"><span class="n">name_DB</span></em>, <em class="sig-param"><span class="n">nb_class</span></em>, <em class="sig-param"><span class="n">asked_labels_names</span></em>, <em class="sig-param"><span class="n">random_state</span></em>, <em class="sig-param"><span class="n">full</span><span class="o">=</span><span class="default_value">False</span></em>, <em class="sig-param"><span class="n">add_noise</span><span class="o">=</span><span class="default_value">False</span></em>, <em class="sig-param"><span class="n">noise_std</span><span class="o">=</span><span class="default_value">0.15</span></em>, <em class="sig-param"><span class="n">path_for_new</span><span class="o">=</span><span class="default_value">'../data/'</span></em><span class="sig-paren">)</span><a class="headerlink" href="#multiview_platform.mono_multi_view_classifiers.utils.get_multiview_db.get_classic_db_hdf5" title="Permalink to this definition">¶</a></dt>
-<dd><p>Used to load an hdf5 database.</p>
-</dd></dl>
-
-<dl class="py function">
-<dt id="multiview_platform.mono_multi_view_classifiers.utils.get_multiview_db.get_plausible_db_hdf5">
-<code class="sig-name descname">get_plausible_db_hdf5</code><span class="sig-paren">(</span><em class="sig-param"><span class="n">features</span></em>, <em class="sig-param"><span class="n">path</span></em>, <em class="sig-param"><span class="n">file_name</span></em>, <em class="sig-param"><span class="n">nb_class</span><span class="o">=</span><span class="default_value">3</span></em>, <em class="sig-param"><span class="n">label_names</span><span class="o">=</span><span class="default_value">[b'No', b'Yes', b'Maybe']</span></em>, <em class="sig-param"><span class="n">random_state</span><span class="o">=</span><span class="default_value">None</span></em>, <em class="sig-param"><span class="n">full</span><span class="o">=</span><span class="default_value">True</span></em>, <em class="sig-param"><span class="n">add_noise</span><span class="o">=</span><span class="default_value">False</span></em>, <em class="sig-param"><span class="n">noise_std</span><span class="o">=</span><span class="default_value">0.15</span></em>, <em class="sig-param"><span class="n">nb_view</span><span class="o">=</span><span class="default_value">3</span></em>, <em class="sig-param"><span class="n">nb_examples</span><span class="o">=</span><span class="default_value">100</span></em>, <em class="sig-param"><span class="n">nb_features</span><span class="o">=</span><span class="default_value">10</span></em><span class="sig-paren">)</span><a class="headerlink" href="#multiview_platform.mono_multi_view_classifiers.utils.get_multiview_db.get_plausible_db_hdf5" title="Permalink to this definition">¶</a></dt>
-<dd><p>Used to generate a plausible dataset to test the algorithms</p>
-</dd></dl>
-
-<dl class="py function">
-<dt id="multiview_platform.mono_multi_view_classifiers.utils.get_multiview_db.make_me_noisy">
-<code class="sig-name descname">make_me_noisy</code><span class="sig-paren">(</span><em class="sig-param"><span class="n">view_data</span></em>, <em class="sig-param"><span class="n">random_state</span></em>, <em class="sig-param"><span class="n">percentage</span><span class="o">=</span><span class="default_value">5</span></em><span class="sig-paren">)</span><a class="headerlink" href="#multiview_platform.mono_multi_view_classifiers.utils.get_multiview_db.make_me_noisy" title="Permalink to this definition">¶</a></dt>
-<dd><p>Used to introduce some noise into the generated data.</p>
-</dd></dl>
-
+<div class="section" id="multiview-platform-mono-multi-view-classifiers-utils-get-multiview-db-module">
+<h2>multiview_platform.mono_multi_view_classifiers.utils.get_multiview_db module<a class="headerlink" href="#multiview-platform-mono-multi-view-classifiers-utils-get-multiview-db-module" title="Permalink to this headline">¶</a></h2>
 </div>
-<div class="section" id="module-multiview_platform.mono_multi_view_classifiers.utils.hyper_parameter_search">
-<span id="multiview-platform-mono-multi-view-classifiers-utils-hyper-parameter-search-module"></span><h2>multiview_platform.mono_multi_view_classifiers.utils.hyper_parameter_search module<a class="headerlink" href="#module-multiview_platform.mono_multi_view_classifiers.utils.hyper_parameter_search" title="Permalink to this headline">¶</a></h2>
-<dl class="py class">
-<dt id="multiview_platform.mono_multi_view_classifiers.utils.hyper_parameter_search.CustomRandint">
-<em class="property">class </em><code class="sig-name descname">CustomRandint</code><span class="sig-paren">(</span><em class="sig-param"><span class="n">low</span><span class="o">=</span><span class="default_value">0</span></em>, <em class="sig-param"><span class="n">high</span><span class="o">=</span><span class="default_value">0</span></em>, <em class="sig-param"><span class="n">multiplier</span><span class="o">=</span><span class="default_value">''</span></em><span class="sig-paren">)</span><a class="headerlink" href="#multiview_platform.mono_multi_view_classifiers.utils.hyper_parameter_search.CustomRandint" title="Permalink to this definition">¶</a></dt>
-<dd><p>Bases: <code class="xref py py-class docutils literal notranslate"><span class="pre">object</span></code></p>
-<p>Used as a distribution returning an integer between low and high-1.
-It can be used with a multiplier argument to perform more complex generation,
-for example 10e-(randint).</p>
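-<p>Since it exposes <code class="docutils literal notranslate"><span class="pre">rvs</span></code>, it can serve as a parameter distribution, for instance with scikit-learn’s <code class="docutils literal notranslate"><span class="pre">RandomizedSearchCV</span></code> (a hypothetical usage, not taken from the platform’s examples):</p>
-<div class="highlight"><pre>
-from sklearn.model_selection import RandomizedSearchCV
-from sklearn.tree import DecisionTreeClassifier
-
-# Hypothetical: sample max_depth uniformly in [1, 10).
-# (CustomRandint is the class documented above.)
-param_distributions = {"max_depth": CustomRandint(low=1, high=10)}
-search = RandomizedSearchCV(DecisionTreeClassifier(), param_distributions,
-                            n_iter=5, cv=3)
-</pre></div>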
-<dl class="py method">
-<dt id="multiview_platform.mono_multi_view_classifiers.utils.hyper_parameter_search.CustomRandint.get_nb_possibilities">
-<code class="sig-name descname">get_nb_possibilities</code><span class="sig-paren">(</span><span class="sig-paren">)</span><a class="headerlink" href="#multiview_platform.mono_multi_view_classifiers.utils.hyper_parameter_search.CustomRandint.get_nb_possibilities" title="Permalink to this definition">¶</a></dt>
-<dd></dd></dl>
-
-<dl class="py method">
-<dt id="multiview_platform.mono_multi_view_classifiers.utils.hyper_parameter_search.CustomRandint.rvs">
-<code class="sig-name descname">rvs</code><span class="sig-paren">(</span><em class="sig-param"><span class="n">random_state</span><span class="o">=</span><span class="default_value">None</span></em><span class="sig-paren">)</span><a class="headerlink" href="#multiview_platform.mono_multi_view_classifiers.utils.hyper_parameter_search.CustomRandint.rvs" title="Permalink to this definition">¶</a></dt>
-<dd></dd></dl>
-
-</dd></dl>
-
-<dl class="py class">
-<dt id="multiview_platform.mono_multi_view_classifiers.utils.hyper_parameter_search.CustomUniform">
-<em class="property">class </em><code class="sig-name descname">CustomUniform</code><span class="sig-paren">(</span><em class="sig-param"><span class="n">loc</span><span class="o">=</span><span class="default_value">0</span></em>, <em class="sig-param"><span class="n">state</span><span class="o">=</span><span class="default_value">1</span></em>, <em class="sig-param"><span class="n">multiplier</span><span class="o">=</span><span class="default_value">''</span></em><span class="sig-paren">)</span><a class="headerlink" href="#multiview_platform.mono_multi_view_classifiers.utils.hyper_parameter_search.CustomUniform" title="Permalink to this definition">¶</a></dt>
-<dd><p>Bases: <code class="xref py py-class docutils literal notranslate"><span class="pre">object</span></code></p>
-<p>Used as a distribution returning a float between loc and loc + scale.
-It can be used with a multiplier argument to perform more complex generation,
-for example 10e-(float).</p>
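-<p>The multiplier idea can be sketched as follows (the <code class="docutils literal notranslate"><span class="pre">"e-"</span></code> flag, the mapping, and the reading of the constructor’s <code class="docutils literal notranslate"><span class="pre">state</span></code> argument as a scale are assumptions based on the description above):</p>
-<div class="highlight"><pre>
-from scipy.stats import uniform
-
-# Sketch: a uniform draw on [loc, loc + scale), optionally mapped to 10**-draw.
-def custom_uniform_sketch(loc=0, scale=1, multiplier="", random_state=None):
-    draw = uniform(loc=loc, scale=scale).rvs(random_state=random_state)
-    return 10 ** -draw if multiplier == "e-" else draw
-</pre></div>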
-<dl class="py method">
-<dt id="multiview_platform.mono_multi_view_classifiers.utils.hyper_parameter_search.CustomUniform.rvs">
-<code class="sig-name descname">rvs</code><span class="sig-paren">(</span><em class="sig-param"><span class="n">random_state</span><span class="o">=</span><span class="default_value">None</span></em><span class="sig-paren">)</span><a class="headerlink" href="#multiview_platform.mono_multi_view_classifiers.utils.hyper_parameter_search.CustomUniform.rvs" title="Permalink to this definition">¶</a></dt>
-<dd></dd></dl>
-
-</dd></dl>
-
-<dl class="py class">
-<dt id="multiview_platform.mono_multi_view_classifiers.utils.hyper_parameter_search.Grid">
-<em class="property">class </em><code class="sig-name descname">Grid</code><span class="sig-paren">(</span><em class="sig-param"><span class="n">estimator</span></em>, <em class="sig-param"><span class="n">param_grid</span><span class="o">=</span><span class="default_value">{}</span></em>, <em class="sig-param"><span class="n">refit</span><span class="o">=</span><span class="default_value">False</span></em>, <em class="sig-param"><span class="n">n_jobs</span><span class="o">=</span><span class="default_value">1</span></em>, <em class="sig-param"><span class="n">scoring</span><span class="o">=</span><span class="default_value">None</span></em>, <em class="sig-param"><span class="n">cv</span><span class="o">=</span><span class="default_value">None</span></em>, <em class="sig-param"><span class="n">learning_indices</span><span class="o">=</span><span class="default_value">None</span></em>, <em class="sig-param"><span class="n">view_indices</span><span class="o">=</span><span class="default_value">None</span></em>, <em class="sig-param"><span class="n">framework</span><span class="o">=</span><span class="default_value">'monoview'</span></em>, <em class="sig-param"><span class="n">random_state</span><span class="o">=</span><span class="default_value">None</span></em>, <em class="sig-param"><span class="n">track_tracebacks</span><span class="o">=</span><span class="default_value">True</span></em><span class="sig-paren">)</span><a class="headerlink" href="#multiview_platform.mono_multi_view_classifiers.utils.hyper_parameter_search.Grid" title="Permalink to this definition">¶</a></dt>
-<dd><p>Bases: <code class="xref py py-class docutils literal notranslate"><span class="pre">sklearn.model_selection._search.GridSearchCV</span></code>, <a class="reference internal" href="#multiview_platform.mono_multi_view_classifiers.utils.hyper_parameter_search.HPSearch" title="multiview_platform.mono_multi_view_classifiers.utils.hyper_parameter_search.HPSearch"><code class="xref py py-class docutils literal notranslate"><span class="pre">multiview_platform.mono_multi_view_classifiers.utils.hyper_parameter_search.HPSearch</span></code></a></p>
-<dl class="py method">
-<dt id="multiview_platform.mono_multi_view_classifiers.utils.hyper_parameter_search.Grid.fit">
-<code class="sig-name descname">fit</code><span class="sig-paren">(</span><em class="sig-param"><span class="n">X</span></em>, <em class="sig-param"><span class="n">y</span><span class="o">=</span><span class="default_value">None</span></em>, <em class="sig-param"><span class="n">groups</span><span class="o">=</span><span class="default_value">None</span></em>, <em class="sig-param"><span class="o">**</span><span class="n">fit_params</span></em><span class="sig-paren">)</span><a class="headerlink" href="#multiview_platform.mono_multi_view_classifiers.utils.hyper_parameter_search.Grid.fit" title="Permalink to this definition">¶</a></dt>
-<dd><p>Run fit with all sets of parameters.</p>
-<dl class="field-list simple">
-<dt class="field-odd">Parameters</dt>
-<dd class="field-odd"><ul class="simple">
-<li><p><strong>X</strong> (<em>array-like of shape</em><em> (</em><em>n_samples</em><em>, </em><em>n_features</em><em>)</em>) – Training vector, where n_samples is the number of samples and
-n_features is the number of features.</p></li>
-<li><p><strong>y</strong> (<em>array-like of shape</em><em> (</em><em>n_samples</em><em>, </em><em>n_output</em><em>) or </em><em>(</em><em>n_samples</em><em>,</em><em>)</em><em>, </em><em>optional</em>) – Target relative to X for classification or regression;
-None for unsupervised learning.</p></li>
-<li><p><strong>groups</strong> (<em>array-like</em><em>, </em><em>with shape</em><em> (</em><em>n_samples</em><em>,</em><em>)</em><em>, </em><em>optional</em>) – Group labels for the samples used while splitting the dataset into
-train/test set. Only used in conjunction with a “Group” <span class="xref std std-term">cv</span>
-instance (e.g., <code class="xref py py-class docutils literal notranslate"><span class="pre">GroupKFold</span></code>).</p></li>
-<li><p><strong>**fit_params</strong> (<em>dict of string -&gt; object</em>) – Parameters passed to the <code class="docutils literal notranslate"><span class="pre">fit</span></code> method of the estimator</p></li>
-</ul>
-</dd>
-</dl>
-</dd></dl>
-
-<dl class="py method">
-<dt id="multiview_platform.mono_multi_view_classifiers.utils.hyper_parameter_search.Grid.get_candidate_params">
-<code class="sig-name descname">get_candidate_params</code><span class="sig-paren">(</span><em class="sig-param"><span class="n">X</span></em><span class="sig-paren">)</span><a class="headerlink" href="#multiview_platform.mono_multi_view_classifiers.utils.hyper_parameter_search.Grid.get_candidate_params" title="Permalink to this definition">¶</a></dt>
-<dd></dd></dl>
-
-</dd></dl>
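-
-<p>A minimal monoview sketch, mirroring the GridSearchCV interface this class
-extends (the estimator, data and grid below are assumptions for illustration):</p>
-<pre>
-import numpy as np
-from sklearn.tree import DecisionTreeClassifier
-
-X = np.random.RandomState(0).rand(30, 4)
-y = np.random.RandomState(0).randint(0, 2, 30)
-
-search = Grid(DecisionTreeClassifier(), param_grid={"max_depth": [1, 3, 5]},
-              cv=3, framework="monoview")
-search.fit(X, y)
-print(search.get_best_params())  # inherited from HPSearch
-</pre>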
-
-<dl class="py class">
-<dt id="multiview_platform.mono_multi_view_classifiers.utils.hyper_parameter_search.HPSearch">
-<em class="property">class </em><code class="sig-name descname">HPSearch</code><a class="headerlink" href="#multiview_platform.mono_multi_view_classifiers.utils.hyper_parameter_search.HPSearch" title="Permalink to this definition">¶</a></dt>
-<dd><p>Bases: <code class="xref py py-class docutils literal notranslate"><span class="pre">object</span></code></p>
-<dl class="py method">
-<dt id="multiview_platform.mono_multi_view_classifiers.utils.hyper_parameter_search.HPSearch.fit_multiview">
-<code class="sig-name descname">fit_multiview</code><span class="sig-paren">(</span><em class="sig-param"><span class="n">X</span></em>, <em class="sig-param"><span class="n">y</span></em>, <em class="sig-param"><span class="n">groups</span><span class="o">=</span><span class="default_value">None</span></em>, <em class="sig-param"><span class="o">**</span><span class="n">fit_params</span></em><span class="sig-paren">)</span><a class="headerlink" href="#multiview_platform.mono_multi_view_classifiers.utils.hyper_parameter_search.HPSearch.fit_multiview" title="Permalink to this definition">¶</a></dt>
-<dd></dd></dl>
-
-<dl class="py method">
-<dt id="multiview_platform.mono_multi_view_classifiers.utils.hyper_parameter_search.HPSearch.gen_report">
-<code class="sig-name descname">gen_report</code><span class="sig-paren">(</span><em class="sig-param"><span class="n">output_file_name</span></em><span class="sig-paren">)</span><a class="headerlink" href="#multiview_platform.mono_multi_view_classifiers.utils.hyper_parameter_search.HPSearch.gen_report" title="Permalink to this definition">¶</a></dt>
-<dd></dd></dl>
-
-<dl class="py method">
-<dt id="multiview_platform.mono_multi_view_classifiers.utils.hyper_parameter_search.HPSearch.get_best_params">
-<code class="sig-name descname">get_best_params</code><span class="sig-paren">(</span><span class="sig-paren">)</span><a class="headerlink" href="#multiview_platform.mono_multi_view_classifiers.utils.hyper_parameter_search.HPSearch.get_best_params" title="Permalink to this definition">¶</a></dt>
-<dd></dd></dl>
-
-<dl class="py method">
-<dt id="multiview_platform.mono_multi_view_classifiers.utils.hyper_parameter_search.HPSearch.get_candidate_params">
-<em class="property">abstract </em><code class="sig-name descname">get_candidate_params</code><span class="sig-paren">(</span><em class="sig-param"><span class="n">X</span></em><span class="sig-paren">)</span><a class="headerlink" href="#multiview_platform.mono_multi_view_classifiers.utils.hyper_parameter_search.HPSearch.get_candidate_params" title="Permalink to this definition">¶</a></dt>
-<dd></dd></dl>
-
-<dl class="py method">
-<dt id="multiview_platform.mono_multi_view_classifiers.utils.hyper_parameter_search.HPSearch.get_scoring">
-<code class="sig-name descname">get_scoring</code><span class="sig-paren">(</span><em class="sig-param"><span class="n">metric</span></em><span class="sig-paren">)</span><a class="headerlink" href="#multiview_platform.mono_multi_view_classifiers.utils.hyper_parameter_search.HPSearch.get_scoring" title="Permalink to this definition">¶</a></dt>
-<dd></dd></dl>
-
-</dd></dl>
-
-<dl class="py class">
-<dt id="multiview_platform.mono_multi_view_classifiers.utils.hyper_parameter_search.Random">
-<em class="property">class </em><code class="sig-name descname">Random</code><span class="sig-paren">(</span><em class="sig-param"><span class="n">estimator</span></em>, <em class="sig-param"><span class="n">param_distributions</span><span class="o">=</span><span class="default_value">None</span></em>, <em class="sig-param"><span class="n">n_iter</span><span class="o">=</span><span class="default_value">10</span></em>, <em class="sig-param"><span class="n">refit</span><span class="o">=</span><span class="default_value">False</span></em>, <em class="sig-param"><span class="n">n_jobs</span><span class="o">=</span><span class="default_value">1</span></em>, <em class="sig-param"><span class="n">scoring</span><span class="o">=</span><span class="default_value">None</span></em>, <em class="sig-param"><span class="n">cv</span><span class="o">=</span><span class="default_value">None</span></em>, <em class="sig-param"><span class="n">random_state</span><span class="o">=</span><span class="default_value">None</span></em>, <em class="sig-param"><span class="n">learning_indices</span><span class="o">=</span><span class="default_value">None</span></em>, <em class="sig-param"><span class="n">view_indices</span><span class="o">=</span><span class="default_value">None</span></em>, <em class="sig-param"><span class="n">framework</span><span class="o">=</span><span class="default_value">'monoview'</span></em>, <em class="sig-param"><span class="n">equivalent_draws</span><span class="o">=</span><span class="default_value">True</span></em>, <em class="sig-param"><span class="n">track_tracebacks</span><span class="o">=</span><span class="default_value">True</span></em><span class="sig-paren">)</span><a class="headerlink" href="#multiview_platform.mono_multi_view_classifiers.utils.hyper_parameter_search.Random" title="Permalink to this definition">¶</a></dt>
-<dd><p>Bases: <code class="xref py py-class docutils literal notranslate"><span class="pre">sklearn.model_selection._search.RandomizedSearchCV</span></code>, <a class="reference internal" href="#multiview_platform.mono_multi_view_classifiers.utils.hyper_parameter_search.HPSearch" title="multiview_platform.mono_multi_view_classifiers.utils.hyper_parameter_search.HPSearch"><code class="xref py py-class docutils literal notranslate"><span class="pre">multiview_platform.mono_multi_view_classifiers.utils.hyper_parameter_search.HPSearch</span></code></a></p>
-<dl class="py method">
-<dt id="multiview_platform.mono_multi_view_classifiers.utils.hyper_parameter_search.Random.fit">
-<code class="sig-name descname">fit</code><span class="sig-paren">(</span><em class="sig-param"><span class="n">X</span></em>, <em class="sig-param"><span class="n">y</span><span class="o">=</span><span class="default_value">None</span></em>, <em class="sig-param"><span class="n">groups</span><span class="o">=</span><span class="default_value">None</span></em>, <em class="sig-param"><span class="o">**</span><span class="n">fit_params</span></em><span class="sig-paren">)</span><a class="headerlink" href="#multiview_platform.mono_multi_view_classifiers.utils.hyper_parameter_search.Random.fit" title="Permalink to this definition">¶</a></dt>
-<dd><p>Run fit with all sets of parameters.</p>
-<dl class="field-list simple">
-<dt class="field-odd">Parameters</dt>
-<dd class="field-odd"><ul class="simple">
-<li><p><strong>X</strong> (<em>array-like of shape</em><em> (</em><em>n_samples</em><em>, </em><em>n_features</em><em>)</em>) – Training vector, where n_samples is the number of samples and
-n_features is the number of features.</p></li>
-<li><p><strong>y</strong> (<em>array-like of shape</em><em> (</em><em>n_samples</em><em>, </em><em>n_output</em><em>) or </em><em>(</em><em>n_samples</em><em>,</em><em>)</em><em>, </em><em>optional</em>) – Target relative to X for classification or regression;
-None for unsupervised learning.</p></li>
-<li><p><strong>groups</strong> (<em>array-like</em><em>, </em><em>with shape</em><em> (</em><em>n_samples</em><em>,</em><em>)</em><em>, </em><em>optional</em>) – Group labels for the samples used while splitting the dataset into
-train/test set. Only used in conjunction with a “Group” <span class="xref std std-term">cv</span>
-instance (e.g., <code class="xref py py-class docutils literal notranslate"><span class="pre">GroupKFold</span></code>).</p></li>
-<li><p><strong>**fit_params</strong> (<em>dict of string -&gt; object</em>) – Parameters passed to the <code class="docutils literal notranslate"><span class="pre">fit</span></code> method of the estimator</p></li>
-</ul>
-</dd>
-</dl>
-</dd></dl>
-
-<dl class="py method">
-<dt id="multiview_platform.mono_multi_view_classifiers.utils.hyper_parameter_search.Random.get_candidate_params">
-<code class="sig-name descname">get_candidate_params</code><span class="sig-paren">(</span><em class="sig-param"><span class="n">X</span></em><span class="sig-paren">)</span><a class="headerlink" href="#multiview_platform.mono_multi_view_classifiers.utils.hyper_parameter_search.Random.get_candidate_params" title="Permalink to this definition">¶</a></dt>
-<dd></dd></dl>
-
-<dl class="py method">
-<dt id="multiview_platform.mono_multi_view_classifiers.utils.hyper_parameter_search.Random.get_param_distribs">
-<code class="sig-name descname">get_param_distribs</code><span class="sig-paren">(</span><em class="sig-param"><span class="n">estimator</span></em><span class="sig-paren">)</span><a class="headerlink" href="#multiview_platform.mono_multi_view_classifiers.utils.hyper_parameter_search.Random.get_param_distribs" title="Permalink to this definition">¶</a></dt>
-<dd></dd></dl>
-
-</dd></dl>
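-
-<p>The randomized counterpart follows the same pattern; a short sketch under the
-same assumptions as the Grid example above (the report file name is assumed):</p>
-<pre>
-search = Random(DecisionTreeClassifier(),
-                param_distributions={"max_depth": [1, 2, 3, 4, 5]},
-                n_iter=5, cv=3, framework="monoview", random_state=42)
-search.fit(X, y)
-search.gen_report("random_search_report")  # writes the HPSearch report
-</pre>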
-
-<dl class="py function">
-<dt id="multiview_platform.mono_multi_view_classifiers.utils.hyper_parameter_search.format_params">
-<code class="sig-name descname">format_params</code><span class="sig-paren">(</span><em class="sig-param"><span class="n">params</span></em>, <em class="sig-param"><span class="n">pref</span><span class="o">=</span><span class="default_value">''</span></em><span class="sig-paren">)</span><a class="headerlink" href="#multiview_platform.mono_multi_view_classifiers.utils.hyper_parameter_search.format_params" title="Permalink to this definition">¶</a></dt>
-<dd></dd></dl>
-
-<dl class="py function">
-<dt id="multiview_platform.mono_multi_view_classifiers.utils.hyper_parameter_search.gen_heat_maps">
-<code class="sig-name descname">gen_heat_maps</code><span class="sig-paren">(</span><em class="sig-param"><span class="n">params</span></em>, <em class="sig-param"><span class="n">scores_array</span></em>, <em class="sig-param"><span class="n">output_file_name</span></em><span class="sig-paren">)</span><a class="headerlink" href="#multiview_platform.mono_multi_view_classifiers.utils.hyper_parameter_search.gen_heat_maps" title="Permalink to this definition">¶</a></dt>
-<dd><p>Used to generate a heat map for each pair of hyperparameters
-optimized by the previous function</p>
-</dd></dl>
-
-<dl class="py function">
-<dt id="multiview_platform.mono_multi_view_classifiers.utils.hyper_parameter_search.spear_mint">
-<code class="sig-name descname">spear_mint</code><span class="sig-paren">(</span><em class="sig-param"><span class="n">dataset</span></em>, <em class="sig-param"><span class="n">classifier_name</span></em>, <em class="sig-param"><span class="n">views_indices</span><span class="o">=</span><span class="default_value">None</span></em>, <em class="sig-param"><span class="n">k_folds</span><span class="o">=</span><span class="default_value">None</span></em>, <em class="sig-param"><span class="n">n_iter</span><span class="o">=</span><span class="default_value">1</span></em>, <em class="sig-param"><span class="o">**</span><span class="n">kwargs</span></em><span class="sig-paren">)</span><a class="headerlink" href="#multiview_platform.mono_multi_view_classifiers.utils.hyper_parameter_search.spear_mint" title="Permalink to this definition">¶</a></dt>
-<dd><p>Used to run Spearmint on the classifiers to optimize hyperparameters;
-slower than random search (it cannot be parallelized)</p>
-</dd></dl>
-
+<div class="section" id="multiview-platform-mono-multi-view-classifiers-utils-hyper-parameter-search-module">
+<h2>multiview_platform.mono_multi_view_classifiers.utils.hyper_parameter_search module<a class="headerlink" href="#multiview-platform-mono-multi-view-classifiers-utils-hyper-parameter-search-module" title="Permalink to this headline">¶</a></h2>
 </div>
-<div class="section" id="module-multiview_platform.mono_multi_view_classifiers.utils.make_file_config">
-<span id="multiview-platform-mono-multi-view-classifiers-utils-make-file-config-module"></span><h2>multiview_platform.mono_multi_view_classifiers.utils.make_file_config module<a class="headerlink" href="#module-multiview_platform.mono_multi_view_classifiers.utils.make_file_config" title="Permalink to this headline">¶</a></h2>
-<dl class="py class">
-<dt id="multiview_platform.mono_multi_view_classifiers.utils.make_file_config.ConfigurationMaker">
-<em class="property">class </em><code class="sig-name descname">ConfigurationMaker</code><span class="sig-paren">(</span><em class="sig-param"><span class="n">classifier_dict</span><span class="o">=</span><span class="default_value">None</span></em><span class="sig-paren">)</span><a class="headerlink" href="#multiview_platform.mono_multi_view_classifiers.utils.make_file_config.ConfigurationMaker" title="Permalink to this definition">¶</a></dt>
-<dd><p>Bases: <code class="xref py py-class docutils literal notranslate"><span class="pre">object</span></code></p>
-<p>Finds the name of the classifier to report from the classifier dict</p>
-</dd></dl>
-
+<div class="section" id="multiview-platform-mono-multi-view-classifiers-utils-make-file-config-module">
+<h2>multiview_platform.mono_multi_view_classifiers.utils.make_file_config module<a class="headerlink" href="#multiview-platform-mono-multi-view-classifiers-utils-make-file-config-module" title="Permalink to this headline">¶</a></h2>
 </div>
-<div class="section" id="module-multiview_platform.mono_multi_view_classifiers.utils.multiclass">
-<span id="multiview-platform-mono-multi-view-classifiers-utils-multiclass-module"></span><h2>multiview_platform.mono_multi_view_classifiers.utils.multiclass module<a class="headerlink" href="#module-multiview_platform.mono_multi_view_classifiers.utils.multiclass" title="Permalink to this headline">¶</a></h2>
-<dl class="py class">
-<dt id="multiview_platform.mono_multi_view_classifiers.utils.multiclass.MonoviewWrapper">
-<em class="property">class </em><code class="sig-name descname">MonoviewWrapper</code><a class="headerlink" href="#multiview_platform.mono_multi_view_classifiers.utils.multiclass.MonoviewWrapper" title="Permalink to this definition">¶</a></dt>
-<dd><p>Bases: <a class="reference internal" href="#multiview_platform.mono_multi_view_classifiers.utils.multiclass.MultiClassWrapper" title="multiview_platform.mono_multi_view_classifiers.utils.multiclass.MultiClassWrapper"><code class="xref py py-class docutils literal notranslate"><span class="pre">multiview_platform.mono_multi_view_classifiers.utils.multiclass.MultiClassWrapper</span></code></a></p>
-</dd></dl>
-
-<dl class="py class">
-<dt id="multiview_platform.mono_multi_view_classifiers.utils.multiclass.MultiClassWrapper">
-<em class="property">class </em><code class="sig-name descname">MultiClassWrapper</code><a class="headerlink" href="#multiview_platform.mono_multi_view_classifiers.utils.multiclass.MultiClassWrapper" title="Permalink to this definition">¶</a></dt>
-<dd><p>Bases: <code class="xref py py-class docutils literal notranslate"><span class="pre">object</span></code></p>
-<dl class="py method">
-<dt id="multiview_platform.mono_multi_view_classifiers.utils.multiclass.MultiClassWrapper.format_params">
-<code class="sig-name descname">format_params</code><span class="sig-paren">(</span><em class="sig-param"><span class="n">params</span></em>, <em class="sig-param"><span class="n">deep</span><span class="o">=</span><span class="default_value">True</span></em><span class="sig-paren">)</span><a class="headerlink" href="#multiview_platform.mono_multi_view_classifiers.utils.multiclass.MultiClassWrapper.format_params" title="Permalink to this definition">¶</a></dt>
-<dd></dd></dl>
-
-<dl class="py method">
-<dt id="multiview_platform.mono_multi_view_classifiers.utils.multiclass.MultiClassWrapper.get_config">
-<code class="sig-name descname">get_config</code><span class="sig-paren">(</span><span class="sig-paren">)</span><a class="headerlink" href="#multiview_platform.mono_multi_view_classifiers.utils.multiclass.MultiClassWrapper.get_config" title="Permalink to this definition">¶</a></dt>
-<dd></dd></dl>
-
-<dl class="py method">
-<dt id="multiview_platform.mono_multi_view_classifiers.utils.multiclass.MultiClassWrapper.get_interpretation">
-<code class="sig-name descname">get_interpretation</code><span class="sig-paren">(</span><em class="sig-param"><span class="n">directory</span></em>, <em class="sig-param"><span class="n">base_file_name</span></em>, <em class="sig-param"><span class="n">y_test</span><span class="o">=</span><span class="default_value">None</span></em><span class="sig-paren">)</span><a class="headerlink" href="#multiview_platform.mono_multi_view_classifiers.utils.multiclass.MultiClassWrapper.get_interpretation" title="Permalink to this definition">¶</a></dt>
-<dd></dd></dl>
-
-<dl class="py method">
-<dt id="multiview_platform.mono_multi_view_classifiers.utils.multiclass.MultiClassWrapper.set_params">
-<code class="sig-name descname">set_params</code><span class="sig-paren">(</span><em class="sig-param"><span class="o">**</span><span class="n">params</span></em><span class="sig-paren">)</span><a class="headerlink" href="#multiview_platform.mono_multi_view_classifiers.utils.multiclass.MultiClassWrapper.set_params" title="Permalink to this definition">¶</a></dt>
-<dd><p>This method keeps the OV_Wrappers transparent in terms of parameters:
-if it were removed, parameters would have to be specified as estimator__param,
-which is not relevant for the platform</p>
-</dd></dl>
-
-</dd></dl>
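-
-<p>To see why this transparency matters, compare with a plain sklearn wrapper (a
-sketch; the parameter name is an arbitrary example):</p>
-<pre>
-from sklearn.multiclass import OneVsRestClassifier
-from sklearn.tree import DecisionTreeClassifier
-
-ovr = OneVsRestClassifier(DecisionTreeClassifier())
-# Plain sklearn wrappers need the estimator__ prefix:
-ovr.set_params(estimator__max_depth=3)
-# MultiClassWrapper.set_params forwards unprefixed names instead, so the
-# platform can keep calling wrapper.set_params(max_depth=3) directly.
-</pre>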
-
-<dl class="py class">
-<dt id="multiview_platform.mono_multi_view_classifiers.utils.multiclass.MultiviewOVOWrapper">
-<em class="property">class </em><code class="sig-name descname">MultiviewOVOWrapper</code><span class="sig-paren">(</span><em class="sig-param"><span class="n">estimator</span><span class="o">=</span><span class="default_value">None</span></em>, <em class="sig-param"><span class="o">**</span><span class="n">args</span></em><span class="sig-paren">)</span><a class="headerlink" href="#multiview_platform.mono_multi_view_classifiers.utils.multiclass.MultiviewOVOWrapper" title="Permalink to this definition">¶</a></dt>
-<dd><p>Bases: <a class="reference internal" href="#multiview_platform.mono_multi_view_classifiers.utils.multiclass.MultiviewWrapper" title="multiview_platform.mono_multi_view_classifiers.utils.multiclass.MultiviewWrapper"><code class="xref py py-class docutils literal notranslate"><span class="pre">multiview_platform.mono_multi_view_classifiers.utils.multiclass.MultiviewWrapper</span></code></a>, <code class="xref py py-class docutils literal notranslate"><span class="pre">sklearn.multiclass.OneVsOneClassifier</span></code></p>
-<dl class="py method">
-<dt id="multiview_platform.mono_multi_view_classifiers.utils.multiclass.MultiviewOVOWrapper.fit">
-<code class="sig-name descname">fit</code><span class="sig-paren">(</span><em class="sig-param"><span class="n">X</span></em>, <em class="sig-param"><span class="n">y</span></em>, <em class="sig-param"><span class="n">train_indices</span><span class="o">=</span><span class="default_value">None</span></em>, <em class="sig-param"><span class="n">view_indices</span><span class="o">=</span><span class="default_value">None</span></em><span class="sig-paren">)</span><a class="headerlink" href="#multiview_platform.mono_multi_view_classifiers.utils.multiclass.MultiviewOVOWrapper.fit" title="Permalink to this definition">¶</a></dt>
-<dd><p>Fit underlying estimators.</p>
-<dl class="field-list simple">
-<dt class="field-odd">Parameters</dt>
-<dd class="field-odd"><ul class="simple">
-<li><p><strong>X</strong> (<em>(</em><em>sparse</em><em>) </em><em>array-like of shape</em><em> (</em><em>n_samples</em><em>, </em><em>n_features</em><em>)</em>) – Data.</p></li>
-<li><p><strong>y</strong> (<em>array-like of shape</em><em> (</em><em>n_samples</em><em>,</em><em>)</em>) – Multi-class targets.</p></li>
-</ul>
-</dd>
-<dt class="field-even">Returns</dt>
-<dd class="field-even"><p></p>
-</dd>
-<dt class="field-odd">Return type</dt>
-<dd class="field-odd"><p>self</p>
-</dd>
-</dl>
-</dd></dl>
-
-<dl class="py method">
-<dt id="multiview_platform.mono_multi_view_classifiers.utils.multiclass.MultiviewOVOWrapper.get_params">
-<code class="sig-name descname">get_params</code><span class="sig-paren">(</span><em class="sig-param"><span class="n">deep</span><span class="o">=</span><span class="default_value">True</span></em><span class="sig-paren">)</span><a class="headerlink" href="#multiview_platform.mono_multi_view_classifiers.utils.multiclass.MultiviewOVOWrapper.get_params" title="Permalink to this definition">¶</a></dt>
-<dd><p>Get parameters for this estimator.</p>
-<dl class="field-list simple">
-<dt class="field-odd">Parameters</dt>
-<dd class="field-odd"><p><strong>deep</strong> (<em>bool</em><em>, </em><em>default=True</em>) – If True, will return the parameters for this estimator and
-contained subobjects that are estimators.</p>
-</dd>
-<dt class="field-even">Returns</dt>
-<dd class="field-even"><p><strong>params</strong> – Parameter names mapped to their values.</p>
-</dd>
-<dt class="field-odd">Return type</dt>
-<dd class="field-odd"><p>mapping of string to any</p>
-</dd>
-</dl>
-</dd></dl>
-
-<dl class="py method">
-<dt id="multiview_platform.mono_multi_view_classifiers.utils.multiclass.MultiviewOVOWrapper.multiview_decision_function">
-<code class="sig-name descname">multiview_decision_function</code><span class="sig-paren">(</span><em class="sig-param"><span class="n">X</span></em>, <em class="sig-param"><span class="n">example_indices</span></em>, <em class="sig-param"><span class="n">view_indices</span></em><span class="sig-paren">)</span><a class="headerlink" href="#multiview_platform.mono_multi_view_classifiers.utils.multiclass.MultiviewOVOWrapper.multiview_decision_function" title="Permalink to this definition">¶</a></dt>
-<dd></dd></dl>
-
-<dl class="py method">
-<dt id="multiview_platform.mono_multi_view_classifiers.utils.multiclass.MultiviewOVOWrapper.predict">
-<code class="sig-name descname">predict</code><span class="sig-paren">(</span><em class="sig-param"><span class="n">X</span></em>, <em class="sig-param"><span class="n">example_indices</span><span class="o">=</span><span class="default_value">None</span></em>, <em class="sig-param"><span class="n">view_indices</span><span class="o">=</span><span class="default_value">None</span></em><span class="sig-paren">)</span><a class="headerlink" href="#multiview_platform.mono_multi_view_classifiers.utils.multiclass.MultiviewOVOWrapper.predict" title="Permalink to this definition">¶</a></dt>
-<dd><p>Estimate the best class label for each sample in X.</p>
-<p>This is implemented as <code class="docutils literal notranslate"><span class="pre">argmax(decision_function(X),</span> <span class="pre">axis=1)</span></code> which
-will return the label of the class with most votes by estimators
-predicting the outcome of a decision for each possible class pair.</p>
-<dl class="field-list simple">
-<dt class="field-odd">Parameters</dt>
-<dd class="field-odd"><p><strong>X</strong> (<em>(</em><em>sparse</em><em>) </em><em>array-like of shape</em><em> (</em><em>n_samples</em><em>, </em><em>n_features</em><em>)</em>) – Data.</p>
-</dd>
-<dt class="field-even">Returns</dt>
-<dd class="field-even"><p><strong>y</strong> – Predicted multi-class targets.</p>
-</dd>
-<dt class="field-odd">Return type</dt>
-<dd class="field-odd"><p>numpy array of shape [n_samples]</p>
-</dd>
-</dl>
-</dd></dl>
-
-</dd></dl>
-
-<dl class="py class">
-<dt id="multiview_platform.mono_multi_view_classifiers.utils.multiclass.MultiviewOVRWrapper">
-<em class="property">class </em><code class="sig-name descname">MultiviewOVRWrapper</code><span class="sig-paren">(</span><em class="sig-param"><span class="n">estimator</span><span class="o">=</span><span class="default_value">None</span></em>, <em class="sig-param"><span class="o">**</span><span class="n">args</span></em><span class="sig-paren">)</span><a class="headerlink" href="#multiview_platform.mono_multi_view_classifiers.utils.multiclass.MultiviewOVRWrapper" title="Permalink to this definition">¶</a></dt>
-<dd><p>Bases: <a class="reference internal" href="#multiview_platform.mono_multi_view_classifiers.utils.multiclass.MultiviewWrapper" title="multiview_platform.mono_multi_view_classifiers.utils.multiclass.MultiviewWrapper"><code class="xref py py-class docutils literal notranslate"><span class="pre">multiview_platform.mono_multi_view_classifiers.utils.multiclass.MultiviewWrapper</span></code></a>, <code class="xref py py-class docutils literal notranslate"><span class="pre">sklearn.multiclass.OneVsRestClassifier</span></code></p>
-<dl class="py method">
-<dt id="multiview_platform.mono_multi_view_classifiers.utils.multiclass.MultiviewOVRWrapper.fit">
-<code class="sig-name descname">fit</code><span class="sig-paren">(</span><em class="sig-param"><span class="n">X</span></em>, <em class="sig-param"><span class="n">y</span></em>, <em class="sig-param"><span class="n">train_indices</span><span class="o">=</span><span class="default_value">None</span></em>, <em class="sig-param"><span class="n">view_indices</span><span class="o">=</span><span class="default_value">None</span></em><span class="sig-paren">)</span><a class="headerlink" href="#multiview_platform.mono_multi_view_classifiers.utils.multiclass.MultiviewOVRWrapper.fit" title="Permalink to this definition">¶</a></dt>
-<dd><p>Fit underlying estimators.</p>
-<dl class="field-list simple">
-<dt class="field-odd">Parameters</dt>
-<dd class="field-odd"><ul class="simple">
-<li><p><strong>X</strong> (<em>(</em><em>sparse</em><em>) </em><em>array-like of shape</em><em> (</em><em>n_samples</em><em>, </em><em>n_features</em><em>)</em>) – Data.</p></li>
-<li><p><strong>y</strong> (<em>(</em><em>sparse</em><em>) </em><em>array-like of shape</em><em> (</em><em>n_samples</em><em>,</em><em>) or </em><em>(</em><em>n_samples</em><em>, </em><em>n_classes</em><em>)</em>) – Multi-class targets. An indicator matrix turns on multilabel
-classification.</p></li>
-</ul>
-</dd>
-<dt class="field-even">Returns</dt>
-<dd class="field-even"><p></p>
-</dd>
-<dt class="field-odd">Return type</dt>
-<dd class="field-odd"><p>self</p>
-</dd>
-</dl>
-</dd></dl>
-
-<dl class="py method">
-<dt id="multiview_platform.mono_multi_view_classifiers.utils.multiclass.MultiviewOVRWrapper.get_params">
-<code class="sig-name descname">get_params</code><span class="sig-paren">(</span><em class="sig-param"><span class="n">deep</span><span class="o">=</span><span class="default_value">True</span></em><span class="sig-paren">)</span><a class="headerlink" href="#multiview_platform.mono_multi_view_classifiers.utils.multiclass.MultiviewOVRWrapper.get_params" title="Permalink to this definition">¶</a></dt>
-<dd><p>Get parameters for this estimator.</p>
-<dl class="field-list simple">
-<dt class="field-odd">Parameters</dt>
-<dd class="field-odd"><p><strong>deep</strong> (<em>bool</em><em>, </em><em>default=True</em>) – If True, will return the parameters for this estimator and
-contained subobjects that are estimators.</p>
-</dd>
-<dt class="field-even">Returns</dt>
-<dd class="field-even"><p><strong>params</strong> – Parameter names mapped to their values.</p>
-</dd>
-<dt class="field-odd">Return type</dt>
-<dd class="field-odd"><p>mapping of string to any</p>
-</dd>
-</dl>
-</dd></dl>
-
-<dl class="py method">
-<dt id="multiview_platform.mono_multi_view_classifiers.utils.multiclass.MultiviewOVRWrapper.predict">
-<code class="sig-name descname">predict</code><span class="sig-paren">(</span><em class="sig-param"><span class="n">X</span></em>, <em class="sig-param"><span class="n">example_indices</span><span class="o">=</span><span class="default_value">None</span></em>, <em class="sig-param"><span class="n">view_indices</span><span class="o">=</span><span class="default_value">None</span></em><span class="sig-paren">)</span><a class="headerlink" href="#multiview_platform.mono_multi_view_classifiers.utils.multiclass.MultiviewOVRWrapper.predict" title="Permalink to this definition">¶</a></dt>
-<dd><p>Predict multi-class targets using underlying estimators.</p>
-<dl class="field-list simple">
-<dt class="field-odd">Parameters</dt>
-<dd class="field-odd"><p><strong>X</strong> (<em>(</em><em>sparse</em><em>) </em><em>array-like of shape</em><em> (</em><em>n_samples</em><em>, </em><em>n_features</em><em>)</em>) – Data.</p>
-</dd>
-<dt class="field-even">Returns</dt>
-<dd class="field-even"><p><strong>y</strong> – Predicted multi-class targets.</p>
-</dd>
-<dt class="field-odd">Return type</dt>
-<dd class="field-odd"><p>(sparse) array-like of shape (n_samples,) or (n_samples, n_classes)</p>
-</dd>
-</dl>
-</dd></dl>
-
-</dd></dl>
-
-<dl class="py class">
-<dt id="multiview_platform.mono_multi_view_classifiers.utils.multiclass.MultiviewWrapper">
-<em class="property">class </em><code class="sig-name descname">MultiviewWrapper</code><span class="sig-paren">(</span><em class="sig-param"><span class="n">estimator</span><span class="o">=</span><span class="default_value">None</span></em>, <em class="sig-param"><span class="o">**</span><span class="n">args</span></em><span class="sig-paren">)</span><a class="headerlink" href="#multiview_platform.mono_multi_view_classifiers.utils.multiclass.MultiviewWrapper" title="Permalink to this definition">¶</a></dt>
-<dd><p>Bases: <a class="reference internal" href="#multiview_platform.mono_multi_view_classifiers.utils.multiclass.MultiClassWrapper" title="multiview_platform.mono_multi_view_classifiers.utils.multiclass.MultiClassWrapper"><code class="xref py py-class docutils literal notranslate"><span class="pre">multiview_platform.mono_multi_view_classifiers.utils.multiclass.MultiClassWrapper</span></code></a></p>
-</dd></dl>
-
-<dl class="py class">
-<dt id="multiview_platform.mono_multi_view_classifiers.utils.multiclass.OVOWrapper">
-<em class="property">class </em><code class="sig-name descname">OVOWrapper</code><span class="sig-paren">(</span><em class="sig-param"><span class="n">estimator</span></em>, <em class="sig-param"><span class="n">n_jobs</span><span class="o">=</span><span class="default_value">None</span></em><span class="sig-paren">)</span><a class="headerlink" href="#multiview_platform.mono_multi_view_classifiers.utils.multiclass.OVOWrapper" title="Permalink to this definition">¶</a></dt>
-<dd><p>Bases: <a class="reference internal" href="#multiview_platform.mono_multi_view_classifiers.utils.multiclass.MonoviewWrapper" title="multiview_platform.mono_multi_view_classifiers.utils.multiclass.MonoviewWrapper"><code class="xref py py-class docutils literal notranslate"><span class="pre">multiview_platform.mono_multi_view_classifiers.utils.multiclass.MonoviewWrapper</span></code></a>, <code class="xref py py-class docutils literal notranslate"><span class="pre">sklearn.multiclass.OneVsOneClassifier</span></code></p>
-<dl class="py method">
-<dt id="multiview_platform.mono_multi_view_classifiers.utils.multiclass.OVOWrapper.decision_function">
-<code class="sig-name descname">decision_function</code><span class="sig-paren">(</span><em class="sig-param"><span class="n">X</span></em><span class="sig-paren">)</span><a class="headerlink" href="#multiview_platform.mono_multi_view_classifiers.utils.multiclass.OVOWrapper.decision_function" title="Permalink to this definition">¶</a></dt>
-<dd><p>Decision function for the OneVsOneClassifier.</p>
-<p>The decision values for the samples are computed by adding the
-normalized sum of pair-wise classification confidence levels to the
-votes in order to disambiguate between the decision values when the
-votes for all the classes are equal leading to a tie.</p>
-<dl class="field-list simple">
-<dt class="field-odd">Parameters</dt>
-<dd class="field-odd"><p><strong>X</strong> (<em>array-like of shape</em><em> (</em><em>n_samples</em><em>, </em><em>n_features</em><em>)</em>) – </p>
-</dd>
-<dt class="field-even">Returns</dt>
-<dd class="field-even"><p><strong>Y</strong></p>
-</dd>
-<dt class="field-odd">Return type</dt>
-<dd class="field-odd"><p>array-like of shape (n_samples, n_classes)</p>
-</dd>
-</dl>
-</dd></dl>
-
-<dl class="py method">
-<dt id="multiview_platform.mono_multi_view_classifiers.utils.multiclass.OVOWrapper.get_params">
-<code class="sig-name descname">get_params</code><span class="sig-paren">(</span><em class="sig-param"><span class="n">deep</span><span class="o">=</span><span class="default_value">True</span></em><span class="sig-paren">)</span><a class="headerlink" href="#multiview_platform.mono_multi_view_classifiers.utils.multiclass.OVOWrapper.get_params" title="Permalink to this definition">¶</a></dt>
-<dd><p>Get parameters for this estimator.</p>
-<dl class="field-list simple">
-<dt class="field-odd">Parameters</dt>
-<dd class="field-odd"><p><strong>deep</strong> (<em>bool</em><em>, </em><em>default=True</em>) – If True, will return the parameters for this estimator and
-contained subobjects that are estimators.</p>
-</dd>
-<dt class="field-even">Returns</dt>
-<dd class="field-even"><p><strong>params</strong> – Parameter names mapped to their values.</p>
-</dd>
-<dt class="field-odd">Return type</dt>
-<dd class="field-odd"><p>mapping of string to any</p>
-</dd>
-</dl>
-</dd></dl>
-
-</dd></dl>
-
-<dl class="py class">
-<dt id="multiview_platform.mono_multi_view_classifiers.utils.multiclass.OVRWrapper">
-<em class="property">class </em><code class="sig-name descname">OVRWrapper</code><span class="sig-paren">(</span><em class="sig-param"><span class="n">estimator</span></em>, <em class="sig-param"><span class="n">n_jobs</span><span class="o">=</span><span class="default_value">None</span></em><span class="sig-paren">)</span><a class="headerlink" href="#multiview_platform.mono_multi_view_classifiers.utils.multiclass.OVRWrapper" title="Permalink to this definition">¶</a></dt>
-<dd><p>Bases: <a class="reference internal" href="#multiview_platform.mono_multi_view_classifiers.utils.multiclass.MonoviewWrapper" title="multiview_platform.mono_multi_view_classifiers.utils.multiclass.MonoviewWrapper"><code class="xref py py-class docutils literal notranslate"><span class="pre">multiview_platform.mono_multi_view_classifiers.utils.multiclass.MonoviewWrapper</span></code></a>, <code class="xref py py-class docutils literal notranslate"><span class="pre">sklearn.multiclass.OneVsRestClassifier</span></code></p>
-<dl class="py method">
-<dt id="multiview_platform.mono_multi_view_classifiers.utils.multiclass.OVRWrapper.get_params">
-<code class="sig-name descname">get_params</code><span class="sig-paren">(</span><em class="sig-param"><span class="n">deep</span><span class="o">=</span><span class="default_value">True</span></em><span class="sig-paren">)</span><a class="headerlink" href="#multiview_platform.mono_multi_view_classifiers.utils.multiclass.OVRWrapper.get_params" title="Permalink to this definition">¶</a></dt>
-<dd><p>Get parameters for this estimator.</p>
-<dl class="field-list simple">
-<dt class="field-odd">Parameters</dt>
-<dd class="field-odd"><p><strong>deep</strong> (<em>bool</em><em>, </em><em>default=True</em>) – If True, will return the parameters for this estimator and
-contained subobjects that are estimators.</p>
-</dd>
-<dt class="field-even">Returns</dt>
-<dd class="field-even"><p><strong>params</strong> – Parameter names mapped to their values.</p>
-</dd>
-<dt class="field-odd">Return type</dt>
-<dd class="field-odd"><p>mapping of string to any</p>
-</dd>
-</dl>
-</dd></dl>
-
-</dd></dl>
-
-<dl class="py function">
-<dt id="multiview_platform.mono_multi_view_classifiers.utils.multiclass.get_mc_estim">
-<code class="sig-name descname">get_mc_estim</code><span class="sig-paren">(</span><em class="sig-param"><span class="n">estimator</span></em>, <em class="sig-param"><span class="n">random_state</span></em>, <em class="sig-param"><span class="n">y</span><span class="o">=</span><span class="default_value">None</span></em>, <em class="sig-param"><span class="n">multiview</span><span class="o">=</span><span class="default_value">False</span></em>, <em class="sig-param"><span class="n">multiclass</span><span class="o">=</span><span class="default_value">False</span></em><span class="sig-paren">)</span><a class="headerlink" href="#multiview_platform.mono_multi_view_classifiers.utils.multiclass.get_mc_estim" title="Permalink to this definition">¶</a></dt>
-<dd><p>Used to get a multiclass-compatible estimator if the given one does not natively support multiclass.
-If predict_proba is available on the requested estimator, a one-versus-rest wrapper is returned;
-otherwise, a one-versus-one wrapper is returned.</p>
-<p>To deal with multiview algorithms, multiview wrappers are implemented separately.</p>
-<dl class="field-list simple">
-<dt class="field-odd">Parameters</dt>
-<dd class="field-odd"><ul class="simple">
-<li><p><strong>estimator</strong> (<em>sklearn-like estimator</em>) – Asked estimator</p></li>
-<li><p><strong>y</strong> (<em>numpy.array</em>) – The labels of the problem</p></li>
-<li><p><strong>random_state</strong> (<em>numpy.random.RandomState object</em>) – The random state, used to generate a fake multiclass problem</p></li>
-<li><p><strong>multiview</strong> (<em>bool</em>) – If True, multiview-compatible wrappers are returned.</p></li>
-</ul>
-</dd>
-<dt class="field-even">Returns</dt>
-<dd class="field-even"><p><strong>estimator</strong> – Either the aksed estimator, or a multiclass-compatible wrapper over the asked estimator</p>
-</dd>
-<dt class="field-odd">Return type</dt>
-<dd class="field-odd"><p>sklearn-like estimator</p>
-</dd>
-</dl>
-</dd></dl>
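-
-<p>A minimal usage sketch (the estimator and labels are assumptions chosen for
-illustration):</p>
-<pre>
-import numpy as np
-from sklearn.tree import DecisionTreeClassifier
-
-y = np.array([0, 1, 2, 0, 1, 2])  # a three-class problem
-estim = get_mc_estim(DecisionTreeClassifier(), np.random.RandomState(42),
-                     y=y, multiview=False, multiclass=True)
-# Decision trees support multiclass natively, so the estimator should come
-# back unwrapped; a binary-only estimator would be wrapped in OVR or OVO.
-</pre>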
-
+<div class="section" id="multiview-platform-mono-multi-view-classifiers-utils-multiclass-module">
+<h2>multiview_platform.mono_multi_view_classifiers.utils.multiclass module<a class="headerlink" href="#multiview-platform-mono-multi-view-classifiers-utils-multiclass-module" title="Permalink to this headline">¶</a></h2>
 </div>
-<div class="section" id="module-multiview_platform.mono_multi_view_classifiers.utils.multiview_result_analysis">
-<span id="multiview-platform-mono-multi-view-classifiers-utils-multiview-result-analysis-module"></span><h2>multiview_platform.mono_multi_view_classifiers.utils.multiview_result_analysis module<a class="headerlink" href="#module-multiview_platform.mono_multi_view_classifiers.utils.multiview_result_analysis" title="Permalink to this headline">¶</a></h2>
-<dl class="py function">
-<dt id="multiview_platform.mono_multi_view_classifiers.utils.multiview_result_analysis.get_metrics_scores">
-<code class="sig-name descname">get_metrics_scores</code><span class="sig-paren">(</span><em class="sig-param"><span class="n">metrics_var</span></em>, <em class="sig-param"><span class="n">train_labels</span></em>, <em class="sig-param"><span class="n">test_labels</span></em>, <em class="sig-param"><span class="n">validation_indices</span></em>, <em class="sig-param"><span class="n">learning_indices</span></em>, <em class="sig-param"><span class="n">labels</span></em><span class="sig-paren">)</span><a class="headerlink" href="#multiview_platform.mono_multi_view_classifiers.utils.multiview_result_analysis.get_metrics_scores" title="Permalink to this definition">¶</a></dt>
-<dd></dd></dl>
-
-<dl class="py function">
-<dt id="multiview_platform.mono_multi_view_classifiers.utils.multiview_result_analysis.get_total_metric_scores">
-<code class="sig-name descname">get_total_metric_scores</code><span class="sig-paren">(</span><em class="sig-param"><span class="n">metric</span></em>, <em class="sig-param"><span class="n">train_labels</span></em>, <em class="sig-param"><span class="n">test_labels</span></em>, <em class="sig-param"><span class="n">validation_indices</span></em>, <em class="sig-param"><span class="n">learning_indices</span></em>, <em class="sig-param"><span class="n">labels</span></em><span class="sig-paren">)</span><a class="headerlink" href="#multiview_platform.mono_multi_view_classifiers.utils.multiview_result_analysis.get_total_metric_scores" title="Permalink to this definition">¶</a></dt>
-<dd></dd></dl>
-
-<dl class="py function">
-<dt id="multiview_platform.mono_multi_view_classifiers.utils.multiview_result_analysis.print_metric_score">
-<code class="sig-name descname">print_metric_score</code><span class="sig-paren">(</span><em class="sig-param"><span class="n">metric_scores</span></em>, <em class="sig-param"><span class="n">metrics</span></em><span class="sig-paren">)</span><a class="headerlink" href="#multiview_platform.mono_multi_view_classifiers.utils.multiview_result_analysis.print_metric_score" title="Permalink to this definition">¶</a></dt>
-<dd></dd></dl>
-
+<div class="section" id="multiview-platform-mono-multi-view-classifiers-utils-multiview-result-analysis-module">
+<h2>multiview_platform.mono_multi_view_classifiers.utils.multiview_result_analysis module<a class="headerlink" href="#multiview-platform-mono-multi-view-classifiers-utils-multiview-result-analysis-module" title="Permalink to this headline">¶</a></h2>
 </div>
-<div class="section" id="module-multiview_platform.mono_multi_view_classifiers.utils.transformations">
-<span id="multiview-platform-mono-multi-view-classifiers-utils-transformations-module"></span><h2>multiview_platform.mono_multi_view_classifiers.utils.transformations module<a class="headerlink" href="#module-multiview_platform.mono_multi_view_classifiers.utils.transformations" title="Permalink to this headline">¶</a></h2>
-<dl class="py function">
-<dt id="multiview_platform.mono_multi_view_classifiers.utils.transformations.sign_labels">
-<code class="sig-name descname">sign_labels</code><span class="sig-paren">(</span><em class="sig-param"><span class="n">labels</span></em><span class="sig-paren">)</span><a class="headerlink" href="#multiview_platform.mono_multi_view_classifiers.utils.transformations.sign_labels" title="Permalink to this definition">¶</a></dt>
-<dd><p>Returns a label array with (-1,1) as labels.
-If labels is already made of (-1,1), returns labels unchanged.
-If labels is made of (0,1), returns labels with all
-zeros transformed into -1.</p>
-<dl class="field-list simple">
-<dt class="field-odd">Parameters</dt>
-<dd class="field-odd"><ul class="simple">
-<li><p><strong>labels</strong> – The original label numpy array</p></li>
-</ul>
-</dd>
-<dt class="field-even">Returns</dt>
-<dd class="field-even"><p></p>
-</dd>
-<dt class="field-odd">Return type</dt>
-<dd class="field-odd"><p>A np.array with labels made of (-1,1)</p>
-</dd>
-</dl>
-</dd></dl>
-
-<dl class="py function">
-<dt id="multiview_platform.mono_multi_view_classifiers.utils.transformations.unsign_labels">
-<code class="sig-name descname">unsign_labels</code><span class="sig-paren">(</span><em class="sig-param"><span class="n">labels</span></em><span class="sig-paren">)</span><a class="headerlink" href="#multiview_platform.mono_multi_view_classifiers.utils.transformations.unsign_labels" title="Permalink to this definition">¶</a></dt>
-<dd><p>The inverse of sign_labels: converts (-1,1) labels back to (0,1)</p>
-<dl class="field-list simple">
-<dt class="field-odd">Parameters</dt>
-<dd class="field-odd"><p><strong>labels</strong> – </p>
-</dd>
-</dl>
-</dd></dl>
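-
-<p>A quick round-trip illustrating the two transformations (expected outputs
-follow the docstrings above; they are not taken from the generated docs):</p>
-<pre>
-import numpy as np
-
-signed = sign_labels(np.array([0, 1, 1, 0]))  # expected: [-1, 1, 1, -1]
-back = unsign_labels(signed)                  # expected: [0, 1, 1, 0]
-</pre>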
-
+<div class="section" id="multiview-platform-mono-multi-view-classifiers-utils-transformations-module">
+<h2>multiview_platform.mono_multi_view_classifiers.utils.transformations module<a class="headerlink" href="#multiview-platform-mono-multi-view-classifiers-utils-transformations-module" title="Permalink to this headline">¶</a></h2>
 </div>
-<div class="section" id="module-multiview_platform.mono_multi_view_classifiers.utils">
-<span id="module-contents"></span><h2>Module contents<a class="headerlink" href="#module-multiview_platform.mono_multi_view_classifiers.utils" title="Permalink to this headline">¶</a></h2>
+<div class="section" id="module-contents">
+<h2>Module contents<a class="headerlink" href="#module-contents" title="Permalink to this headline">¶</a></h2>
 </div>
 </div>
 
@@ -1339,16 +95,16 @@ zeros transformed in -1.</p>
   <ul>
 <li><a class="reference internal" href="#">multiview_platform.mono_multi_view_classifiers.utils package</a><ul>
 <li><a class="reference internal" href="#submodules">Submodules</a></li>
-<li><a class="reference internal" href="#module-multiview_platform.mono_multi_view_classifiers.utils.configuration">multiview_platform.mono_multi_view_classifiers.utils.configuration module</a></li>
-<li><a class="reference internal" href="#module-multiview_platform.mono_multi_view_classifiers.utils.dataset">multiview_platform.mono_multi_view_classifiers.utils.dataset module</a></li>
-<li><a class="reference internal" href="#module-multiview_platform.mono_multi_view_classifiers.utils.execution">multiview_platform.mono_multi_view_classifiers.utils.execution module</a></li>
-<li><a class="reference internal" href="#module-multiview_platform.mono_multi_view_classifiers.utils.get_multiview_db">multiview_platform.mono_multi_view_classifiers.utils.get_multiview_db module</a></li>
-<li><a class="reference internal" href="#module-multiview_platform.mono_multi_view_classifiers.utils.hyper_parameter_search">multiview_platform.mono_multi_view_classifiers.utils.hyper_parameter_search module</a></li>
-<li><a class="reference internal" href="#module-multiview_platform.mono_multi_view_classifiers.utils.make_file_config">multiview_platform.mono_multi_view_classifiers.utils.make_file_config module</a></li>
-<li><a class="reference internal" href="#module-multiview_platform.mono_multi_view_classifiers.utils.multiclass">multiview_platform.mono_multi_view_classifiers.utils.multiclass module</a></li>
-<li><a class="reference internal" href="#module-multiview_platform.mono_multi_view_classifiers.utils.multiview_result_analysis">multiview_platform.mono_multi_view_classifiers.utils.multiview_result_analysis module</a></li>
-<li><a class="reference internal" href="#module-multiview_platform.mono_multi_view_classifiers.utils.transformations">multiview_platform.mono_multi_view_classifiers.utils.transformations module</a></li>
-<li><a class="reference internal" href="#module-multiview_platform.mono_multi_view_classifiers.utils">Module contents</a></li>
+<li><a class="reference internal" href="#multiview-platform-mono-multi-view-classifiers-utils-configuration-module">multiview_platform.mono_multi_view_classifiers.utils.configuration module</a></li>
+<li><a class="reference internal" href="#multiview-platform-mono-multi-view-classifiers-utils-dataset-module">multiview_platform.mono_multi_view_classifiers.utils.dataset module</a></li>
+<li><a class="reference internal" href="#multiview-platform-mono-multi-view-classifiers-utils-execution-module">multiview_platform.mono_multi_view_classifiers.utils.execution module</a></li>
+<li><a class="reference internal" href="#multiview-platform-mono-multi-view-classifiers-utils-get-multiview-db-module">multiview_platform.mono_multi_view_classifiers.utils.get_multiview_db module</a></li>
+<li><a class="reference internal" href="#multiview-platform-mono-multi-view-classifiers-utils-hyper-parameter-search-module">multiview_platform.mono_multi_view_classifiers.utils.hyper_parameter_search module</a></li>
+<li><a class="reference internal" href="#multiview-platform-mono-multi-view-classifiers-utils-make-file-config-module">multiview_platform.mono_multi_view_classifiers.utils.make_file_config module</a></li>
+<li><a class="reference internal" href="#multiview-platform-mono-multi-view-classifiers-utils-multiclass-module">multiview_platform.mono_multi_view_classifiers.utils.multiclass module</a></li>
+<li><a class="reference internal" href="#multiview-platform-mono-multi-view-classifiers-utils-multiview-result-analysis-module">multiview_platform.mono_multi_view_classifiers.utils.multiview_result_analysis module</a></li>
+<li><a class="reference internal" href="#multiview-platform-mono-multi-view-classifiers-utils-transformations-module">multiview_platform.mono_multi_view_classifiers.utils.transformations module</a></li>
+<li><a class="reference internal" href="#module-contents">Module contents</a></li>
 </ul>
 </li>
 </ul>
diff --git a/docs/build/references/multiview_platform.tests.html b/docs/build/references/multiview_platform.tests.html
index a0ddb012b93e63e577f956511513477d15296320..a38fde72a0afaa25630a7200dd69265205a5e7ca 100644
--- a/docs/build/references/multiview_platform.tests.html
+++ b/docs/build/references/multiview_platform.tests.html
@@ -1,9 +1,11 @@
 
-<!DOCTYPE html>
+<!DOCTYPE html PUBLIC "-//W3C//DTD XHTML 1.0 Transitional//EN"
+  "http://www.w3.org/TR/xhtml1/DTD/xhtml1-transitional.dtd">
 
-<html>
+<html xmlns="http://www.w3.org/1999/xhtml">
   <head>
-    <meta charset="utf-8" />
+    <meta http-equiv="X-UA-Compatible" content="IE=Edge" />
+    <meta http-equiv="Content-Type" content="text/html; charset=utf-8" />
     <title>multiview_platform.tests package &#8212; SuMMIT 0 documentation</title>
     <link rel="stylesheet" href="../_static/classic.css" type="text/css" />
     <link rel="stylesheet" href="../_static/pygments.css" type="text/css" />
@@ -53,22 +55,22 @@
 <ul>
 <li class="toctree-l1"><a class="reference internal" href="multiview_platform.tests.test_metrics.html">multiview_platform.tests.test_metrics package</a><ul>
 <li class="toctree-l2"><a class="reference internal" href="multiview_platform.tests.test_metrics.html#submodules">Submodules</a></li>
-<li class="toctree-l2"><a class="reference internal" href="multiview_platform.tests.test_metrics.html#module-multiview_platform.tests.test_metrics.test_accuracy_score">multiview_platform.tests.test_metrics.test_accuracy_score module</a></li>
-<li class="toctree-l2"><a class="reference internal" href="multiview_platform.tests.test_metrics.html#module-multiview_platform.tests.test_metrics">Module contents</a></li>
+<li class="toctree-l2"><a class="reference internal" href="multiview_platform.tests.test_metrics.html#multiview-platform-tests-test-metrics-test-accuracy-score-module">multiview_platform.tests.test_metrics.test_accuracy_score module</a></li>
+<li class="toctree-l2"><a class="reference internal" href="multiview_platform.tests.test_metrics.html#module-contents">Module contents</a></li>
 </ul>
 </li>
 <li class="toctree-l1"><a class="reference internal" href="multiview_platform.tests.test_mono_view.html">multiview_platform.tests.test_mono_view package</a><ul>
 <li class="toctree-l2"><a class="reference internal" href="multiview_platform.tests.test_mono_view.html#submodules">Submodules</a></li>
-<li class="toctree-l2"><a class="reference internal" href="multiview_platform.tests.test_mono_view.html#module-multiview_platform.tests.test_mono_view.test_ExecClassifMonoView">multiview_platform.tests.test_mono_view.test_ExecClassifMonoView module</a></li>
-<li class="toctree-l2"><a class="reference internal" href="multiview_platform.tests.test_mono_view.html#module-multiview_platform.tests.test_mono_view.test_MonoviewUtils">multiview_platform.tests.test_mono_view.test_MonoviewUtils module</a></li>
-<li class="toctree-l2"><a class="reference internal" href="multiview_platform.tests.test_mono_view.html#module-multiview_platform.tests.test_mono_view">Module contents</a></li>
+<li class="toctree-l2"><a class="reference internal" href="multiview_platform.tests.test_mono_view.html#multiview-platform-tests-test-mono-view-test-execclassifmonoview-module">multiview_platform.tests.test_mono_view.test_ExecClassifMonoView module</a></li>
+<li class="toctree-l2"><a class="reference internal" href="multiview_platform.tests.test_mono_view.html#multiview-platform-tests-test-mono-view-test-monoviewutils-module">multiview_platform.tests.test_mono_view.test_MonoviewUtils module</a></li>
+<li class="toctree-l2"><a class="reference internal" href="multiview_platform.tests.test_mono_view.html#module-contents">Module contents</a></li>
 </ul>
 </li>
 <li class="toctree-l1"><a class="reference internal" href="multiview_platform.tests.test_monoview_classifiers.html">multiview_platform.tests.test_monoview_classifiers package</a><ul>
 <li class="toctree-l2"><a class="reference internal" href="multiview_platform.tests.test_monoview_classifiers.html#submodules">Submodules</a></li>
-<li class="toctree-l2"><a class="reference internal" href="multiview_platform.tests.test_monoview_classifiers.html#module-multiview_platform.tests.test_monoview_classifiers.test_adaboost">multiview_platform.tests.test_monoview_classifiers.test_adaboost module</a></li>
-<li class="toctree-l2"><a class="reference internal" href="multiview_platform.tests.test_monoview_classifiers.html#module-multiview_platform.tests.test_monoview_classifiers.test_compatibility">multiview_platform.tests.test_monoview_classifiers.test_compatibility module</a></li>
-<li class="toctree-l2"><a class="reference internal" href="multiview_platform.tests.test_monoview_classifiers.html#module-multiview_platform.tests.test_monoview_classifiers">Module contents</a></li>
+<li class="toctree-l2"><a class="reference internal" href="multiview_platform.tests.test_monoview_classifiers.html#multiview-platform-tests-test-monoview-classifiers-test-adaboost-module">multiview_platform.tests.test_monoview_classifiers.test_adaboost module</a></li>
+<li class="toctree-l2"><a class="reference internal" href="multiview_platform.tests.test_monoview_classifiers.html#multiview-platform-tests-test-monoview-classifiers-test-compatibility-module">multiview_platform.tests.test_monoview_classifiers.test_compatibility module</a></li>
+<li class="toctree-l2"><a class="reference internal" href="multiview_platform.tests.test_monoview_classifiers.html#module-contents">Module contents</a></li>
 </ul>
 </li>
 <li class="toctree-l1"><a class="reference internal" href="multiview_platform.tests.test_multiview_classifiers.html">multiview_platform.tests.test_multiview_classifiers package</a><ul>
@@ -108,24 +110,24 @@
 </li>
 <li class="toctree-l3"><a class="reference internal" href="multiview_platform.tests.test_multiview_classifiers.Test_PseudoCQMeasure.html">multiview_platform.tests.test_multiview_classifiers.Test_PseudoCQMeasure package</a><ul>
 <li class="toctree-l4"><a class="reference internal" href="multiview_platform.tests.test_multiview_classifiers.Test_PseudoCQMeasure.html#submodules">Submodules</a></li>
-<li class="toctree-l4"><a class="reference internal" href="multiview_platform.tests.test_multiview_classifiers.Test_PseudoCQMeasure.html#module-multiview_platform.tests.test_multiview_classifiers.Test_PseudoCQMeasure.test_PseudoCQFusionModule">multiview_platform.tests.test_multiview_classifiers.Test_PseudoCQMeasure.test_PseudoCQFusionModule module</a></li>
-<li class="toctree-l4"><a class="reference internal" href="multiview_platform.tests.test_multiview_classifiers.Test_PseudoCQMeasure.html#module-multiview_platform.tests.test_multiview_classifiers.Test_PseudoCQMeasure">Module contents</a></li>
+<li class="toctree-l4"><a class="reference internal" href="multiview_platform.tests.test_multiview_classifiers.Test_PseudoCQMeasure.html#multiview-platform-tests-test-multiview-classifiers-test-pseudocqmeasure-test-pseudocqfusionmodule-module">multiview_platform.tests.test_multiview_classifiers.Test_PseudoCQMeasure.test_PseudoCQFusionModule module</a></li>
+<li class="toctree-l4"><a class="reference internal" href="multiview_platform.tests.test_multiview_classifiers.Test_PseudoCQMeasure.html#module-contents">Module contents</a></li>
 </ul>
 </li>
 </ul>
 </li>
 <li class="toctree-l2"><a class="reference internal" href="multiview_platform.tests.test_multiview_classifiers.html#submodules">Submodules</a></li>
-<li class="toctree-l2"><a class="reference internal" href="multiview_platform.tests.test_multiview_classifiers.html#module-multiview_platform.tests.test_multiview_classifiers.test_diversity_utils">multiview_platform.tests.test_multiview_classifiers.test_diversity_utils module</a></li>
-<li class="toctree-l2"><a class="reference internal" href="multiview_platform.tests.test_multiview_classifiers.html#module-multiview_platform.tests.test_multiview_classifiers">Module contents</a></li>
+<li class="toctree-l2"><a class="reference internal" href="multiview_platform.tests.test_multiview_classifiers.html#multiview-platform-tests-test-multiview-classifiers-test-diversity-utils-module">multiview_platform.tests.test_multiview_classifiers.test_diversity_utils module</a></li>
+<li class="toctree-l2"><a class="reference internal" href="multiview_platform.tests.test_multiview_classifiers.html#module-contents">Module contents</a></li>
 </ul>
 </li>
 <li class="toctree-l1"><a class="reference internal" href="multiview_platform.tests.test_utils.html">multiview_platform.tests.test_utils package</a><ul>
 <li class="toctree-l2"><a class="reference internal" href="multiview_platform.tests.test_utils.html#submodules">Submodules</a></li>
-<li class="toctree-l2"><a class="reference internal" href="multiview_platform.tests.test_utils.html#module-multiview_platform.tests.test_utils.test_GetMultiviewDB">multiview_platform.tests.test_utils.test_GetMultiviewDB module</a></li>
-<li class="toctree-l2"><a class="reference internal" href="multiview_platform.tests.test_utils.html#module-multiview_platform.tests.test_utils.test_configuration">multiview_platform.tests.test_utils.test_configuration module</a></li>
-<li class="toctree-l2"><a class="reference internal" href="multiview_platform.tests.test_utils.html#module-multiview_platform.tests.test_utils.test_execution">multiview_platform.tests.test_utils.test_execution module</a></li>
-<li class="toctree-l2"><a class="reference internal" href="multiview_platform.tests.test_utils.html#module-multiview_platform.tests.test_utils.test_multiclass">multiview_platform.tests.test_utils.test_multiclass module</a></li>
-<li class="toctree-l2"><a class="reference internal" href="multiview_platform.tests.test_utils.html#module-multiview_platform.tests.test_utils">Module contents</a></li>
+<li class="toctree-l2"><a class="reference internal" href="multiview_platform.tests.test_utils.html#multiview-platform-tests-test-utils-test-getmultiviewdb-module">multiview_platform.tests.test_utils.test_GetMultiviewDB module</a></li>
+<li class="toctree-l2"><a class="reference internal" href="multiview_platform.tests.test_utils.html#multiview-platform-tests-test-utils-test-configuration-module">multiview_platform.tests.test_utils.test_configuration module</a></li>
+<li class="toctree-l2"><a class="reference internal" href="multiview_platform.tests.test_utils.html#multiview-platform-tests-test-utils-test-execution-module">multiview_platform.tests.test_utils.test_execution module</a></li>
+<li class="toctree-l2"><a class="reference internal" href="multiview_platform.tests.test_utils.html#multiview-platform-tests-test-utils-test-multiclass-module">multiview_platform.tests.test_utils.test_multiclass module</a></li>
+<li class="toctree-l2"><a class="reference internal" href="multiview_platform.tests.test_utils.html#module-contents">Module contents</a></li>
 </ul>
 </li>
 </ul>
@@ -140,8 +142,8 @@
 <div class="section" id="multiview-platform-tests-test-resultanalysis-module">
 <h2>multiview_platform.tests.test_ResultAnalysis module<a class="headerlink" href="#multiview-platform-tests-test-resultanalysis-module" title="Permalink to this headline">¶</a></h2>
 </div>
-<div class="section" id="module-multiview_platform.tests">
-<span id="module-contents"></span><h2>Module contents<a class="headerlink" href="#module-multiview_platform.tests" title="Permalink to this headline">¶</a></h2>
+<div class="section" id="module-contents">
+<h2>Module contents<a class="headerlink" href="#module-contents" title="Permalink to this headline">¶</a></h2>
 </div>
 </div>
 
@@ -158,7 +160,7 @@
 <li><a class="reference internal" href="#submodules">Submodules</a></li>
 <li><a class="reference internal" href="#multiview-platform-tests-test-execclassif-module">multiview_platform.tests.test_ExecClassif module</a></li>
 <li><a class="reference internal" href="#multiview-platform-tests-test-resultanalysis-module">multiview_platform.tests.test_ResultAnalysis module</a></li>
-<li><a class="reference internal" href="#module-multiview_platform.tests">Module contents</a></li>
+<li><a class="reference internal" href="#module-contents">Module contents</a></li>
 </ul>
 </li>
 </ul>
diff --git a/docs/build/references/multiview_platform.tests.test_metrics.html b/docs/build/references/multiview_platform.tests.test_metrics.html
index 481dd91dda9d5c7c5a3a7cda3bd561accb4bab7f..71f37c9d03bbc9e2a969bd8ff9127b8a2ed42b0b 100644
--- a/docs/build/references/multiview_platform.tests.test_metrics.html
+++ b/docs/build/references/multiview_platform.tests.test_metrics.html
@@ -1,9 +1,11 @@
 
-<!DOCTYPE html>
+<!DOCTYPE html PUBLIC "-//W3C//DTD XHTML 1.0 Transitional//EN"
+  "http://www.w3.org/TR/xhtml1/DTD/xhtml1-transitional.dtd">
 
-<html>
+<html xmlns="http://www.w3.org/1999/xhtml">
   <head>
-    <meta charset="utf-8" />
+    <meta http-equiv="X-UA-Compatible" content="IE=Edge" />
+    <meta http-equiv="Content-Type" content="text/html; charset=utf-8" />
     <title>multiview_platform.tests.test_metrics package &#8212; SuMMIT 0 documentation</title>
     <link rel="stylesheet" href="../_static/classic.css" type="text/css" />
     <link rel="stylesheet" href="../_static/pygments.css" type="text/css" />
@@ -51,28 +53,11 @@
 <div class="section" id="submodules">
 <h2>Submodules<a class="headerlink" href="#submodules" title="Permalink to this headline">¶</a></h2>
 </div>
-<div class="section" id="module-multiview_platform.tests.test_metrics.test_accuracy_score">
-<span id="multiview-platform-tests-test-metrics-test-accuracy-score-module"></span><h2>multiview_platform.tests.test_metrics.test_accuracy_score module<a class="headerlink" href="#module-multiview_platform.tests.test_metrics.test_accuracy_score" title="Permalink to this headline">¶</a></h2>
-<dl class="py class">
-<dt id="multiview_platform.tests.test_metrics.test_accuracy_score.Test_accuracy_score">
-<em class="property">class </em><code class="sig-name descname">Test_accuracy_score</code><span class="sig-paren">(</span><em class="sig-param"><span class="n">methodName</span><span class="o">=</span><span class="default_value">'runTest'</span></em><span class="sig-paren">)</span><a class="headerlink" href="#multiview_platform.tests.test_metrics.test_accuracy_score.Test_accuracy_score" title="Permalink to this definition">¶</a></dt>
-<dd><p>Bases: <code class="xref py py-class docutils literal notranslate"><span class="pre">unittest.case.TestCase</span></code></p>
-<dl class="py method">
-<dt id="multiview_platform.tests.test_metrics.test_accuracy_score.Test_accuracy_score.score_test">
-<code class="sig-name descname">score_test</code><span class="sig-paren">(</span><em class="sig-param"><span class="n">metric_module</span></em><span class="sig-paren">)</span><a class="headerlink" href="#multiview_platform.tests.test_metrics.test_accuracy_score.Test_accuracy_score.score_test" title="Permalink to this definition">¶</a></dt>
-<dd></dd></dl>
-
-<dl class="py method">
-<dt id="multiview_platform.tests.test_metrics.test_accuracy_score.Test_accuracy_score.setUpClass">
-<code class="sig-name descname">setUpClass</code><span class="sig-paren">(</span><span class="sig-paren">)</span><a class="headerlink" href="#multiview_platform.tests.test_metrics.test_accuracy_score.Test_accuracy_score.setUpClass" title="Permalink to this definition">¶</a></dt>
-<dd><p>Hook method for setting up class fixture before running tests in the class.</p>
-</dd></dl>
-
-</dd></dl>
-
+<div class="section" id="multiview-platform-tests-test-metrics-test-accuracy-score-module">
+<h2>multiview_platform.tests.test_metrics.test_accuracy_score module<a class="headerlink" href="#multiview-platform-tests-test-metrics-test-accuracy-score-module" title="Permalink to this headline">¶</a></h2>
 </div>
-<div class="section" id="module-multiview_platform.tests.test_metrics">
-<span id="module-contents"></span><h2>Module contents<a class="headerlink" href="#module-multiview_platform.tests.test_metrics" title="Permalink to this headline">¶</a></h2>
+<div class="section" id="module-contents">
+<h2>Module contents<a class="headerlink" href="#module-contents" title="Permalink to this headline">¶</a></h2>
 </div>
 </div>
 
@@ -86,8 +71,8 @@
   <ul>
 <li><a class="reference internal" href="#">multiview_platform.tests.test_metrics package</a><ul>
 <li><a class="reference internal" href="#submodules">Submodules</a></li>
-<li><a class="reference internal" href="#module-multiview_platform.tests.test_metrics.test_accuracy_score">multiview_platform.tests.test_metrics.test_accuracy_score module</a></li>
-<li><a class="reference internal" href="#module-multiview_platform.tests.test_metrics">Module contents</a></li>
+<li><a class="reference internal" href="#multiview-platform-tests-test-metrics-test-accuracy-score-module">multiview_platform.tests.test_metrics.test_accuracy_score module</a></li>
+<li><a class="reference internal" href="#module-contents">Module contents</a></li>
 </ul>
 </li>
 </ul>
diff --git a/docs/build/references/multiview_platform.tests.test_mono_view.html b/docs/build/references/multiview_platform.tests.test_mono_view.html
index 71842ac12bc68e418d1b922bb1a2248e08b7108a..43b6b2a4a00aa186e2047e279bf87818b1539feb 100644
--- a/docs/build/references/multiview_platform.tests.test_mono_view.html
+++ b/docs/build/references/multiview_platform.tests.test_mono_view.html
@@ -1,9 +1,11 @@
 
-<!DOCTYPE html>
+<!DOCTYPE html PUBLIC "-//W3C//DTD XHTML 1.0 Transitional//EN"
+  "http://www.w3.org/TR/xhtml1/DTD/xhtml1-transitional.dtd">
 
-<html>
+<html xmlns="http://www.w3.org/1999/xhtml">
   <head>
-    <meta charset="utf-8" />
+    <meta http-equiv="X-UA-Compatible" content="IE=Edge" />
+    <meta http-equiv="Content-Type" content="text/html; charset=utf-8" />
     <title>multiview_platform.tests.test_mono_view package &#8212; SuMMIT 0 documentation</title>
     <link rel="stylesheet" href="../_static/classic.css" type="text/css" />
     <link rel="stylesheet" href="../_static/pygments.css" type="text/css" />
@@ -51,94 +53,14 @@
 <div class="section" id="submodules">
 <h2>Submodules<a class="headerlink" href="#submodules" title="Permalink to this headline">¶</a></h2>
 </div>
-<div class="section" id="module-multiview_platform.tests.test_mono_view.test_ExecClassifMonoView">
-<span id="multiview-platform-tests-test-mono-view-test-execclassifmonoview-module"></span><h2>multiview_platform.tests.test_mono_view.test_ExecClassifMonoView module<a class="headerlink" href="#module-multiview_platform.tests.test_mono_view.test_ExecClassifMonoView" title="Permalink to this headline">¶</a></h2>
-<dl class="py class">
-<dt id="multiview_platform.tests.test_mono_view.test_ExecClassifMonoView.Test_getHPs">
-<em class="property">class </em><code class="sig-name descname">Test_getHPs</code><span class="sig-paren">(</span><em class="sig-param"><span class="n">methodName</span><span class="o">=</span><span class="default_value">'runTest'</span></em><span class="sig-paren">)</span><a class="headerlink" href="#multiview_platform.tests.test_mono_view.test_ExecClassifMonoView.Test_getHPs" title="Permalink to this definition">¶</a></dt>
-<dd><p>Bases: <code class="xref py py-class docutils literal notranslate"><span class="pre">unittest.case.TestCase</span></code></p>
-<dl class="py method">
-<dt id="multiview_platform.tests.test_mono_view.test_ExecClassifMonoView.Test_getHPs.setUpClass">
-<em class="property">classmethod </em><code class="sig-name descname">setUpClass</code><span class="sig-paren">(</span><span class="sig-paren">)</span><a class="headerlink" href="#multiview_platform.tests.test_mono_view.test_ExecClassifMonoView.Test_getHPs.setUpClass" title="Permalink to this definition">¶</a></dt>
-<dd><p>Hook method for setting up class fixture before running tests in the class.</p>
-</dd></dl>
-
-<dl class="py method">
-<dt id="multiview_platform.tests.test_mono_view.test_ExecClassifMonoView.Test_getHPs.tearDownClass">
-<em class="property">classmethod </em><code class="sig-name descname">tearDownClass</code><span class="sig-paren">(</span><span class="sig-paren">)</span><a class="headerlink" href="#multiview_platform.tests.test_mono_view.test_ExecClassifMonoView.Test_getHPs.tearDownClass" title="Permalink to this definition">¶</a></dt>
-<dd><p>Hook method for deconstructing the class fixture after running all tests in the class.</p>
-</dd></dl>
-
-<dl class="py method">
-<dt id="multiview_platform.tests.test_mono_view.test_ExecClassifMonoView.Test_getHPs.test_simple">
-<code class="sig-name descname">test_simple</code><span class="sig-paren">(</span><span class="sig-paren">)</span><a class="headerlink" href="#multiview_platform.tests.test_mono_view.test_ExecClassifMonoView.Test_getHPs.test_simple" title="Permalink to this definition">¶</a></dt>
-<dd></dd></dl>
-
-</dd></dl>
-
-<dl class="py class">
-<dt id="multiview_platform.tests.test_mono_view.test_ExecClassifMonoView.Test_initConstants">
-<em class="property">class </em><code class="sig-name descname">Test_initConstants</code><span class="sig-paren">(</span><em class="sig-param"><span class="n">methodName</span><span class="o">=</span><span class="default_value">'runTest'</span></em><span class="sig-paren">)</span><a class="headerlink" href="#multiview_platform.tests.test_mono_view.test_ExecClassifMonoView.Test_initConstants" title="Permalink to this definition">¶</a></dt>
-<dd><p>Bases: <code class="xref py py-class docutils literal notranslate"><span class="pre">unittest.case.TestCase</span></code></p>
-<dl class="py method">
-<dt id="multiview_platform.tests.test_mono_view.test_ExecClassifMonoView.Test_initConstants.setUpClass">
-<em class="property">classmethod </em><code class="sig-name descname">setUpClass</code><span class="sig-paren">(</span><span class="sig-paren">)</span><a class="headerlink" href="#multiview_platform.tests.test_mono_view.test_ExecClassifMonoView.Test_initConstants.setUpClass" title="Permalink to this definition">¶</a></dt>
-<dd><p>Hook method for setting up class fixture before running tests in the class.</p>
-</dd></dl>
-
-<dl class="py method">
-<dt id="multiview_platform.tests.test_mono_view.test_ExecClassifMonoView.Test_initConstants.tearDownClass">
-<em class="property">classmethod </em><code class="sig-name descname">tearDownClass</code><span class="sig-paren">(</span><span class="sig-paren">)</span><a class="headerlink" href="#multiview_platform.tests.test_mono_view.test_ExecClassifMonoView.Test_initConstants.tearDownClass" title="Permalink to this definition">¶</a></dt>
-<dd><p>Hook method for deconstructing the class fixture after running all tests in the class.</p>
-</dd></dl>
-
-<dl class="py method">
-<dt id="multiview_platform.tests.test_mono_view.test_ExecClassifMonoView.Test_initConstants.test_simple">
-<code class="sig-name descname">test_simple</code><span class="sig-paren">(</span><span class="sig-paren">)</span><a class="headerlink" href="#multiview_platform.tests.test_mono_view.test_ExecClassifMonoView.Test_initConstants.test_simple" title="Permalink to this definition">¶</a></dt>
-<dd></dd></dl>
-
-</dd></dl>
-
-<dl class="py class">
-<dt id="multiview_platform.tests.test_mono_view.test_ExecClassifMonoView.Test_initTrainTest">
-<em class="property">class </em><code class="sig-name descname">Test_initTrainTest</code><span class="sig-paren">(</span><em class="sig-param"><span class="n">methodName</span><span class="o">=</span><span class="default_value">'runTest'</span></em><span class="sig-paren">)</span><a class="headerlink" href="#multiview_platform.tests.test_mono_view.test_ExecClassifMonoView.Test_initTrainTest" title="Permalink to this definition">¶</a></dt>
-<dd><p>Bases: <code class="xref py py-class docutils literal notranslate"><span class="pre">unittest.case.TestCase</span></code></p>
-<dl class="py method">
-<dt id="multiview_platform.tests.test_mono_view.test_ExecClassifMonoView.Test_initTrainTest.setUpClass">
-<em class="property">classmethod </em><code class="sig-name descname">setUpClass</code><span class="sig-paren">(</span><span class="sig-paren">)</span><a class="headerlink" href="#multiview_platform.tests.test_mono_view.test_ExecClassifMonoView.Test_initTrainTest.setUpClass" title="Permalink to this definition">¶</a></dt>
-<dd><p>Hook method for setting up class fixture before running tests in the class.</p>
-</dd></dl>
-
-<dl class="py method">
-<dt id="multiview_platform.tests.test_mono_view.test_ExecClassifMonoView.Test_initTrainTest.test_simple">
-<code class="sig-name descname">test_simple</code><span class="sig-paren">(</span><span class="sig-paren">)</span><a class="headerlink" href="#multiview_platform.tests.test_mono_view.test_ExecClassifMonoView.Test_initTrainTest.test_simple" title="Permalink to this definition">¶</a></dt>
-<dd></dd></dl>
-
-</dd></dl>
-
+<div class="section" id="multiview-platform-tests-test-mono-view-test-execclassifmonoview-module">
+<h2>multiview_platform.tests.test_mono_view.test_ExecClassifMonoView module<a class="headerlink" href="#multiview-platform-tests-test-mono-view-test-execclassifmonoview-module" title="Permalink to this headline">¶</a></h2>
 </div>
-<div class="section" id="module-multiview_platform.tests.test_mono_view.test_MonoviewUtils">
-<span id="multiview-platform-tests-test-mono-view-test-monoviewutils-module"></span><h2>multiview_platform.tests.test_mono_view.test_MonoviewUtils module<a class="headerlink" href="#module-multiview_platform.tests.test_mono_view.test_MonoviewUtils" title="Permalink to this headline">¶</a></h2>
-<dl class="py class">
-<dt id="multiview_platform.tests.test_mono_view.test_MonoviewUtils.Test_genTestFoldsPreds">
-<em class="property">class </em><code class="sig-name descname">Test_genTestFoldsPreds</code><span class="sig-paren">(</span><em class="sig-param"><span class="n">methodName</span><span class="o">=</span><span class="default_value">'runTest'</span></em><span class="sig-paren">)</span><a class="headerlink" href="#multiview_platform.tests.test_mono_view.test_MonoviewUtils.Test_genTestFoldsPreds" title="Permalink to this definition">¶</a></dt>
-<dd><p>Bases: <code class="xref py py-class docutils literal notranslate"><span class="pre">unittest.case.TestCase</span></code></p>
-<dl class="py method">
-<dt id="multiview_platform.tests.test_mono_view.test_MonoviewUtils.Test_genTestFoldsPreds.setUpClass">
-<em class="property">classmethod </em><code class="sig-name descname">setUpClass</code><span class="sig-paren">(</span><span class="sig-paren">)</span><a class="headerlink" href="#multiview_platform.tests.test_mono_view.test_MonoviewUtils.Test_genTestFoldsPreds.setUpClass" title="Permalink to this definition">¶</a></dt>
-<dd><p>Hook method for setting up class fixture before running tests in the class.</p>
-</dd></dl>
-
-<dl class="py method">
-<dt id="multiview_platform.tests.test_mono_view.test_MonoviewUtils.Test_genTestFoldsPreds.test_simple">
-<code class="sig-name descname">test_simple</code><span class="sig-paren">(</span><span class="sig-paren">)</span><a class="headerlink" href="#multiview_platform.tests.test_mono_view.test_MonoviewUtils.Test_genTestFoldsPreds.test_simple" title="Permalink to this definition">¶</a></dt>
-<dd></dd></dl>
-
-</dd></dl>
-
+<div class="section" id="multiview-platform-tests-test-mono-view-test-monoviewutils-module">
+<h2>multiview_platform.tests.test_mono_view.test_MonoviewUtils module<a class="headerlink" href="#multiview-platform-tests-test-mono-view-test-monoviewutils-module" title="Permalink to this headline">¶</a></h2>
 </div>
-<div class="section" id="module-multiview_platform.tests.test_mono_view">
-<span id="module-contents"></span><h2>Module contents<a class="headerlink" href="#module-multiview_platform.tests.test_mono_view" title="Permalink to this headline">¶</a></h2>
+<div class="section" id="module-contents">
+<h2>Module contents<a class="headerlink" href="#module-contents" title="Permalink to this headline">¶</a></h2>
 </div>
 </div>
 
@@ -152,9 +74,9 @@
   <ul>
 <li><a class="reference internal" href="#">multiview_platform.tests.test_mono_view package</a><ul>
 <li><a class="reference internal" href="#submodules">Submodules</a></li>
-<li><a class="reference internal" href="#module-multiview_platform.tests.test_mono_view.test_ExecClassifMonoView">multiview_platform.tests.test_mono_view.test_ExecClassifMonoView module</a></li>
-<li><a class="reference internal" href="#module-multiview_platform.tests.test_mono_view.test_MonoviewUtils">multiview_platform.tests.test_mono_view.test_MonoviewUtils module</a></li>
-<li><a class="reference internal" href="#module-multiview_platform.tests.test_mono_view">Module contents</a></li>
+<li><a class="reference internal" href="#multiview-platform-tests-test-mono-view-test-execclassifmonoview-module">multiview_platform.tests.test_mono_view.test_ExecClassifMonoView module</a></li>
+<li><a class="reference internal" href="#multiview-platform-tests-test-mono-view-test-monoviewutils-module">multiview_platform.tests.test_mono_view.test_MonoviewUtils module</a></li>
+<li><a class="reference internal" href="#module-contents">Module contents</a></li>
 </ul>
 </li>
 </ul>
diff --git a/docs/build/references/multiview_platform.tests.test_monoview_classifiers.html b/docs/build/references/multiview_platform.tests.test_monoview_classifiers.html
index ef58ed271decdf574c9876791780bc0030dcbce6..cf7f017246b33fa1d5bfc6094488d63940cffae2 100644
--- a/docs/build/references/multiview_platform.tests.test_monoview_classifiers.html
+++ b/docs/build/references/multiview_platform.tests.test_monoview_classifiers.html
@@ -1,9 +1,11 @@
 
-<!DOCTYPE html>
+<!DOCTYPE html PUBLIC "-//W3C//DTD XHTML 1.0 Transitional//EN"
+  "http://www.w3.org/TR/xhtml1/DTD/xhtml1-transitional.dtd">
 
-<html>
+<html xmlns="http://www.w3.org/1999/xhtml">
   <head>
-    <meta charset="utf-8" />
+    <meta http-equiv="X-UA-Compatible" content="IE=Edge" />
+    <meta http-equiv="Content-Type" content="text/html; charset=utf-8" />
     <title>multiview_platform.tests.test_monoview_classifiers package &#8212; SuMMIT 0 documentation</title>
     <link rel="stylesheet" href="../_static/classic.css" type="text/css" />
     <link rel="stylesheet" href="../_static/pygments.css" type="text/css" />
@@ -51,14 +53,14 @@
 <div class="section" id="submodules">
 <h2>Submodules<a class="headerlink" href="#submodules" title="Permalink to this headline">¶</a></h2>
 </div>
-<div class="section" id="module-multiview_platform.tests.test_monoview_classifiers.test_adaboost">
-<span id="multiview-platform-tests-test-monoview-classifiers-test-adaboost-module"></span><h2>multiview_platform.tests.test_monoview_classifiers.test_adaboost module<a class="headerlink" href="#module-multiview_platform.tests.test_monoview_classifiers.test_adaboost" title="Permalink to this headline">¶</a></h2>
+<div class="section" id="multiview-platform-tests-test-monoview-classifiers-test-adaboost-module">
+<h2>multiview_platform.tests.test_monoview_classifiers.test_adaboost module<a class="headerlink" href="#multiview-platform-tests-test-monoview-classifiers-test-adaboost-module" title="Permalink to this headline">¶</a></h2>
 </div>
-<div class="section" id="module-multiview_platform.tests.test_monoview_classifiers.test_compatibility">
-<span id="multiview-platform-tests-test-monoview-classifiers-test-compatibility-module"></span><h2>multiview_platform.tests.test_monoview_classifiers.test_compatibility module<a class="headerlink" href="#module-multiview_platform.tests.test_monoview_classifiers.test_compatibility" title="Permalink to this headline">¶</a></h2>
+<div class="section" id="multiview-platform-tests-test-monoview-classifiers-test-compatibility-module">
+<h2>multiview_platform.tests.test_monoview_classifiers.test_compatibility module<a class="headerlink" href="#multiview-platform-tests-test-monoview-classifiers-test-compatibility-module" title="Permalink to this headline">¶</a></h2>
 </div>
-<div class="section" id="module-multiview_platform.tests.test_monoview_classifiers">
-<span id="module-contents"></span><h2>Module contents<a class="headerlink" href="#module-multiview_platform.tests.test_monoview_classifiers" title="Permalink to this headline">¶</a></h2>
+<div class="section" id="module-contents">
+<h2>Module contents<a class="headerlink" href="#module-contents" title="Permalink to this headline">¶</a></h2>
 </div>
 </div>
 
@@ -72,9 +74,9 @@
   <ul>
 <li><a class="reference internal" href="#">multiview_platform.tests.test_monoview_classifiers package</a><ul>
 <li><a class="reference internal" href="#submodules">Submodules</a></li>
-<li><a class="reference internal" href="#module-multiview_platform.tests.test_monoview_classifiers.test_adaboost">multiview_platform.tests.test_monoview_classifiers.test_adaboost module</a></li>
-<li><a class="reference internal" href="#module-multiview_platform.tests.test_monoview_classifiers.test_compatibility">multiview_platform.tests.test_monoview_classifiers.test_compatibility module</a></li>
-<li><a class="reference internal" href="#module-multiview_platform.tests.test_monoview_classifiers">Module contents</a></li>
+<li><a class="reference internal" href="#multiview-platform-tests-test-monoview-classifiers-test-adaboost-module">multiview_platform.tests.test_monoview_classifiers.test_adaboost module</a></li>
+<li><a class="reference internal" href="#multiview-platform-tests-test-monoview-classifiers-test-compatibility-module">multiview_platform.tests.test_monoview_classifiers.test_compatibility module</a></li>
+<li><a class="reference internal" href="#module-contents">Module contents</a></li>
 </ul>
 </li>
 </ul>
diff --git a/docs/build/references/multiview_platform.tests.test_multiview_classifiers.Test_DifficultyMeasure.html b/docs/build/references/multiview_platform.tests.test_multiview_classifiers.Test_DifficultyMeasure.html
index f201d07f85396f43f491d3746e7b9f7c86188b7d..a07049ebee8f578a39f6cb7fd2a2dada50eca506 100644
--- a/docs/build/references/multiview_platform.tests.test_multiview_classifiers.Test_DifficultyMeasure.html
+++ b/docs/build/references/multiview_platform.tests.test_multiview_classifiers.Test_DifficultyMeasure.html
@@ -1,9 +1,11 @@
 
-<!DOCTYPE html>
+<!DOCTYPE html PUBLIC "-//W3C//DTD XHTML 1.0 Transitional//EN"
+  "http://www.w3.org/TR/xhtml1/DTD/xhtml1-transitional.dtd">
 
-<html>
+<html xmlns="http://www.w3.org/1999/xhtml">
   <head>
-    <meta charset="utf-8" />
+    <meta http-equiv="X-UA-Compatible" content="IE=Edge" />
+    <meta http-equiv="Content-Type" content="text/html; charset=utf-8" />
     <title>multiview_platform.tests.test_multiview_classifiers.Test_DifficultyMeasure package &#8212; SuMMIT 0 documentation</title>
     <link rel="stylesheet" href="../_static/classic.css" type="text/css" />
     <link rel="stylesheet" href="../_static/pygments.css" type="text/css" />
diff --git a/docs/build/references/multiview_platform.tests.test_multiview_classifiers.Test_DisagreeFusion.html b/docs/build/references/multiview_platform.tests.test_multiview_classifiers.Test_DisagreeFusion.html
index 31441b8f8f4f8f32a85fb2c6baf028babd04cc8e..c69df000a8118f45342f6279b4d29d3f4583ebb6 100644
--- a/docs/build/references/multiview_platform.tests.test_multiview_classifiers.Test_DisagreeFusion.html
+++ b/docs/build/references/multiview_platform.tests.test_multiview_classifiers.Test_DisagreeFusion.html
@@ -1,9 +1,11 @@
 
-<!DOCTYPE html>
+<!DOCTYPE html PUBLIC "-//W3C//DTD XHTML 1.0 Transitional//EN"
+  "http://www.w3.org/TR/xhtml1/DTD/xhtml1-transitional.dtd">
 
-<html>
+<html xmlns="http://www.w3.org/1999/xhtml">
   <head>
-    <meta charset="utf-8" />
+    <meta http-equiv="X-UA-Compatible" content="IE=Edge" />
+    <meta http-equiv="Content-Type" content="text/html; charset=utf-8" />
     <title>multiview_platform.tests.test_multiview_classifiers.Test_DisagreeFusion package &#8212; SuMMIT 0 documentation</title>
     <link rel="stylesheet" href="../_static/classic.css" type="text/css" />
     <link rel="stylesheet" href="../_static/pygments.css" type="text/css" />
diff --git a/docs/build/references/multiview_platform.tests.test_multiview_classifiers.Test_DoubleFaultFusion.html b/docs/build/references/multiview_platform.tests.test_multiview_classifiers.Test_DoubleFaultFusion.html
index f6ce58f0171f228eacf52456924e741781791f51..63895d027fb481d84aba04d86966ca0bb2b3a200 100644
--- a/docs/build/references/multiview_platform.tests.test_multiview_classifiers.Test_DoubleFaultFusion.html
+++ b/docs/build/references/multiview_platform.tests.test_multiview_classifiers.Test_DoubleFaultFusion.html
@@ -1,9 +1,11 @@
 
-<!DOCTYPE html>
+<!DOCTYPE html PUBLIC "-//W3C//DTD XHTML 1.0 Transitional//EN"
+  "http://www.w3.org/TR/xhtml1/DTD/xhtml1-transitional.dtd">
 
-<html>
+<html xmlns="http://www.w3.org/1999/xhtml">
   <head>
-    <meta charset="utf-8" />
+    <meta http-equiv="X-UA-Compatible" content="IE=Edge" />
+    <meta http-equiv="Content-Type" content="text/html; charset=utf-8" />
     <title>multiview_platform.tests.test_multiview_classifiers.Test_DoubleFaultFusion package &#8212; SuMMIT 0 documentation</title>
     <link rel="stylesheet" href="../_static/classic.css" type="text/css" />
     <link rel="stylesheet" href="../_static/pygments.css" type="text/css" />
diff --git a/docs/build/references/multiview_platform.tests.test_multiview_classifiers.Test_EntropyFusion.html b/docs/build/references/multiview_platform.tests.test_multiview_classifiers.Test_EntropyFusion.html
index 4826184bb82c848d7b4641aa9cf48a40a9d22597..3f4c20eec04950a7d6d6168470e0097c6c9a961d 100644
--- a/docs/build/references/multiview_platform.tests.test_multiview_classifiers.Test_EntropyFusion.html
+++ b/docs/build/references/multiview_platform.tests.test_multiview_classifiers.Test_EntropyFusion.html
@@ -1,9 +1,11 @@
 
-<!DOCTYPE html>
+<!DOCTYPE html PUBLIC "-//W3C//DTD XHTML 1.0 Transitional//EN"
+  "http://www.w3.org/TR/xhtml1/DTD/xhtml1-transitional.dtd">
 
-<html>
+<html xmlns="http://www.w3.org/1999/xhtml">
   <head>
-    <meta charset="utf-8" />
+    <meta http-equiv="X-UA-Compatible" content="IE=Edge" />
+    <meta http-equiv="Content-Type" content="text/html; charset=utf-8" />
     <title>multiview_platform.tests.test_multiview_classifiers.Test_EntropyFusion package &#8212; SuMMIT 0 documentation</title>
     <link rel="stylesheet" href="../_static/classic.css" type="text/css" />
     <link rel="stylesheet" href="../_static/pygments.css" type="text/css" />
diff --git a/docs/build/references/multiview_platform.tests.test_multiview_classifiers.Test_Fusion.html b/docs/build/references/multiview_platform.tests.test_multiview_classifiers.Test_Fusion.html
index 61d39aa86d245145246307ad71747b60e757a5bf..175dfbd9dbb1bd276726c0394a50b047697111d7 100644
--- a/docs/build/references/multiview_platform.tests.test_multiview_classifiers.Test_Fusion.html
+++ b/docs/build/references/multiview_platform.tests.test_multiview_classifiers.Test_Fusion.html
@@ -1,9 +1,11 @@
 
-<!DOCTYPE html>
+<!DOCTYPE html PUBLIC "-//W3C//DTD XHTML 1.0 Transitional//EN"
+  "http://www.w3.org/TR/xhtml1/DTD/xhtml1-transitional.dtd">
 
-<html>
+<html xmlns="http://www.w3.org/1999/xhtml">
   <head>
-    <meta charset="utf-8" />
+    <meta http-equiv="X-UA-Compatible" content="IE=Edge" />
+    <meta http-equiv="Content-Type" content="text/html; charset=utf-8" />
     <title>multiview_platform.tests.test_multiview_classifiers.Test_Fusion package &#8212; SuMMIT 0 documentation</title>
     <link rel="stylesheet" href="../_static/classic.css" type="text/css" />
     <link rel="stylesheet" href="../_static/pygments.css" type="text/css" />
diff --git a/docs/build/references/multiview_platform.tests.test_multiview_classifiers.Test_PseudoCQMeasure.html b/docs/build/references/multiview_platform.tests.test_multiview_classifiers.Test_PseudoCQMeasure.html
index 6cfac51034dda0967a8284e2f272a366c483f1a6..074ae169138afb5269096b93300edd9c100382c8 100644
--- a/docs/build/references/multiview_platform.tests.test_multiview_classifiers.Test_PseudoCQMeasure.html
+++ b/docs/build/references/multiview_platform.tests.test_multiview_classifiers.Test_PseudoCQMeasure.html
@@ -1,9 +1,11 @@
 
-<!DOCTYPE html>
+<!DOCTYPE html PUBLIC "-//W3C//DTD XHTML 1.0 Transitional//EN"
+  "http://www.w3.org/TR/xhtml1/DTD/xhtml1-transitional.dtd">
 
-<html>
+<html xmlns="http://www.w3.org/1999/xhtml">
   <head>
-    <meta charset="utf-8" />
+    <meta http-equiv="X-UA-Compatible" content="IE=Edge" />
+    <meta http-equiv="Content-Type" content="text/html; charset=utf-8" />
     <title>multiview_platform.tests.test_multiview_classifiers.Test_PseudoCQMeasure package &#8212; SuMMIT 0 documentation</title>
     <link rel="stylesheet" href="../_static/classic.css" type="text/css" />
     <link rel="stylesheet" href="../_static/pygments.css" type="text/css" />
@@ -52,11 +54,11 @@
 <div class="section" id="submodules">
 <h2>Submodules<a class="headerlink" href="#submodules" title="Permalink to this headline">¶</a></h2>
 </div>
-<div class="section" id="module-multiview_platform.tests.test_multiview_classifiers.Test_PseudoCQMeasure.test_PseudoCQFusionModule">
-<span id="multiview-platform-tests-test-multiview-classifiers-test-pseudocqmeasure-test-pseudocqfusionmodule-module"></span><h2>multiview_platform.tests.test_multiview_classifiers.Test_PseudoCQMeasure.test_PseudoCQFusionModule module<a class="headerlink" href="#module-multiview_platform.tests.test_multiview_classifiers.Test_PseudoCQMeasure.test_PseudoCQFusionModule" title="Permalink to this headline">¶</a></h2>
+<div class="section" id="multiview-platform-tests-test-multiview-classifiers-test-pseudocqmeasure-test-pseudocqfusionmodule-module">
+<h2>multiview_platform.tests.test_multiview_classifiers.Test_PseudoCQMeasure.test_PseudoCQFusionModule module<a class="headerlink" href="#multiview-platform-tests-test-multiview-classifiers-test-pseudocqmeasure-test-pseudocqfusionmodule-module" title="Permalink to this headline">¶</a></h2>
 </div>
-<div class="section" id="module-multiview_platform.tests.test_multiview_classifiers.Test_PseudoCQMeasure">
-<span id="module-contents"></span><h2>Module contents<a class="headerlink" href="#module-multiview_platform.tests.test_multiview_classifiers.Test_PseudoCQMeasure" title="Permalink to this headline">¶</a></h2>
+<div class="section" id="module-contents">
+<h2>Module contents<a class="headerlink" href="#module-contents" title="Permalink to this headline">¶</a></h2>
 </div>
 </div>
 
@@ -70,8 +72,8 @@
   <ul>
 <li><a class="reference internal" href="#">multiview_platform.tests.test_multiview_classifiers.Test_PseudoCQMeasure package</a><ul>
 <li><a class="reference internal" href="#submodules">Submodules</a></li>
-<li><a class="reference internal" href="#module-multiview_platform.tests.test_multiview_classifiers.Test_PseudoCQMeasure.test_PseudoCQFusionModule">multiview_platform.tests.test_multiview_classifiers.Test_PseudoCQMeasure.test_PseudoCQFusionModule module</a></li>
-<li><a class="reference internal" href="#module-multiview_platform.tests.test_multiview_classifiers.Test_PseudoCQMeasure">Module contents</a></li>
+<li><a class="reference internal" href="#multiview-platform-tests-test-multiview-classifiers-test-pseudocqmeasure-test-pseudocqfusionmodule-module">multiview_platform.tests.test_multiview_classifiers.Test_PseudoCQMeasure.test_PseudoCQFusionModule module</a></li>
+<li><a class="reference internal" href="#module-contents">Module contents</a></li>
 </ul>
 </li>
 </ul>
diff --git a/docs/build/references/multiview_platform.tests.test_multiview_classifiers.html b/docs/build/references/multiview_platform.tests.test_multiview_classifiers.html
index b95551a1bc8b4292834ef0d0c4fc537db399a25c..c3de5a4286e4afa45feaa1f773695c95a1d6e754 100644
--- a/docs/build/references/multiview_platform.tests.test_multiview_classifiers.html
+++ b/docs/build/references/multiview_platform.tests.test_multiview_classifiers.html
@@ -1,9 +1,11 @@
 
-<!DOCTYPE html>
+<!DOCTYPE html PUBLIC "-//W3C//DTD XHTML 1.0 Transitional//EN"
+  "http://www.w3.org/TR/xhtml1/DTD/xhtml1-transitional.dtd">
 
-<html>
+<html xmlns="http://www.w3.org/1999/xhtml">
   <head>
-    <meta charset="utf-8" />
+    <meta http-equiv="X-UA-Compatible" content="IE=Edge" />
+    <meta http-equiv="Content-Type" content="text/html; charset=utf-8" />
     <title>multiview_platform.tests.test_multiview_classifiers package &#8212; SuMMIT 0 documentation</title>
     <link rel="stylesheet" href="../_static/classic.css" type="text/css" />
     <link rel="stylesheet" href="../_static/pygments.css" type="text/css" />
@@ -87,8 +89,8 @@
 </li>
 <li class="toctree-l1"><a class="reference internal" href="multiview_platform.tests.test_multiview_classifiers.Test_PseudoCQMeasure.html">multiview_platform.tests.test_multiview_classifiers.Test_PseudoCQMeasure package</a><ul>
 <li class="toctree-l2"><a class="reference internal" href="multiview_platform.tests.test_multiview_classifiers.Test_PseudoCQMeasure.html#submodules">Submodules</a></li>
-<li class="toctree-l2"><a class="reference internal" href="multiview_platform.tests.test_multiview_classifiers.Test_PseudoCQMeasure.html#module-multiview_platform.tests.test_multiview_classifiers.Test_PseudoCQMeasure.test_PseudoCQFusionModule">multiview_platform.tests.test_multiview_classifiers.Test_PseudoCQMeasure.test_PseudoCQFusionModule module</a></li>
-<li class="toctree-l2"><a class="reference internal" href="multiview_platform.tests.test_multiview_classifiers.Test_PseudoCQMeasure.html#module-multiview_platform.tests.test_multiview_classifiers.Test_PseudoCQMeasure">Module contents</a></li>
+<li class="toctree-l2"><a class="reference internal" href="multiview_platform.tests.test_multiview_classifiers.Test_PseudoCQMeasure.html#multiview-platform-tests-test-multiview-classifiers-test-pseudocqmeasure-test-pseudocqfusionmodule-module">multiview_platform.tests.test_multiview_classifiers.Test_PseudoCQMeasure.test_PseudoCQFusionModule module</a></li>
+<li class="toctree-l2"><a class="reference internal" href="multiview_platform.tests.test_multiview_classifiers.Test_PseudoCQMeasure.html#module-contents">Module contents</a></li>
 </ul>
 </li>
 </ul>
@@ -97,11 +99,11 @@
 <div class="section" id="submodules">
 <h2>Submodules<a class="headerlink" href="#submodules" title="Permalink to this headline">¶</a></h2>
 </div>
-<div class="section" id="module-multiview_platform.tests.test_multiview_classifiers.test_diversity_utils">
-<span id="multiview-platform-tests-test-multiview-classifiers-test-diversity-utils-module"></span><h2>multiview_platform.tests.test_multiview_classifiers.test_diversity_utils module<a class="headerlink" href="#module-multiview_platform.tests.test_multiview_classifiers.test_diversity_utils" title="Permalink to this headline">¶</a></h2>
+<div class="section" id="multiview-platform-tests-test-multiview-classifiers-test-diversity-utils-module">
+<h2>multiview_platform.tests.test_multiview_classifiers.test_diversity_utils module<a class="headerlink" href="#multiview-platform-tests-test-multiview-classifiers-test-diversity-utils-module" title="Permalink to this headline">¶</a></h2>
 </div>
-<div class="section" id="module-multiview_platform.tests.test_multiview_classifiers">
-<span id="module-contents"></span><h2>Module contents<a class="headerlink" href="#module-multiview_platform.tests.test_multiview_classifiers" title="Permalink to this headline">¶</a></h2>
+<div class="section" id="module-contents">
+<h2>Module contents<a class="headerlink" href="#module-contents" title="Permalink to this headline">¶</a></h2>
 </div>
 </div>
 
@@ -116,8 +118,8 @@
 <li><a class="reference internal" href="#">multiview_platform.tests.test_multiview_classifiers package</a><ul>
 <li><a class="reference internal" href="#subpackages">Subpackages</a></li>
 <li><a class="reference internal" href="#submodules">Submodules</a></li>
-<li><a class="reference internal" href="#module-multiview_platform.tests.test_multiview_classifiers.test_diversity_utils">multiview_platform.tests.test_multiview_classifiers.test_diversity_utils module</a></li>
-<li><a class="reference internal" href="#module-multiview_platform.tests.test_multiview_classifiers">Module contents</a></li>
+<li><a class="reference internal" href="#multiview-platform-tests-test-multiview-classifiers-test-diversity-utils-module">multiview_platform.tests.test_multiview_classifiers.test_diversity_utils module</a></li>
+<li><a class="reference internal" href="#module-contents">Module contents</a></li>
 </ul>
 </li>
 </ul>
diff --git a/docs/build/references/multiview_platform.tests.test_utils.html b/docs/build/references/multiview_platform.tests.test_utils.html
index fe1e83c09ef97bb158d6100137b4a37905163784..c36eadd4593c441ef981d2f6700448b450b3140b 100644
--- a/docs/build/references/multiview_platform.tests.test_utils.html
+++ b/docs/build/references/multiview_platform.tests.test_utils.html
@@ -1,9 +1,11 @@
 
-<!DOCTYPE html>
+<!DOCTYPE html PUBLIC "-//W3C//DTD XHTML 1.0 Transitional//EN"
+  "http://www.w3.org/TR/xhtml1/DTD/xhtml1-transitional.dtd">
 
-<html>
+<html xmlns="http://www.w3.org/1999/xhtml">
   <head>
-    <meta charset="utf-8" />
+    <meta http-equiv="X-UA-Compatible" content="IE=Edge" />
+    <meta http-equiv="Content-Type" content="text/html; charset=utf-8" />
     <title>multiview_platform.tests.test_utils package &#8212; SuMMIT 0 documentation</title>
     <link rel="stylesheet" href="../_static/classic.css" type="text/css" />
     <link rel="stylesheet" href="../_static/pygments.css" type="text/css" />
@@ -47,473 +49,20 @@
 <div class="section" id="submodules">
 <h2>Submodules<a class="headerlink" href="#submodules" title="Permalink to this headline">¶</a></h2>
 </div>
-<div class="section" id="module-multiview_platform.tests.test_utils.test_GetMultiviewDB">
-<span id="multiview-platform-tests-test-utils-test-getmultiviewdb-module"></span><h2>multiview_platform.tests.test_utils.test_GetMultiviewDB module<a class="headerlink" href="#module-multiview_platform.tests.test_utils.test_GetMultiviewDB" title="Permalink to this headline">¶</a></h2>
-<dl class="py class">
-<dt id="multiview_platform.tests.test_utils.test_GetMultiviewDB.Test_get_classic_db_csv">
-<em class="property">class </em><code class="sig-name descname">Test_get_classic_db_csv</code><span class="sig-paren">(</span><em class="sig-param"><span class="n">methodName</span><span class="o">=</span><span class="default_value">'runTest'</span></em><span class="sig-paren">)</span><a class="headerlink" href="#multiview_platform.tests.test_utils.test_GetMultiviewDB.Test_get_classic_db_csv" title="Permalink to this definition">¶</a></dt>
-<dd><p>Bases: <code class="xref py py-class docutils literal notranslate"><span class="pre">unittest.case.TestCase</span></code></p>
-<dl class="py method">
-<dt id="multiview_platform.tests.test_utils.test_GetMultiviewDB.Test_get_classic_db_csv.setUp">
-<code class="sig-name descname">setUp</code><span class="sig-paren">(</span><span class="sig-paren">)</span><a class="headerlink" href="#multiview_platform.tests.test_utils.test_GetMultiviewDB.Test_get_classic_db_csv.setUp" title="Permalink to this definition">¶</a></dt>
-<dd><p>Hook method for setting up the test fixture before exercising it.</p>
-</dd></dl>
-
-<dl class="py method">
-<dt id="multiview_platform.tests.test_utils.test_GetMultiviewDB.Test_get_classic_db_csv.tearDown">
-<em class="property">classmethod </em><code class="sig-name descname">tearDown</code><span class="sig-paren">(</span><span class="sig-paren">)</span><a class="headerlink" href="#multiview_platform.tests.test_utils.test_GetMultiviewDB.Test_get_classic_db_csv.tearDown" title="Permalink to this definition">¶</a></dt>
-<dd><p>Hook method for deconstructing the test fixture after testing it.</p>
-</dd></dl>
-
-<dl class="py method">
-<dt id="multiview_platform.tests.test_utils.test_GetMultiviewDB.Test_get_classic_db_csv.test_simple">
-<code class="sig-name descname">test_simple</code><span class="sig-paren">(</span><span class="sig-paren">)</span><a class="headerlink" href="#multiview_platform.tests.test_utils.test_GetMultiviewDB.Test_get_classic_db_csv.test_simple" title="Permalink to this definition">¶</a></dt>
-<dd></dd></dl>
-
-</dd></dl>
-
-<dl class="py class">
-<dt id="multiview_platform.tests.test_utils.test_GetMultiviewDB.Test_get_classic_db_hdf5">
-<em class="property">class </em><code class="sig-name descname">Test_get_classic_db_hdf5</code><span class="sig-paren">(</span><em class="sig-param"><span class="n">methodName</span><span class="o">=</span><span class="default_value">'runTest'</span></em><span class="sig-paren">)</span><a class="headerlink" href="#multiview_platform.tests.test_utils.test_GetMultiviewDB.Test_get_classic_db_hdf5" title="Permalink to this definition">¶</a></dt>
-<dd><p>Bases: <code class="xref py py-class docutils literal notranslate"><span class="pre">unittest.case.TestCase</span></code></p>
-<dl class="py method">
-<dt id="multiview_platform.tests.test_utils.test_GetMultiviewDB.Test_get_classic_db_hdf5.setUp">
-<code class="sig-name descname">setUp</code><span class="sig-paren">(</span><span class="sig-paren">)</span><a class="headerlink" href="#multiview_platform.tests.test_utils.test_GetMultiviewDB.Test_get_classic_db_hdf5.setUp" title="Permalink to this definition">¶</a></dt>
-<dd><p>Hook method for setting up the test fixture before exercising it.</p>
-</dd></dl>
-
-<dl class="py method">
-<dt id="multiview_platform.tests.test_utils.test_GetMultiviewDB.Test_get_classic_db_hdf5.tearDown">
-<code class="sig-name descname">tearDown</code><span class="sig-paren">(</span><span class="sig-paren">)</span><a class="headerlink" href="#multiview_platform.tests.test_utils.test_GetMultiviewDB.Test_get_classic_db_hdf5.tearDown" title="Permalink to this definition">¶</a></dt>
-<dd><p>Hook method for deconstructing the test fixture after testing it.</p>
-</dd></dl>
-
-<dl class="py method">
-<dt id="multiview_platform.tests.test_utils.test_GetMultiviewDB.Test_get_classic_db_hdf5.test_all_views_asked">
-<code class="sig-name descname">test_all_views_asked</code><span class="sig-paren">(</span><span class="sig-paren">)</span><a class="headerlink" href="#multiview_platform.tests.test_utils.test_GetMultiviewDB.Test_get_classic_db_hdf5.test_all_views_asked" title="Permalink to this definition">¶</a></dt>
-<dd></dd></dl>
-
-<dl class="py method">
-<dt id="multiview_platform.tests.test_utils.test_GetMultiviewDB.Test_get_classic_db_hdf5.test_asked_the_whole_dataset">
-<code class="sig-name descname">test_asked_the_whole_dataset</code><span class="sig-paren">(</span><span class="sig-paren">)</span><a class="headerlink" href="#multiview_platform.tests.test_utils.test_GetMultiviewDB.Test_get_classic_db_hdf5.test_asked_the_whole_dataset" title="Permalink to this definition">¶</a></dt>
-<dd></dd></dl>
-
-<dl class="py method">
-<dt id="multiview_platform.tests.test_utils.test_GetMultiviewDB.Test_get_classic_db_hdf5.test_simple">
-<code class="sig-name descname">test_simple</code><span class="sig-paren">(</span><span class="sig-paren">)</span><a class="headerlink" href="#multiview_platform.tests.test_utils.test_GetMultiviewDB.Test_get_classic_db_hdf5.test_simple" title="Permalink to this definition">¶</a></dt>
-<dd></dd></dl>
-
-</dd></dl>
-
-<dl class="py class">
-<dt id="multiview_platform.tests.test_utils.test_GetMultiviewDB.Test_get_plausible_db_hdf5">
-<em class="property">class </em><code class="sig-name descname">Test_get_plausible_db_hdf5</code><span class="sig-paren">(</span><em class="sig-param"><span class="n">methodName</span><span class="o">=</span><span class="default_value">'runTest'</span></em><span class="sig-paren">)</span><a class="headerlink" href="#multiview_platform.tests.test_utils.test_GetMultiviewDB.Test_get_plausible_db_hdf5" title="Permalink to this definition">¶</a></dt>
-<dd><p>Bases: <code class="xref py py-class docutils literal notranslate"><span class="pre">unittest.case.TestCase</span></code></p>
-<dl class="py method">
-<dt id="multiview_platform.tests.test_utils.test_GetMultiviewDB.Test_get_plausible_db_hdf5.setUpClass">
-<em class="property">classmethod </em><code class="sig-name descname">setUpClass</code><span class="sig-paren">(</span><span class="sig-paren">)</span><a class="headerlink" href="#multiview_platform.tests.test_utils.test_GetMultiviewDB.Test_get_plausible_db_hdf5.setUpClass" title="Permalink to this definition">¶</a></dt>
-<dd><p>Hook method for setting up class fixture before running tests in the class.</p>
-</dd></dl>
-
-<dl class="py method">
-<dt id="multiview_platform.tests.test_utils.test_GetMultiviewDB.Test_get_plausible_db_hdf5.tearDownClass">
-<em class="property">classmethod </em><code class="sig-name descname">tearDownClass</code><span class="sig-paren">(</span><span class="sig-paren">)</span><a class="headerlink" href="#multiview_platform.tests.test_utils.test_GetMultiviewDB.Test_get_plausible_db_hdf5.tearDownClass" title="Permalink to this definition">¶</a></dt>
-<dd><p>Hook method for deconstructing the class fixture after running all tests in the class.</p>
-</dd></dl>
-
-<dl class="py method">
-<dt id="multiview_platform.tests.test_utils.test_GetMultiviewDB.Test_get_plausible_db_hdf5.test_simple">
-<code class="sig-name descname">test_simple</code><span class="sig-paren">(</span><span class="sig-paren">)</span><a class="headerlink" href="#multiview_platform.tests.test_utils.test_GetMultiviewDB.Test_get_plausible_db_hdf5.test_simple" title="Permalink to this definition">¶</a></dt>
-<dd></dd></dl>
-
-<dl class="py method">
-<dt id="multiview_platform.tests.test_utils.test_GetMultiviewDB.Test_get_plausible_db_hdf5.test_two_class">
-<code class="sig-name descname">test_two_class</code><span class="sig-paren">(</span><span class="sig-paren">)</span><a class="headerlink" href="#multiview_platform.tests.test_utils.test_GetMultiviewDB.Test_get_plausible_db_hdf5.test_two_class" title="Permalink to this definition">¶</a></dt>
-<dd></dd></dl>
-
-</dd></dl>
-
+<div class="section" id="multiview-platform-tests-test-utils-test-getmultiviewdb-module">
+<h2>multiview_platform.tests.test_utils.test_GetMultiviewDB module<a class="headerlink" href="#multiview-platform-tests-test-utils-test-getmultiviewdb-module" title="Permalink to this headline">¶</a></h2>
 </div>
-<div class="section" id="module-multiview_platform.tests.test_utils.test_configuration">
-<span id="multiview-platform-tests-test-utils-test-configuration-module"></span><h2>multiview_platform.tests.test_utils.test_configuration module<a class="headerlink" href="#module-multiview_platform.tests.test_utils.test_configuration" title="Permalink to this headline">¶</a></h2>
-<dl class="py class">
-<dt id="multiview_platform.tests.test_utils.test_configuration.Test_get_the_args">
-<em class="property">class </em><code class="sig-name descname">Test_get_the_args</code><span class="sig-paren">(</span><em class="sig-param"><span class="n">methodName</span><span class="o">=</span><span class="default_value">'runTest'</span></em><span class="sig-paren">)</span><a class="headerlink" href="#multiview_platform.tests.test_utils.test_configuration.Test_get_the_args" title="Permalink to this definition">¶</a></dt>
-<dd><p>Bases: <code class="xref py py-class docutils literal notranslate"><span class="pre">unittest.case.TestCase</span></code></p>
-<dl class="py method">
-<dt id="multiview_platform.tests.test_utils.test_configuration.Test_get_the_args.setUpClass">
-<em class="property">classmethod </em><code class="sig-name descname">setUpClass</code><span class="sig-paren">(</span><span class="sig-paren">)</span><a class="headerlink" href="#multiview_platform.tests.test_utils.test_configuration.Test_get_the_args.setUpClass" title="Permalink to this definition">¶</a></dt>
-<dd><p>Hook method for setting up class fixture before running tests in the class.</p>
-</dd></dl>
-
-<dl class="py method">
-<dt id="multiview_platform.tests.test_utils.test_configuration.Test_get_the_args.tearDownClass">
-<em class="property">classmethod </em><code class="sig-name descname">tearDownClass</code><span class="sig-paren">(</span><span class="sig-paren">)</span><a class="headerlink" href="#multiview_platform.tests.test_utils.test_configuration.Test_get_the_args.tearDownClass" title="Permalink to this definition">¶</a></dt>
-<dd><p>Hook method for deconstructing the class fixture after running all tests in the class.</p>
-</dd></dl>
-
-<dl class="py method">
-<dt id="multiview_platform.tests.test_utils.test_configuration.Test_get_the_args.test_arguments">
-<code class="sig-name descname">test_arguments</code><span class="sig-paren">(</span><span class="sig-paren">)</span><a class="headerlink" href="#multiview_platform.tests.test_utils.test_configuration.Test_get_the_args.test_arguments" title="Permalink to this definition">¶</a></dt>
-<dd></dd></dl>
-
-<dl class="py method">
-<dt id="multiview_platform.tests.test_utils.test_configuration.Test_get_the_args.test_dict_format">
-<code class="sig-name descname">test_dict_format</code><span class="sig-paren">(</span><span class="sig-paren">)</span><a class="headerlink" href="#multiview_platform.tests.test_utils.test_configuration.Test_get_the_args.test_dict_format" title="Permalink to this definition">¶</a></dt>
-<dd></dd></dl>
-
-<dl class="py method">
-<dt id="multiview_platform.tests.test_utils.test_configuration.Test_get_the_args.test_file_loading">
-<code class="sig-name descname">test_file_loading</code><span class="sig-paren">(</span><span class="sig-paren">)</span><a class="headerlink" href="#multiview_platform.tests.test_utils.test_configuration.Test_get_the_args.test_file_loading" title="Permalink to this definition">¶</a></dt>
-<dd></dd></dl>
-
-</dd></dl>
-
+<div class="section" id="multiview-platform-tests-test-utils-test-configuration-module">
+<h2>multiview_platform.tests.test_utils.test_configuration module<a class="headerlink" href="#multiview-platform-tests-test-utils-test-configuration-module" title="Permalink to this headline">¶</a></h2>
 </div>
-<div class="section" id="module-multiview_platform.tests.test_utils.test_execution">
-<span id="multiview-platform-tests-test-utils-test-execution-module"></span><h2>multiview_platform.tests.test_utils.test_execution module<a class="headerlink" href="#module-multiview_platform.tests.test_utils.test_execution" title="Permalink to this headline">¶</a></h2>
-<dl class="py class">
-<dt id="multiview_platform.tests.test_utils.test_execution.FakeArg">
-<em class="property">class </em><code class="sig-name descname">FakeArg</code><a class="headerlink" href="#multiview_platform.tests.test_utils.test_execution.FakeArg" title="Permalink to this definition">¶</a></dt>
-<dd><p>Bases: <code class="xref py py-class docutils literal notranslate"><span class="pre">object</span></code></p>
-</dd></dl>
-
-<dl class="py class">
-<dt id="multiview_platform.tests.test_utils.test_execution.Test_genArgumentDictionaries">
-<em class="property">class </em><code class="sig-name descname">Test_genArgumentDictionaries</code><span class="sig-paren">(</span><em class="sig-param"><span class="n">methodName</span><span class="o">=</span><span class="default_value">'runTest'</span></em><span class="sig-paren">)</span><a class="headerlink" href="#multiview_platform.tests.test_utils.test_execution.Test_genArgumentDictionaries" title="Permalink to this definition">¶</a></dt>
-<dd><p>Bases: <code class="xref py py-class docutils literal notranslate"><span class="pre">unittest.case.TestCase</span></code></p>
-<dl class="py method">
-<dt id="multiview_platform.tests.test_utils.test_execution.Test_genArgumentDictionaries.setUpClass">
-<em class="property">classmethod </em><code class="sig-name descname">setUpClass</code><span class="sig-paren">(</span><span class="sig-paren">)</span><a class="headerlink" href="#multiview_platform.tests.test_utils.test_execution.Test_genArgumentDictionaries.setUpClass" title="Permalink to this definition">¶</a></dt>
-<dd><p>Hook method for setting up class fixture before running tests in the class.</p>
-</dd></dl>
-
-</dd></dl>
-
-<dl class="py class">
-<dt id="multiview_platform.tests.test_utils.test_execution.Test_genDirecortiesNames">
-<em class="property">class </em><code class="sig-name descname">Test_genDirecortiesNames</code><span class="sig-paren">(</span><em class="sig-param"><span class="n">methodName</span><span class="o">=</span><span class="default_value">'runTest'</span></em><span class="sig-paren">)</span><a class="headerlink" href="#multiview_platform.tests.test_utils.test_execution.Test_genDirecortiesNames" title="Permalink to this definition">¶</a></dt>
-<dd><p>Bases: <code class="xref py py-class docutils literal notranslate"><span class="pre">unittest.case.TestCase</span></code></p>
-<dl class="py method">
-<dt id="multiview_platform.tests.test_utils.test_execution.Test_genDirecortiesNames.setUpClass">
-<em class="property">classmethod </em><code class="sig-name descname">setUpClass</code><span class="sig-paren">(</span><span class="sig-paren">)</span><a class="headerlink" href="#multiview_platform.tests.test_utils.test_execution.Test_genDirecortiesNames.setUpClass" title="Permalink to this definition">¶</a></dt>
-<dd><p>Hook method for setting up class fixture before running tests in the class.</p>
-</dd></dl>
-
-<dl class="py method">
-<dt id="multiview_platform.tests.test_utils.test_execution.Test_genDirecortiesNames.test_ovo_no_iter">
-<code class="sig-name descname">test_ovo_no_iter</code><span class="sig-paren">(</span><span class="sig-paren">)</span><a class="headerlink" href="#multiview_platform.tests.test_utils.test_execution.Test_genDirecortiesNames.test_ovo_no_iter" title="Permalink to this definition">¶</a></dt>
-<dd></dd></dl>
-
-<dl class="py method">
-<dt id="multiview_platform.tests.test_utils.test_execution.Test_genDirecortiesNames.test_simple_ovo">
-<code class="sig-name descname">test_simple_ovo</code><span class="sig-paren">(</span><span class="sig-paren">)</span><a class="headerlink" href="#multiview_platform.tests.test_utils.test_execution.Test_genDirecortiesNames.test_simple_ovo" title="Permalink to this definition">¶</a></dt>
-<dd></dd></dl>
-
-</dd></dl>
-
-<dl class="py class">
-<dt id="multiview_platform.tests.test_utils.test_execution.Test_genKFolds">
-<em class="property">class </em><code class="sig-name descname">Test_genKFolds</code><span class="sig-paren">(</span><em class="sig-param"><span class="n">methodName</span><span class="o">=</span><span class="default_value">'runTest'</span></em><span class="sig-paren">)</span><a class="headerlink" href="#multiview_platform.tests.test_utils.test_execution.Test_genKFolds" title="Permalink to this definition">¶</a></dt>
-<dd><p>Bases: <code class="xref py py-class docutils literal notranslate"><span class="pre">unittest.case.TestCase</span></code></p>
-<dl class="py method">
-<dt id="multiview_platform.tests.test_utils.test_execution.Test_genKFolds.setUp">
-<code class="sig-name descname">setUp</code><span class="sig-paren">(</span><span class="sig-paren">)</span><a class="headerlink" href="#multiview_platform.tests.test_utils.test_execution.Test_genKFolds.setUp" title="Permalink to this definition">¶</a></dt>
-<dd><p>Hook method for setting up the test fixture before exercising it.</p>
-</dd></dl>
-
-<dl class="py method">
-<dt id="multiview_platform.tests.test_utils.test_execution.Test_genKFolds.test_genKFolds_iter">
-<code class="sig-name descname">test_genKFolds_iter</code><span class="sig-paren">(</span><span class="sig-paren">)</span><a class="headerlink" href="#multiview_platform.tests.test_utils.test_execution.Test_genKFolds.test_genKFolds_iter" title="Permalink to this definition">¶</a></dt>
-<dd></dd></dl>
-
-</dd></dl>
-
-<dl class="py class">
-<dt id="multiview_platform.tests.test_utils.test_execution.Test_genSplits">
-<em class="property">class </em><code class="sig-name descname">Test_genSplits</code><span class="sig-paren">(</span><em class="sig-param"><span class="n">methodName</span><span class="o">=</span><span class="default_value">'runTest'</span></em><span class="sig-paren">)</span><a class="headerlink" href="#multiview_platform.tests.test_utils.test_execution.Test_genSplits" title="Permalink to this definition">¶</a></dt>
-<dd><p>Bases: <code class="xref py py-class docutils literal notranslate"><span class="pre">unittest.case.TestCase</span></code></p>
-<dl class="py method">
-<dt id="multiview_platform.tests.test_utils.test_execution.Test_genSplits.setUp">
-<code class="sig-name descname">setUp</code><span class="sig-paren">(</span><span class="sig-paren">)</span><a class="headerlink" href="#multiview_platform.tests.test_utils.test_execution.Test_genSplits.setUp" title="Permalink to this definition">¶</a></dt>
-<dd><p>Hook method for setting up the test fixture before exercising it.</p>
-</dd></dl>
-
-<dl class="py method">
-<dt id="multiview_platform.tests.test_utils.test_execution.Test_genSplits.test_genSplits_no_iter">
-<code class="sig-name descname">test_genSplits_no_iter</code><span class="sig-paren">(</span><span class="sig-paren">)</span><a class="headerlink" href="#multiview_platform.tests.test_utils.test_execution.Test_genSplits.test_genSplits_no_iter" title="Permalink to this definition">¶</a></dt>
-<dd></dd></dl>
-
-<dl class="py method">
-<dt id="multiview_platform.tests.test_utils.test_execution.Test_genSplits.test_simple">
-<code class="sig-name descname">test_simple</code><span class="sig-paren">(</span><span class="sig-paren">)</span><a class="headerlink" href="#multiview_platform.tests.test_utils.test_execution.Test_genSplits.test_simple" title="Permalink to this definition">¶</a></dt>
-<dd></dd></dl>
-
-</dd></dl>
-
-<dl class="py class">
-<dt id="multiview_platform.tests.test_utils.test_execution.Test_getDatabaseFunction">
-<em class="property">class </em><code class="sig-name descname">Test_getDatabaseFunction</code><span class="sig-paren">(</span><em class="sig-param"><span class="n">methodName</span><span class="o">=</span><span class="default_value">'runTest'</span></em><span class="sig-paren">)</span><a class="headerlink" href="#multiview_platform.tests.test_utils.test_execution.Test_getDatabaseFunction" title="Permalink to this definition">¶</a></dt>
-<dd><p>Bases: <code class="xref py py-class docutils literal notranslate"><span class="pre">unittest.case.TestCase</span></code></p>
-<dl class="py method">
-<dt id="multiview_platform.tests.test_utils.test_execution.Test_getDatabaseFunction.setUpClass">
-<em class="property">classmethod </em><code class="sig-name descname">setUpClass</code><span class="sig-paren">(</span><span class="sig-paren">)</span><a class="headerlink" href="#multiview_platform.tests.test_utils.test_execution.Test_getDatabaseFunction.setUpClass" title="Permalink to this definition">¶</a></dt>
-<dd><p>Hook method for setting up class fixture before running tests in the class.</p>
-</dd></dl>
-
-<dl class="py method">
-<dt id="multiview_platform.tests.test_utils.test_execution.Test_getDatabaseFunction.test_hdf5">
-<code class="sig-name descname">test_hdf5</code><span class="sig-paren">(</span><span class="sig-paren">)</span><a class="headerlink" href="#multiview_platform.tests.test_utils.test_execution.Test_getDatabaseFunction.test_hdf5" title="Permalink to this definition">¶</a></dt>
-<dd></dd></dl>
-
-<dl class="py method">
-<dt id="multiview_platform.tests.test_utils.test_execution.Test_getDatabaseFunction.test_plausible_hdf5">
-<code class="sig-name descname">test_plausible_hdf5</code><span class="sig-paren">(</span><span class="sig-paren">)</span><a class="headerlink" href="#multiview_platform.tests.test_utils.test_execution.Test_getDatabaseFunction.test_plausible_hdf5" title="Permalink to this definition">¶</a></dt>
-<dd></dd></dl>
-
-<dl class="py method">
-<dt id="multiview_platform.tests.test_utils.test_execution.Test_getDatabaseFunction.test_simple">
-<code class="sig-name descname">test_simple</code><span class="sig-paren">(</span><span class="sig-paren">)</span><a class="headerlink" href="#multiview_platform.tests.test_utils.test_execution.Test_getDatabaseFunction.test_simple" title="Permalink to this definition">¶</a></dt>
-<dd></dd></dl>
-
-</dd></dl>
-
-<dl class="py class">
-<dt id="multiview_platform.tests.test_utils.test_execution.Test_initRandomState">
-<em class="property">class </em><code class="sig-name descname">Test_initRandomState</code><span class="sig-paren">(</span><em class="sig-param"><span class="n">methodName</span><span class="o">=</span><span class="default_value">'runTest'</span></em><span class="sig-paren">)</span><a class="headerlink" href="#multiview_platform.tests.test_utils.test_execution.Test_initRandomState" title="Permalink to this definition">¶</a></dt>
-<dd><p>Bases: <code class="xref py py-class docutils literal notranslate"><span class="pre">unittest.case.TestCase</span></code></p>
-<dl class="py method">
-<dt id="multiview_platform.tests.test_utils.test_execution.Test_initRandomState.setUp">
-<code class="sig-name descname">setUp</code><span class="sig-paren">(</span><span class="sig-paren">)</span><a class="headerlink" href="#multiview_platform.tests.test_utils.test_execution.Test_initRandomState.setUp" title="Permalink to this definition">¶</a></dt>
-<dd><p>Hook method for setting up the test fixture before exercising it.</p>
-</dd></dl>
-
-<dl class="py method">
-<dt id="multiview_platform.tests.test_utils.test_execution.Test_initRandomState.tearDown">
-<code class="sig-name descname">tearDown</code><span class="sig-paren">(</span><span class="sig-paren">)</span><a class="headerlink" href="#multiview_platform.tests.test_utils.test_execution.Test_initRandomState.tearDown" title="Permalink to this definition">¶</a></dt>
-<dd><p>Hook method for deconstructing the test fixture after testing it.</p>
-</dd></dl>
-
-<dl class="py method">
-<dt id="multiview_platform.tests.test_utils.test_execution.Test_initRandomState.test_random_state_42">
-<code class="sig-name descname">test_random_state_42</code><span class="sig-paren">(</span><span class="sig-paren">)</span><a class="headerlink" href="#multiview_platform.tests.test_utils.test_execution.Test_initRandomState.test_random_state_42" title="Permalink to this definition">¶</a></dt>
-<dd></dd></dl>
-
-<dl class="py method">
-<dt id="multiview_platform.tests.test_utils.test_execution.Test_initRandomState.test_random_state_pickle">
-<code class="sig-name descname">test_random_state_pickle</code><span class="sig-paren">(</span><span class="sig-paren">)</span><a class="headerlink" href="#multiview_platform.tests.test_utils.test_execution.Test_initRandomState.test_random_state_pickle" title="Permalink to this definition">¶</a></dt>
-<dd></dd></dl>
-
-</dd></dl>
-
-<dl class="py class">
-<dt id="multiview_platform.tests.test_utils.test_execution.Test_initStatsIterRandomStates">
-<em class="property">class </em><code class="sig-name descname">Test_initStatsIterRandomStates</code><span class="sig-paren">(</span><em class="sig-param"><span class="n">methodName</span><span class="o">=</span><span class="default_value">'runTest'</span></em><span class="sig-paren">)</span><a class="headerlink" href="#multiview_platform.tests.test_utils.test_execution.Test_initStatsIterRandomStates" title="Permalink to this definition">¶</a></dt>
-<dd><p>Bases: <code class="xref py py-class docutils literal notranslate"><span class="pre">unittest.case.TestCase</span></code></p>
-<dl class="py method">
-<dt id="multiview_platform.tests.test_utils.test_execution.Test_initStatsIterRandomStates.setUpClass">
-<em class="property">classmethod </em><code class="sig-name descname">setUpClass</code><span class="sig-paren">(</span><span class="sig-paren">)</span><a class="headerlink" href="#multiview_platform.tests.test_utils.test_execution.Test_initStatsIterRandomStates.setUpClass" title="Permalink to this definition">¶</a></dt>
-<dd><p>Hook method for setting up class fixture before running tests in the class.</p>
-</dd></dl>
-
-<dl class="py method">
-<dt id="multiview_platform.tests.test_utils.test_execution.Test_initStatsIterRandomStates.test_multiple_iter">
-<code class="sig-name descname">test_multiple_iter</code><span class="sig-paren">(</span><span class="sig-paren">)</span><a class="headerlink" href="#multiview_platform.tests.test_utils.test_execution.Test_initStatsIterRandomStates.test_multiple_iter" title="Permalink to this definition">¶</a></dt>
-<dd></dd></dl>
-
-<dl class="py method">
-<dt id="multiview_platform.tests.test_utils.test_execution.Test_initStatsIterRandomStates.test_one_statiter">
-<code class="sig-name descname">test_one_statiter</code><span class="sig-paren">(</span><span class="sig-paren">)</span><a class="headerlink" href="#multiview_platform.tests.test_utils.test_execution.Test_initStatsIterRandomStates.test_one_statiter" title="Permalink to this definition">¶</a></dt>
-<dd></dd></dl>
-
-</dd></dl>
-
-<dl class="py class">
-<dt id="multiview_platform.tests.test_utils.test_execution.Test_parseTheArgs">
-<em class="property">class </em><code class="sig-name descname">Test_parseTheArgs</code><span class="sig-paren">(</span><em class="sig-param"><span class="n">methodName</span><span class="o">=</span><span class="default_value">'runTest'</span></em><span class="sig-paren">)</span><a class="headerlink" href="#multiview_platform.tests.test_utils.test_execution.Test_parseTheArgs" title="Permalink to this definition">¶</a></dt>
-<dd><p>Bases: <code class="xref py py-class docutils literal notranslate"><span class="pre">unittest.case.TestCase</span></code></p>
-<dl class="py method">
-<dt id="multiview_platform.tests.test_utils.test_execution.Test_parseTheArgs.setUp">
-<code class="sig-name descname">setUp</code><span class="sig-paren">(</span><span class="sig-paren">)</span><a class="headerlink" href="#multiview_platform.tests.test_utils.test_execution.Test_parseTheArgs.setUp" title="Permalink to this definition">¶</a></dt>
-<dd><p>Hook method for setting up the test fixture before exercising it.</p>
-</dd></dl>
-
-<dl class="py method">
-<dt id="multiview_platform.tests.test_utils.test_execution.Test_parseTheArgs.test_empty_args">
-<code class="sig-name descname">test_empty_args</code><span class="sig-paren">(</span><span class="sig-paren">)</span><a class="headerlink" href="#multiview_platform.tests.test_utils.test_execution.Test_parseTheArgs.test_empty_args" title="Permalink to this definition">¶</a></dt>
-<dd></dd></dl>
-
-</dd></dl>
-
+<div class="section" id="multiview-platform-tests-test-utils-test-execution-module">
+<h2>multiview_platform.tests.test_utils.test_execution module<a class="headerlink" href="#multiview-platform-tests-test-utils-test-execution-module" title="Permalink to this headline">¶</a></h2>
 </div>
-<div class="section" id="module-multiview_platform.tests.test_utils.test_multiclass">
-<span id="multiview-platform-tests-test-utils-test-multiclass-module"></span><h2>multiview_platform.tests.test_utils.test_multiclass module<a class="headerlink" href="#module-multiview_platform.tests.test_utils.test_multiclass" title="Permalink to this headline">¶</a></h2>
-<dl class="py class">
-<dt id="multiview_platform.tests.test_utils.test_multiclass.FakeDset">
-<em class="property">class </em><code class="sig-name descname">FakeDset</code><span class="sig-paren">(</span><em class="sig-param"><span class="n">n_examples</span></em><span class="sig-paren">)</span><a class="headerlink" href="#multiview_platform.tests.test_utils.test_multiclass.FakeDset" title="Permalink to this definition">¶</a></dt>
-<dd><p>Bases: <code class="xref py py-class docutils literal notranslate"><span class="pre">object</span></code></p>
-<dl class="py method">
-<dt id="multiview_platform.tests.test_utils.test_multiclass.FakeDset.get_nb_examples">
-<code class="sig-name descname">get_nb_examples</code><span class="sig-paren">(</span><span class="sig-paren">)</span><a class="headerlink" href="#multiview_platform.tests.test_utils.test_multiclass.FakeDset.get_nb_examples" title="Permalink to this definition">¶</a></dt>
-<dd></dd></dl>
-
-</dd></dl>
-
-<dl class="py class">
-<dt id="multiview_platform.tests.test_utils.test_multiclass.FakeEstimNative">
-<em class="property">class </em><code class="sig-name descname">FakeEstimNative</code><a class="headerlink" href="#multiview_platform.tests.test_utils.test_multiclass.FakeEstimNative" title="Permalink to this definition">¶</a></dt>
-<dd><p>Bases: <a class="reference internal" href="#multiview_platform.tests.test_utils.test_multiclass.FakeMCEstim" title="multiview_platform.tests.test_utils.test_multiclass.FakeMCEstim"><code class="xref py py-class docutils literal notranslate"><span class="pre">multiview_platform.tests.test_utils.test_multiclass.FakeMCEstim</span></code></a></p>
-<dl class="py method">
-<dt id="multiview_platform.tests.test_utils.test_multiclass.FakeEstimNative.accepts_multi_class">
-<code class="sig-name descname">accepts_multi_class</code><span class="sig-paren">(</span><em class="sig-param"><span class="n">random_state</span></em><span class="sig-paren">)</span><a class="headerlink" href="#multiview_platform.tests.test_utils.test_multiclass.FakeEstimNative.accepts_multi_class" title="Permalink to this definition">¶</a></dt>
-<dd></dd></dl>
-
-</dd></dl>
-
-<dl class="py class">
-<dt id="multiview_platform.tests.test_utils.test_multiclass.FakeMCEstim">
-<em class="property">class </em><code class="sig-name descname">FakeMCEstim</code><a class="headerlink" href="#multiview_platform.tests.test_utils.test_multiclass.FakeMCEstim" title="Permalink to this definition">¶</a></dt>
-<dd><p>Bases: <code class="xref py py-class docutils literal notranslate"><span class="pre">sklearn.base.BaseEstimator</span></code></p>
-<dl class="py method">
-<dt id="multiview_platform.tests.test_utils.test_multiclass.FakeMCEstim.accepts_multi_class">
-<code class="sig-name descname">accepts_multi_class</code><span class="sig-paren">(</span><em class="sig-param"><span class="n">random_state</span></em><span class="sig-paren">)</span><a class="headerlink" href="#multiview_platform.tests.test_utils.test_multiclass.FakeMCEstim.accepts_multi_class" title="Permalink to this definition">¶</a></dt>
-<dd></dd></dl>
-
-</dd></dl>
-
-<dl class="py class">
-<dt id="multiview_platform.tests.test_utils.test_multiclass.FakeMVClassifier">
-<em class="property">class </em><code class="sig-name descname">FakeMVClassifier</code><span class="sig-paren">(</span><em class="sig-param"><span class="n">short_name</span><span class="o">=</span><span class="default_value">'None'</span></em><span class="sig-paren">)</span><a class="headerlink" href="#multiview_platform.tests.test_utils.test_multiclass.FakeMVClassifier" title="Permalink to this definition">¶</a></dt>
-<dd><p>Bases: <code class="xref py py-class docutils literal notranslate"><span class="pre">sklearn.base.BaseEstimator</span></code></p>
-<dl class="py method">
-<dt id="multiview_platform.tests.test_utils.test_multiclass.FakeMVClassifier.fit">
-<code class="sig-name descname">fit</code><span class="sig-paren">(</span><em class="sig-param"><span class="n">X</span></em>, <em class="sig-param"><span class="n">y</span></em>, <em class="sig-param"><span class="n">train_indices</span><span class="o">=</span><span class="default_value">None</span></em>, <em class="sig-param"><span class="n">view_indices</span><span class="o">=</span><span class="default_value">None</span></em><span class="sig-paren">)</span><a class="headerlink" href="#multiview_platform.tests.test_utils.test_multiclass.FakeMVClassifier.fit" title="Permalink to this definition">¶</a></dt>
-<dd></dd></dl>
-
-<dl class="py method">
-<dt id="multiview_platform.tests.test_utils.test_multiclass.FakeMVClassifier.predict">
-<code class="sig-name descname">predict</code><span class="sig-paren">(</span><em class="sig-param"><span class="n">X</span></em>, <em class="sig-param"><span class="n">example_indices</span><span class="o">=</span><span class="default_value">None</span></em>, <em class="sig-param"><span class="n">view_indices</span><span class="o">=</span><span class="default_value">None</span></em><span class="sig-paren">)</span><a class="headerlink" href="#multiview_platform.tests.test_utils.test_multiclass.FakeMVClassifier.predict" title="Permalink to this definition">¶</a></dt>
-<dd></dd></dl>
-
-</dd></dl>
-
-<dl class="py class">
-<dt id="multiview_platform.tests.test_utils.test_multiclass.FakeMVClassifierProb">
-<em class="property">class </em><code class="sig-name descname">FakeMVClassifierProb</code><span class="sig-paren">(</span><em class="sig-param"><span class="n">short_name</span><span class="o">=</span><span class="default_value">'None'</span></em><span class="sig-paren">)</span><a class="headerlink" href="#multiview_platform.tests.test_utils.test_multiclass.FakeMVClassifierProb" title="Permalink to this definition">¶</a></dt>
-<dd><p>Bases: <a class="reference internal" href="#multiview_platform.tests.test_utils.test_multiclass.FakeMVClassifier" title="multiview_platform.tests.test_utils.test_multiclass.FakeMVClassifier"><code class="xref py py-class docutils literal notranslate"><span class="pre">multiview_platform.tests.test_utils.test_multiclass.FakeMVClassifier</span></code></a></p>
-<dl class="py method">
-<dt id="multiview_platform.tests.test_utils.test_multiclass.FakeMVClassifierProb.predict_proba">
-<code class="sig-name descname">predict_proba</code><span class="sig-paren">(</span><em class="sig-param"><span class="n">X</span></em>, <em class="sig-param"><span class="n">example_indices</span><span class="o">=</span><span class="default_value">None</span></em>, <em class="sig-param"><span class="n">view_indices</span><span class="o">=</span><span class="default_value">None</span></em><span class="sig-paren">)</span><a class="headerlink" href="#multiview_platform.tests.test_utils.test_multiclass.FakeMVClassifierProb.predict_proba" title="Permalink to this definition">¶</a></dt>
-<dd></dd></dl>
-
-</dd></dl>
-
-<dl class="py class">
-<dt id="multiview_platform.tests.test_utils.test_multiclass.FakeNonProbaEstim">
-<em class="property">class </em><code class="sig-name descname">FakeNonProbaEstim</code><a class="headerlink" href="#multiview_platform.tests.test_utils.test_multiclass.FakeNonProbaEstim" title="Permalink to this definition">¶</a></dt>
-<dd><p>Bases: <a class="reference internal" href="#multiview_platform.tests.test_utils.test_multiclass.FakeMCEstim" title="multiview_platform.tests.test_utils.test_multiclass.FakeMCEstim"><code class="xref py py-class docutils literal notranslate"><span class="pre">multiview_platform.tests.test_utils.test_multiclass.FakeMCEstim</span></code></a></p>
-</dd></dl>
-
-<dl class="py class">
-<dt id="multiview_platform.tests.test_utils.test_multiclass.FakeProbaEstim">
-<em class="property">class </em><code class="sig-name descname">FakeProbaEstim</code><a class="headerlink" href="#multiview_platform.tests.test_utils.test_multiclass.FakeProbaEstim" title="Permalink to this definition">¶</a></dt>
-<dd><p>Bases: <a class="reference internal" href="#multiview_platform.tests.test_utils.test_multiclass.FakeMCEstim" title="multiview_platform.tests.test_utils.test_multiclass.FakeMCEstim"><code class="xref py py-class docutils literal notranslate"><span class="pre">multiview_platform.tests.test_utils.test_multiclass.FakeMCEstim</span></code></a></p>
-<dl class="py method">
-<dt id="multiview_platform.tests.test_utils.test_multiclass.FakeProbaEstim.predict_proba">
-<code class="sig-name descname">predict_proba</code><span class="sig-paren">(</span><span class="sig-paren">)</span><a class="headerlink" href="#multiview_platform.tests.test_utils.test_multiclass.FakeProbaEstim.predict_proba" title="Permalink to this definition">¶</a></dt>
-<dd></dd></dl>
-
-</dd></dl>
-
-<dl class="py class">
-<dt id="multiview_platform.tests.test_utils.test_multiclass.Test_MultiviewOVOWrapper_fit">
-<em class="property">class </em><code class="sig-name descname">Test_MultiviewOVOWrapper_fit</code><span class="sig-paren">(</span><em class="sig-param"><span class="n">methodName</span><span class="o">=</span><span class="default_value">'runTest'</span></em><span class="sig-paren">)</span><a class="headerlink" href="#multiview_platform.tests.test_utils.test_multiclass.Test_MultiviewOVOWrapper_fit" title="Permalink to this definition">¶</a></dt>
-<dd><p>Bases: <code class="xref py py-class docutils literal notranslate"><span class="pre">unittest.case.TestCase</span></code></p>
-<dl class="py method">
-<dt id="multiview_platform.tests.test_utils.test_multiclass.Test_MultiviewOVOWrapper_fit.setUpClass">
-<em class="property">classmethod </em><code class="sig-name descname">setUpClass</code><span class="sig-paren">(</span><span class="sig-paren">)</span><a class="headerlink" href="#multiview_platform.tests.test_utils.test_multiclass.Test_MultiviewOVOWrapper_fit.setUpClass" title="Permalink to this definition">¶</a></dt>
-<dd><p>Hook method for setting up class fixture before running tests in the class.</p>
-</dd></dl>
-
-<dl class="py method">
-<dt id="multiview_platform.tests.test_utils.test_multiclass.Test_MultiviewOVOWrapper_fit.test_fit">
-<code class="sig-name descname">test_fit</code><span class="sig-paren">(</span><span class="sig-paren">)</span><a class="headerlink" href="#multiview_platform.tests.test_utils.test_multiclass.Test_MultiviewOVOWrapper_fit.test_fit" title="Permalink to this definition">¶</a></dt>
-<dd></dd></dl>
-
-<dl class="py method">
-<dt id="multiview_platform.tests.test_utils.test_multiclass.Test_MultiviewOVOWrapper_fit.test_predict">
-<code class="sig-name descname">test_predict</code><span class="sig-paren">(</span><span class="sig-paren">)</span><a class="headerlink" href="#multiview_platform.tests.test_utils.test_multiclass.Test_MultiviewOVOWrapper_fit.test_predict" title="Permalink to this definition">¶</a></dt>
-<dd></dd></dl>
-
-</dd></dl>
-
-<dl class="py class">
-<dt id="multiview_platform.tests.test_utils.test_multiclass.Test_MultiviewOVRWrapper_fit">
-<em class="property">class </em><code class="sig-name descname">Test_MultiviewOVRWrapper_fit</code><span class="sig-paren">(</span><em class="sig-param"><span class="n">methodName</span><span class="o">=</span><span class="default_value">'runTest'</span></em><span class="sig-paren">)</span><a class="headerlink" href="#multiview_platform.tests.test_utils.test_multiclass.Test_MultiviewOVRWrapper_fit" title="Permalink to this definition">¶</a></dt>
-<dd><p>Bases: <code class="xref py py-class docutils literal notranslate"><span class="pre">unittest.case.TestCase</span></code></p>
-<dl class="py method">
-<dt id="multiview_platform.tests.test_utils.test_multiclass.Test_MultiviewOVRWrapper_fit.setUpClass">
-<em class="property">classmethod </em><code class="sig-name descname">setUpClass</code><span class="sig-paren">(</span><span class="sig-paren">)</span><a class="headerlink" href="#multiview_platform.tests.test_utils.test_multiclass.Test_MultiviewOVRWrapper_fit.setUpClass" title="Permalink to this definition">¶</a></dt>
-<dd><p>Hook method for setting up class fixture before running tests in the class.</p>
-</dd></dl>
-
-<dl class="py method">
-<dt id="multiview_platform.tests.test_utils.test_multiclass.Test_MultiviewOVRWrapper_fit.test_fit">
-<code class="sig-name descname">test_fit</code><span class="sig-paren">(</span><span class="sig-paren">)</span><a class="headerlink" href="#multiview_platform.tests.test_utils.test_multiclass.Test_MultiviewOVRWrapper_fit.test_fit" title="Permalink to this definition">¶</a></dt>
-<dd></dd></dl>
-
-<dl class="py method">
-<dt id="multiview_platform.tests.test_utils.test_multiclass.Test_MultiviewOVRWrapper_fit.test_predict">
-<code class="sig-name descname">test_predict</code><span class="sig-paren">(</span><span class="sig-paren">)</span><a class="headerlink" href="#multiview_platform.tests.test_utils.test_multiclass.Test_MultiviewOVRWrapper_fit.test_predict" title="Permalink to this definition">¶</a></dt>
-<dd></dd></dl>
-
-</dd></dl>
-
-<dl class="py class">
-<dt id="multiview_platform.tests.test_utils.test_multiclass.Test_get_mc_estim">
-<em class="property">class </em><code class="sig-name descname">Test_get_mc_estim</code><span class="sig-paren">(</span><em class="sig-param"><span class="n">methodName</span><span class="o">=</span><span class="default_value">'runTest'</span></em><span class="sig-paren">)</span><a class="headerlink" href="#multiview_platform.tests.test_utils.test_multiclass.Test_get_mc_estim" title="Permalink to this definition">¶</a></dt>
-<dd><p>Bases: <code class="xref py py-class docutils literal notranslate"><span class="pre">unittest.case.TestCase</span></code></p>
-<dl class="py method">
-<dt id="multiview_platform.tests.test_utils.test_multiclass.Test_get_mc_estim.setUpClass">
-<em class="property">classmethod </em><code class="sig-name descname">setUpClass</code><span class="sig-paren">(</span><span class="sig-paren">)</span><a class="headerlink" href="#multiview_platform.tests.test_utils.test_multiclass.Test_get_mc_estim.setUpClass" title="Permalink to this definition">¶</a></dt>
-<dd><p>Hook method for setting up class fixture before running tests in the class.</p>
-</dd></dl>
-
-<dl class="py method">
-<dt id="multiview_platform.tests.test_utils.test_multiclass.Test_get_mc_estim.test_biclass">
-<code class="sig-name descname">test_biclass</code><span class="sig-paren">(</span><span class="sig-paren">)</span><a class="headerlink" href="#multiview_platform.tests.test_utils.test_multiclass.Test_get_mc_estim.test_biclass" title="Permalink to this definition">¶</a></dt>
-<dd></dd></dl>
-
-<dl class="py method">
-<dt id="multiview_platform.tests.test_utils.test_multiclass.Test_get_mc_estim.test_multiclass_native">
-<code class="sig-name descname">test_multiclass_native</code><span class="sig-paren">(</span><span class="sig-paren">)</span><a class="headerlink" href="#multiview_platform.tests.test_utils.test_multiclass.Test_get_mc_estim.test_multiclass_native" title="Permalink to this definition">¶</a></dt>
-<dd></dd></dl>
-
-<dl class="py method">
-<dt id="multiview_platform.tests.test_utils.test_multiclass.Test_get_mc_estim.test_multiclass_ovo">
-<code class="sig-name descname">test_multiclass_ovo</code><span class="sig-paren">(</span><span class="sig-paren">)</span><a class="headerlink" href="#multiview_platform.tests.test_utils.test_multiclass.Test_get_mc_estim.test_multiclass_ovo" title="Permalink to this definition">¶</a></dt>
-<dd></dd></dl>
-
-<dl class="py method">
-<dt id="multiview_platform.tests.test_utils.test_multiclass.Test_get_mc_estim.test_multiclass_ovo_multiview">
-<code class="sig-name descname">test_multiclass_ovo_multiview</code><span class="sig-paren">(</span><span class="sig-paren">)</span><a class="headerlink" href="#multiview_platform.tests.test_utils.test_multiclass.Test_get_mc_estim.test_multiclass_ovo_multiview" title="Permalink to this definition">¶</a></dt>
-<dd></dd></dl>
-
-<dl class="py method">
-<dt id="multiview_platform.tests.test_utils.test_multiclass.Test_get_mc_estim.test_multiclass_ovr">
-<code class="sig-name descname">test_multiclass_ovr</code><span class="sig-paren">(</span><span class="sig-paren">)</span><a class="headerlink" href="#multiview_platform.tests.test_utils.test_multiclass.Test_get_mc_estim.test_multiclass_ovr" title="Permalink to this definition">¶</a></dt>
-<dd></dd></dl>
-
-<dl class="py method">
-<dt id="multiview_platform.tests.test_utils.test_multiclass.Test_get_mc_estim.test_multiclass_ovr_multiview">
-<code class="sig-name descname">test_multiclass_ovr_multiview</code><span class="sig-paren">(</span><span class="sig-paren">)</span><a class="headerlink" href="#multiview_platform.tests.test_utils.test_multiclass.Test_get_mc_estim.test_multiclass_ovr_multiview" title="Permalink to this definition">¶</a></dt>
-<dd></dd></dl>
-
-</dd></dl>
-
+<div class="section" id="multiview-platform-tests-test-utils-test-multiclass-module">
+<h2>multiview_platform.tests.test_utils.test_multiclass module<a class="headerlink" href="#multiview-platform-tests-test-utils-test-multiclass-module" title="Permalink to this headline">¶</a></h2>
 </div>
-<div class="section" id="module-multiview_platform.tests.test_utils">
-<span id="module-contents"></span><h2>Module contents<a class="headerlink" href="#module-multiview_platform.tests.test_utils" title="Permalink to this headline">¶</a></h2>
+<div class="section" id="module-contents">
+<h2>Module contents<a class="headerlink" href="#module-contents" title="Permalink to this headline">¶</a></h2>
 </div>
 </div>
 
@@ -527,11 +76,11 @@
   <ul>
 <li><a class="reference internal" href="#">multiview_platform.tests.test_utils package</a><ul>
 <li><a class="reference internal" href="#submodules">Submodules</a></li>
-<li><a class="reference internal" href="#module-multiview_platform.tests.test_utils.test_GetMultiviewDB">multiview_platform.tests.test_utils.test_GetMultiviewDB module</a></li>
-<li><a class="reference internal" href="#module-multiview_platform.tests.test_utils.test_configuration">multiview_platform.tests.test_utils.test_configuration module</a></li>
-<li><a class="reference internal" href="#module-multiview_platform.tests.test_utils.test_execution">multiview_platform.tests.test_utils.test_execution module</a></li>
-<li><a class="reference internal" href="#module-multiview_platform.tests.test_utils.test_multiclass">multiview_platform.tests.test_utils.test_multiclass module</a></li>
-<li><a class="reference internal" href="#module-multiview_platform.tests.test_utils">Module contents</a></li>
+<li><a class="reference internal" href="#multiview-platform-tests-test-utils-test-getmultiviewdb-module">multiview_platform.tests.test_utils.test_GetMultiviewDB module</a></li>
+<li><a class="reference internal" href="#multiview-platform-tests-test-utils-test-configuration-module">multiview_platform.tests.test_utils.test_configuration module</a></li>
+<li><a class="reference internal" href="#multiview-platform-tests-test-utils-test-execution-module">multiview_platform.tests.test_utils.test_execution module</a></li>
+<li><a class="reference internal" href="#multiview-platform-tests-test-utils-test-multiclass-module">multiview_platform.tests.test_utils.test_multiclass module</a></li>
+<li><a class="reference internal" href="#module-contents">Module contents</a></li>
 </ul>
 </li>
 </ul>
diff --git a/docs/build/search.html b/docs/build/search.html
index 56361635d545f45feacd6126c88ca0b092cad5e3..e77a43482b28f095bb23af17256807f9502b9f37 100644
--- a/docs/build/search.html
+++ b/docs/build/search.html
@@ -1,9 +1,11 @@
 
-<!DOCTYPE html>
+<!DOCTYPE html PUBLIC "-//W3C//DTD XHTML 1.0 Transitional//EN"
+  "http://www.w3.org/TR/xhtml1/DTD/xhtml1-transitional.dtd">
 
-<html>
+<html xmlns="http://www.w3.org/1999/xhtml">
   <head>
-    <meta charset="utf-8" />
+    <meta http-equiv="X-UA-Compatible" content="IE=Edge" />
+    <meta http-equiv="Content-Type" content="text/html; charset=utf-8" />
     <title>Search &#8212; SuMMIT 0 documentation</title>
     <link rel="stylesheet" href="_static/classic.css" type="text/css" />
     <link rel="stylesheet" href="_static/pygments.css" type="text/css" />
diff --git a/docs/build/searchindex.js b/docs/build/searchindex.js
index e85d648f0a3e1fd7b385973ad495e7dae4e59f33..414955753b3f5c86ffdb0f4fef8a11afe51866e0 100644
--- a/docs/build/searchindex.js
+++ b/docs/build/searchindex.js
@@ -1 +1 @@
-Search.setIndex({docnames:["analyzeresult","api","execution","index","modules","readme_link","references/monomulti/exec_classif","references/monomulti/metrics","references/monomulti/multiview_classifiers/classifiers","references/monomulti/multiview_classifiers/diversity_fusion","references/monomulti/utils/execution","references/monomulti/utils/multiclass","references/monomultidoc","references/multiview_platform","references/multiview_platform.mono_multi_view_classifiers","references/multiview_platform.mono_multi_view_classifiers.multiview_classifiers","references/multiview_platform.mono_multi_view_classifiers.multiview_classifiers.difficulty_fusion","references/multiview_platform.mono_multi_view_classifiers.multiview_classifiers.disagree_fusion","references/multiview_platform.mono_multi_view_classifiers.multiview_classifiers.double_fault_fusion","references/multiview_platform.mono_multi_view_classifiers.multiview_classifiers.entropy_fusion","references/multiview_platform.mono_multi_view_classifiers.multiview_classifiers.fat_late_fusion","references/multiview_platform.mono_multi_view_classifiers.multiview_classifiers.fat_scm_late_fusion","references/multiview_platform.mono_multi_view_classifiers.multiview_classifiers.fusion","references/multiview_platform.mono_multi_view_classifiers.multiview_classifiers.fusion.Methods","references/multiview_platform.mono_multi_view_classifiers.multiview_classifiers.fusion.Methods.EarlyFusionPackage","references/multiview_platform.mono_multi_view_classifiers.multiview_classifiers.fusion.Methods.LateFusionPackage","references/multiview_platform.mono_multi_view_classifiers.multiview_classifiers.pseudo_cq_fusion","references/multiview_platform.mono_multi_view_classifiers.utils","references/multiview_platform.tests","references/multiview_platform.tests.test_metrics","references/multiview_platform.tests.test_mono_view","references/multiview_platform.tests.test_monoview_classifiers","references/multiview_platform.tests.test_multiview_classifiers","references/multiview_platform.tests.test_multiview_classifiers.Test_DifficultyMeasure","references/multiview_platform.tests.test_multiview_classifiers.Test_DisagreeFusion","references/multiview_platform.tests.test_multiview_classifiers.Test_DoubleFaultFusion","references/multiview_platform.tests.test_multiview_classifiers.Test_EntropyFusion","references/multiview_platform.tests.test_multiview_classifiers.Test_Fusion","references/multiview_platform.tests.test_multiview_classifiers.Test_PseudoCQMeasure","references/multiview_platform.tests.test_utils","tutorials/example0","tutorials/example1","tutorials/example2","tutorials/example3","tutorials/example4","tutorials/example5","tutorials/hps_theory","tutorials/index","tutorials/installation"],envversion:{"sphinx.domains.c":2,"sphinx.domains.changeset":1,"sphinx.domains.citation":1,"sphinx.domains.cpp":2,"sphinx.domains.index":1,"sphinx.domains.javascript":2,"sphinx.domains.math":2,"sphinx.domains.python":2,"sphinx.domains.rst":2,"sphinx.domains.std":1,sphinx:56},filenames:["analyzeresult.rst","api.rst","execution.rst","index.rst","modules.rst","readme_link.rst","references/monomulti/exec_classif.rst","references/monomulti/metrics.rst","references/monomulti/multiview_classifiers/classifiers.rst","references/monomulti/multiview_classifiers/diversity_fusion.rst","references/monomulti/utils/execution.rst","references/monomulti/utils/multiclass.rst","references/monomultidoc.rst","references/multiview_platform.rst","references/multiview_platform.mono_multi_view_classifiers.rst","references/multiview_platform.mono_multi_view_classifiers.multiview_classifiers.rst","references/multiview_platform.mono_multi_view_classifiers.multiview_classifiers.difficulty_fusion.rst","references/multiview_platform.mono_multi_view_classifiers.multiview_classifiers.disagree_fusion.rst","references/multiview_platform.mono_multi_view_classifiers.multiview_classifiers.double_fault_fusion.rst","references/multiview_platform.mono_multi_view_classifiers.multiview_classifiers.entropy_fusion.rst","references/multiview_platform.mono_multi_view_classifiers.multiview_classifiers.fat_late_fusion.rst","references/multiview_platform.mono_multi_view_classifiers.multiview_classifiers.fat_scm_late_fusion.rst","references/multiview_platform.mono_multi_view_classifiers.multiview_classifiers.fusion.rst","references/multiview_platform.mono_multi_view_classifiers.multiview_classifiers.fusion.Methods.rst","references/multiview_platform.mono_multi_view_classifiers.multiview_classifiers.fusion.Methods.EarlyFusionPackage.rst","references/multiview_platform.mono_multi_view_classifiers.multiview_classifiers.fusion.Methods.LateFusionPackage.rst","references/multiview_platform.mono_multi_view_classifiers.multiview_classifiers.pseudo_cq_fusion.rst","references/multiview_platform.mono_multi_view_classifiers.utils.rst","references/multiview_platform.tests.rst","references/multiview_platform.tests.test_metrics.rst","references/multiview_platform.tests.test_mono_view.rst","references/multiview_platform.tests.test_monoview_classifiers.rst","references/multiview_platform.tests.test_multiview_classifiers.rst","references/multiview_platform.tests.test_multiview_classifiers.Test_DifficultyMeasure.rst","references/multiview_platform.tests.test_multiview_classifiers.Test_DisagreeFusion.rst","references/multiview_platform.tests.test_multiview_classifiers.Test_DoubleFaultFusion.rst","references/multiview_platform.tests.test_multiview_classifiers.Test_EntropyFusion.rst","references/multiview_platform.tests.test_multiview_classifiers.Test_Fusion.rst","references/multiview_platform.tests.test_multiview_classifiers.Test_PseudoCQMeasure.rst","references/multiview_platform.tests.test_utils.rst","tutorials/example0.rst","tutorials/example1.rst","tutorials/example2.rst","tutorials/example3.rst","tutorials/example4.rst","tutorials/example5.rst","tutorials/hps_theory.rst","tutorials/index.rst","tutorials/installation.rst"],objects:{"":{multiview_platform:[13,0,0,"-"]},"multiview_platform.execute":{execute:[13,1,1,""]},"multiview_platform.mono_multi_view_classifiers":{exec_classif:[14,0,0,"-"],multiview_classifiers:[15,0,0,"-"],result_analysis:[14,0,0,"-"],utils:[27,0,0,"-"]},"multiview_platform.mono_multi_view_classifiers.exec_classif":{arange_metrics:[14,1,1,""],benchmark_init:[14,1,1,""],exec_benchmark:[14,1,1,""],exec_classif:[14,1,1,""],exec_one_benchmark_mono_core:[14,1,1,""],extract_dict:[14,1,1,""],gen_single_monoview_arg_dictionary:[14,1,1,""],gen_single_multiview_arg_dictionary:[14,1,1,""],get_path_dict:[14,1,1,""],init_argument_dictionaries:[14,1,1,""],init_benchmark:[14,1,1,""],init_kwargs:[14,1,1,""],init_kwargs_func:[14,1,1,""],init_monoview_exps:[14,1,1,""],init_multiview_exps:[14,1,1,""],is_dict_in:[14,1,1,""],set_element:[14,1,1,""]},"multiview_platform.mono_multi_view_classifiers.metrics":{framework:[7,0,0,"-"]},"multiview_platform.mono_multi_view_classifiers.metrics.framework":{get_config:[7,1,1,""],get_scorer:[7,1,1,""],score:[7,1,1,""]},"multiview_platform.mono_multi_view_classifiers.multiview_classifiers":{difficulty_fusion:[16,0,0,"-"],disagree_fusion:[17,0,0,"-"],double_fault_fusion:[18,0,0,"-"],entropy_fusion:[19,0,0,"-"]},"multiview_platform.mono_multi_view_classifiers.multiview_classifiers.difficulty_fusion":{DifficultyFusion:[16,2,1,""]},"multiview_platform.mono_multi_view_classifiers.multiview_classifiers.difficulty_fusion.DifficultyFusion":{diversity_measure:[16,3,1,""]},"multiview_platform.mono_multi_view_classifiers.multiview_classifiers.disagree_fusion":{DisagreeFusion:[17,2,1,""]},"multiview_platform.mono_multi_view_classifiers.multiview_classifiers.disagree_fusion.DisagreeFusion":{diversity_measure:[17,3,1,""]},"multiview_platform.mono_multi_view_classifiers.multiview_classifiers.double_fault_fusion":{DoubleFaultFusion:[18,2,1,""]},"multiview_platform.mono_multi_view_classifiers.multiview_classifiers.double_fault_fusion.DoubleFaultFusion":{diversity_measure:[18,3,1,""]},"multiview_platform.mono_multi_view_classifiers.multiview_classifiers.entropy_fusion":{EntropyFusion:[19,2,1,""]},"multiview_platform.mono_multi_view_classifiers.multiview_classifiers.entropy_fusion.EntropyFusion":{diversity_measure:[19,3,1,""]},"multiview_platform.mono_multi_view_classifiers.utils":{configuration:[27,0,0,"-"],dataset:[27,0,0,"-"],execution:[27,0,0,"-"],get_multiview_db:[27,0,0,"-"],hyper_parameter_search:[27,0,0,"-"],make_file_config:[27,0,0,"-"],multiclass:[27,0,0,"-"],multiview_result_analysis:[27,0,0,"-"],transformations:[27,0,0,"-"]},"multiview_platform.mono_multi_view_classifiers.utils.configuration":{get_the_args:[27,1,1,""],pass_default_config:[27,1,1,""],save_config:[27,1,1,""]},"multiview_platform.mono_multi_view_classifiers.utils.dataset":{Dataset:[27,2,1,""],HDF5Dataset:[27,2,1,""],RAMDataset:[27,2,1,""],confirm:[27,1,1,""],copy_hdf5:[27,1,1,""],datasets_already_exist:[27,1,1,""],delete_HDF5:[27,1,1,""],extract_subset:[27,1,1,""],get_examples_views_indices:[27,1,1,""],init_multiple_datasets:[27,1,1,""],input_:[27,1,1,""],is_just_number:[27,1,1,""]},"multiview_platform.mono_multi_view_classifiers.utils.dataset.Dataset":{check_selected_label_names:[27,3,1,""],filter:[27,3,1,""],get_label_names:[27,3,1,""],get_labels:[27,3,1,""],get_nb_examples:[27,3,1,""],get_shape:[27,3,1,""],get_v:[27,3,1,""],init_example_indces:[27,3,1,""],select_labels:[27,3,1,""],select_views_and_labels:[27,3,1,""],to_numpy_array:[27,3,1,""]},"multiview_platform.mono_multi_view_classifiers.utils.dataset.HDF5Dataset":{add_gaussian_noise:[27,3,1,""],copy_view:[27,3,1,""],dataset:[27,4,1,""],filter:[27,3,1,""],get_label_names:[27,3,1,""],get_labels:[27,3,1,""],get_name:[27,3,1,""],get_nb_class:[27,3,1,""],get_nb_examples:[27,3,1,""],get_v:[27,3,1,""],get_view_dict:[27,3,1,""],get_view_name:[27,3,1,""],init_attrs:[27,3,1,""],init_view_names:[27,3,1,""],nb_view:[27,4,1,""],rm:[27,3,1,""],update_hdf5_dataset:[27,3,1,""],view_dict:[27,4,1,""]},"multiview_platform.mono_multi_view_classifiers.utils.dataset.RAMDataset":{filter:[27,3,1,""],get_label_names:[27,3,1,""],get_labels:[27,3,1,""],get_name:[27,3,1,""],get_nb_class:[27,3,1,""],get_nb_examples:[27,3,1,""],get_v:[27,3,1,""],get_view_dict:[27,3,1,""],get_view_name:[27,3,1,""],init_attrs:[27,3,1,""]},"multiview_platform.mono_multi_view_classifiers.utils.execution":{find_dataset_names:[27,1,1,""],gen_argument_dictionaries:[27,1,1,""],gen_direcorties_names:[27,1,1,""],gen_k_folds:[27,1,1,""],gen_splits:[27,1,1,""],get_database_function:[27,1,1,""],init_log_file:[27,1,1,""],init_random_state:[27,1,1,""],init_stats_iter_random_states:[27,1,1,""],init_views:[27,1,1,""],parse_the_args:[27,1,1,""]},"multiview_platform.mono_multi_view_classifiers.utils.get_multiview_db":{DatasetError:[27,5,1,""],get_classic_db_csv:[27,1,1,""],get_classic_db_hdf5:[27,1,1,""],get_plausible_db_hdf5:[27,1,1,""],make_me_noisy:[27,1,1,""]},"multiview_platform.mono_multi_view_classifiers.utils.hyper_parameter_search":{CustomRandint:[27,2,1,""],CustomUniform:[27,2,1,""],Grid:[27,2,1,""],HPSearch:[27,2,1,""],Random:[27,2,1,""],format_params:[27,1,1,""],gen_heat_maps:[27,1,1,""],spear_mint:[27,1,1,""]},"multiview_platform.mono_multi_view_classifiers.utils.hyper_parameter_search.CustomRandint":{get_nb_possibilities:[27,3,1,""],rvs:[27,3,1,""]},"multiview_platform.mono_multi_view_classifiers.utils.hyper_parameter_search.CustomUniform":{rvs:[27,3,1,""]},"multiview_platform.mono_multi_view_classifiers.utils.hyper_parameter_search.Grid":{fit:[27,3,1,""],get_candidate_params:[27,3,1,""]},"multiview_platform.mono_multi_view_classifiers.utils.hyper_parameter_search.HPSearch":{fit_multiview:[27,3,1,""],gen_report:[27,3,1,""],get_best_params:[27,3,1,""],get_candidate_params:[27,3,1,""],get_scoring:[27,3,1,""]},"multiview_platform.mono_multi_view_classifiers.utils.hyper_parameter_search.Random":{fit:[27,3,1,""],get_candidate_params:[27,3,1,""],get_param_distribs:[27,3,1,""]},"multiview_platform.mono_multi_view_classifiers.utils.make_file_config":{ConfigurationMaker:[27,2,1,""]},"multiview_platform.mono_multi_view_classifiers.utils.multiclass":{MonoviewWrapper:[27,2,1,""],MultiClassWrapper:[27,2,1,""],MultiviewOVOWrapper:[27,2,1,""],MultiviewOVRWrapper:[27,2,1,""],MultiviewWrapper:[27,2,1,""],OVOWrapper:[27,2,1,""],OVRWrapper:[27,2,1,""],get_mc_estim:[27,1,1,""]},"multiview_platform.mono_multi_view_classifiers.utils.multiclass.MultiClassWrapper":{format_params:[27,3,1,""],get_config:[27,3,1,""],get_interpretation:[27,3,1,""],set_params:[27,3,1,""]},"multiview_platform.mono_multi_view_classifiers.utils.multiclass.MultiviewOVOWrapper":{fit:[27,3,1,""],get_params:[27,3,1,""],multiview_decision_function:[27,3,1,""],predict:[27,3,1,""]},"multiview_platform.mono_multi_view_classifiers.utils.multiclass.MultiviewOVRWrapper":{fit:[27,3,1,""],get_params:[27,3,1,""],predict:[27,3,1,""]},"multiview_platform.mono_multi_view_classifiers.utils.multiclass.OVOWrapper":{decision_function:[27,3,1,""],get_params:[27,3,1,""]},"multiview_platform.mono_multi_view_classifiers.utils.multiclass.OVRWrapper":{get_params:[27,3,1,""]},"multiview_platform.mono_multi_view_classifiers.utils.multiview_result_analysis":{get_metrics_scores:[27,1,1,""],get_total_metric_scores:[27,1,1,""],print_metric_score:[27,1,1,""]},"multiview_platform.mono_multi_view_classifiers.utils.transformations":{sign_labels:[27,1,1,""],unsign_labels:[27,1,1,""]},"multiview_platform.tests":{test_metrics:[29,0,0,"-"],test_mono_view:[30,0,0,"-"],test_monoview_classifiers:[31,0,0,"-"],test_multiview_classifiers:[32,0,0,"-"],test_utils:[39,0,0,"-"]},"multiview_platform.tests.test_metrics":{test_accuracy_score:[29,0,0,"-"]},"multiview_platform.tests.test_metrics.test_accuracy_score":{Test_accuracy_score:[29,2,1,""]},"multiview_platform.tests.test_metrics.test_accuracy_score.Test_accuracy_score":{score_test:[29,3,1,""],setUpClass:[29,3,1,""]},"multiview_platform.tests.test_mono_view":{test_ExecClassifMonoView:[30,0,0,"-"],test_MonoviewUtils:[30,0,0,"-"]},"multiview_platform.tests.test_mono_view.test_ExecClassifMonoView":{Test_getHPs:[30,2,1,""],Test_initConstants:[30,2,1,""],Test_initTrainTest:[30,2,1,""]},"multiview_platform.tests.test_mono_view.test_ExecClassifMonoView.Test_getHPs":{setUpClass:[30,3,1,""],tearDownClass:[30,3,1,""],test_simple:[30,3,1,""]},"multiview_platform.tests.test_mono_view.test_ExecClassifMonoView.Test_initConstants":{setUpClass:[30,3,1,""],tearDownClass:[30,3,1,""],test_simple:[30,3,1,""]},"multiview_platform.tests.test_mono_view.test_ExecClassifMonoView.Test_initTrainTest":{setUpClass:[30,3,1,""],test_simple:[30,3,1,""]},"multiview_platform.tests.test_mono_view.test_MonoviewUtils":{Test_genTestFoldsPreds:[30,2,1,""]},"multiview_platform.tests.test_mono_view.test_MonoviewUtils.Test_genTestFoldsPreds":{setUpClass:[30,3,1,""],test_simple:[30,3,1,""]},"multiview_platform.tests.test_monoview_classifiers":{test_adaboost:[31,0,0,"-"],test_compatibility:[31,0,0,"-"]},"multiview_platform.tests.test_multiview_classifiers":{Test_PseudoCQMeasure:[38,0,0,"-"],test_diversity_utils:[32,0,0,"-"]},"multiview_platform.tests.test_multiview_classifiers.Test_PseudoCQMeasure":{test_PseudoCQFusionModule:[38,0,0,"-"]},"multiview_platform.tests.test_utils":{test_GetMultiviewDB:[39,0,0,"-"],test_configuration:[39,0,0,"-"],test_execution:[39,0,0,"-"],test_multiclass:[39,0,0,"-"]},"multiview_platform.tests.test_utils.test_GetMultiviewDB":{Test_get_classic_db_csv:[39,2,1,""],Test_get_classic_db_hdf5:[39,2,1,""],Test_get_plausible_db_hdf5:[39,2,1,""]},"multiview_platform.tests.test_utils.test_GetMultiviewDB.Test_get_classic_db_csv":{setUp:[39,3,1,""],tearDown:[39,3,1,""],test_simple:[39,3,1,""]},"multiview_platform.tests.test_utils.test_GetMultiviewDB.Test_get_classic_db_hdf5":{setUp:[39,3,1,""],tearDown:[39,3,1,""],test_all_views_asked:[39,3,1,""],test_asked_the_whole_dataset:[39,3,1,""],test_simple:[39,3,1,""]},"multiview_platform.tests.test_utils.test_GetMultiviewDB.Test_get_plausible_db_hdf5":{setUpClass:[39,3,1,""],tearDownClass:[39,3,1,""],test_simple:[39,3,1,""],test_two_class:[39,3,1,""]},"multiview_platform.tests.test_utils.test_configuration":{Test_get_the_args:[39,2,1,""]},"multiview_platform.tests.test_utils.test_configuration.Test_get_the_args":{setUpClass:[39,3,1,""],tearDownClass:[39,3,1,""],test_arguments:[39,3,1,""],test_dict_format:[39,3,1,""],test_file_loading:[39,3,1,""]},"multiview_platform.tests.test_utils.test_execution":{FakeArg:[39,2,1,""],Test_genArgumentDictionaries:[39,2,1,""],Test_genDirecortiesNames:[39,2,1,""],Test_genKFolds:[39,2,1,""],Test_genSplits:[39,2,1,""],Test_getDatabaseFunction:[39,2,1,""],Test_initRandomState:[39,2,1,""],Test_initStatsIterRandomStates:[39,2,1,""],Test_parseTheArgs:[39,2,1,""]},"multiview_platform.tests.test_utils.test_execution.Test_genArgumentDictionaries":{setUpClass:[39,3,1,""]},"multiview_platform.tests.test_utils.test_execution.Test_genDirecortiesNames":{setUpClass:[39,3,1,""],test_ovo_no_iter:[39,3,1,""],test_simple_ovo:[39,3,1,""]},"multiview_platform.tests.test_utils.test_execution.Test_genKFolds":{setUp:[39,3,1,""],test_genKFolds_iter:[39,3,1,""]},"multiview_platform.tests.test_utils.test_execution.Test_genSplits":{setUp:[39,3,1,""],test_genSplits_no_iter:[39,3,1,""],test_simple:[39,3,1,""]},"multiview_platform.tests.test_utils.test_execution.Test_getDatabaseFunction":{setUpClass:[39,3,1,""],test_hdf5:[39,3,1,""],test_plausible_hdf5:[39,3,1,""],test_simple:[39,3,1,""]},"multiview_platform.tests.test_utils.test_execution.Test_initRandomState":{setUp:[39,3,1,""],tearDown:[39,3,1,""],test_random_state_42:[39,3,1,""],test_random_state_pickle:[39,3,1,""]},"multiview_platform.tests.test_utils.test_execution.Test_initStatsIterRandomStates":{setUpClass:[39,3,1,""],test_multiple_iter:[39,3,1,""],test_one_statiter:[39,3,1,""]},"multiview_platform.tests.test_utils.test_execution.Test_parseTheArgs":{setUp:[39,3,1,""],test_empty_args:[39,3,1,""]},"multiview_platform.tests.test_utils.test_multiclass":{FakeDset:[39,2,1,""],FakeEstimNative:[39,2,1,""],FakeMCEstim:[39,2,1,""],FakeMVClassifier:[39,2,1,""],FakeMVClassifierProb:[39,2,1,""],FakeNonProbaEstim:[39,2,1,""],FakeProbaEstim:[39,2,1,""],Test_MultiviewOVOWrapper_fit:[39,2,1,""],Test_MultiviewOVRWrapper_fit:[39,2,1,""],Test_get_mc_estim:[39,2,1,""]},"multiview_platform.tests.test_utils.test_multiclass.FakeDset":{get_nb_examples:[39,3,1,""]},"multiview_platform.tests.test_utils.test_multiclass.FakeEstimNative":{accepts_multi_class:[39,3,1,""]},"multiview_platform.tests.test_utils.test_multiclass.FakeMCEstim":{accepts_multi_class:[39,3,1,""]},"multiview_platform.tests.test_utils.test_multiclass.FakeMVClassifier":{fit:[39,3,1,""],predict:[39,3,1,""]},"multiview_platform.tests.test_utils.test_multiclass.FakeMVClassifierProb":{predict_proba:[39,3,1,""]},"multiview_platform.tests.test_utils.test_multiclass.FakeProbaEstim":{predict_proba:[39,3,1,""]},"multiview_platform.tests.test_utils.test_multiclass.Test_MultiviewOVOWrapper_fit":{setUpClass:[39,3,1,""],test_fit:[39,3,1,""],test_predict:[39,3,1,""]},"multiview_platform.tests.test_utils.test_multiclass.Test_MultiviewOVRWrapper_fit":{setUpClass:[39,3,1,""],test_fit:[39,3,1,""],test_predict:[39,3,1,""]},"multiview_platform.tests.test_utils.test_multiclass.Test_get_mc_estim":{setUpClass:[39,3,1,""],test_biclass:[39,3,1,""],test_multiclass_native:[39,3,1,""],test_multiclass_ovo:[39,3,1,""],test_multiclass_ovo_multiview:[39,3,1,""],test_multiclass_ovr:[39,3,1,""],test_multiclass_ovr_multiview:[39,3,1,""]},"multiview_platform.versions":{test_versions:[13,1,1,""]},multiview_platform:{execute:[13,0,0,"-"],mono_multi_view_classifiers:[14,0,0,"-"],tests:[28,0,0,"-"],versions:[13,0,0,"-"]}},objnames:{"0":["py","module","Python module"],"1":["py","function","Python function"],"2":["py","class","Python class"],"3":["py","method","Python method"],"4":["py","attribute","Python attribute"],"5":["py","exception","Python exception"]},objtypes:{"0":"py:module","1":"py:function","2":"py:class","3":"py:method","4":"py:attribute","5":"py:exception"},terms:{"0x7f01ddfc24c8":41,"0x7fc3cbffd678":27,"0x7fc7d2dfb048":[],"0x7ff2cf1b7048":[],"14_12":43,"15_42":43,"200x100":44,"200x40":44,"200x55":44,"2020_04_02":43,"20v3":[],"25th":41,"2d_plot_data":[41,43],"abstract":27,"boolean":[7,27,45],"case":[3,27,29,30,39,45],"class":[7,16,17,18,19,27,29,30,39,41,43,44,45,46],"default":[7,27,42],"final":41,"float":[7,27,45,46],"function":[6,7,14,27,41,45],"import":[27,40,41,42,43,44,45],"int":[6,14,27],"long":[44,46],"new":[27,43,45],"return":[6,7,14,27,40,43,45],"short":3,"super":45,"true":[6,14,27,41,42,45,46],"try":[42,44,46],"while":[27,42,43,44,46],And:[42,44],DTs:40,For:[6,14,27,40,41,42,43,45,46],IDs:44,One:[27,45,46],THe:27,That:40,The:[3,6,7,14,27,40,41,43,45,46,47],Then:[41,44,45,48],These:[7,40,41,42,45],Ths:46,Use:47,Used:[6,13,14,27],With:40,Yes:27,__init__:45,_gen:27,_iter:46,_search:27,_view:46,abl:[27,40,42,44,45],about:45,absolut:[],accepts_multi_class:39,access:45,accord:[27,42],accuraci:[40,41,42,46],accuracy_scor:[27,42,43],adaboost:[6,14,42,43],add:[3,6,7,14,27,42,44,45],add_gaussian_nois:27,add_nois:27,added:[6,14,45],adding:[27,44],addit:[16,17,18,19],advantag:3,after:[30,39,41,43],aggreg:40,agrument:27,aim:[],aks:27,alexi:[],algo:45,algo_modul:45,algoclassifi:45,algorithm:[6,14,27,40,41,42,46,47],algos_monoview:[27,41,42],algos_multiview:[27,41,42],all:[3,6,13,14,27,30,39,40,42,43,44],all_view:27,allow:[3,27,41,42,44,45,46],along:27,alreadi:[27,40,42],also:[27,41,42,43,44],alter:41,among:41,amongst:41,amount:42,analysi:[1,40,43,44],analyz:[6,14,41,42,44,46],analyze_iter:[6,14],analyze_result:[13,14,15],ani:[6,14,27,40,46],anim:44,anoth:40,answer:27,appear:43,append:45,approxim:42,arange_metr:[6,14],are_spars:27,arg:[6,14,27],arg_view:27,argmax:27,argument:[6,7,14,27,41,42,43,45,46],argument_dictionari:[6,14,27],around:[],arrai:[6,7,14,27,44],arrang:[6,14],artifact:44,ask:[6,14,27,40,45],asked_labels_nam:27,askedlabelsnam:27,assess:40,astyp:44,atm:44,attr:44,attribut:[27,44,45],author:[],automat:[],avail:[27,40,41,42,44,45],averag:41,avoid:[41,42,45],axi:[27,40],badg:[],balanc:41,baptist:[],bar:[40,41,43],bar_plot_data:[41,43],base:[16,17,18,19,27,29,30,39,42],base_estim:[6,14,42],base_file_nam:[27,45],baseestim:39,basemonoviewclassifi:45,basemultiviewclassifi:45,basic:[40,41],bauvin:[],bayesianinfer:[13,14,15,22,23],bear:44,bear_112:44,becaus:[27,41],becom:42,been:[3,41,44],befor:[27,29,30,39,45],begin:41,behavior:40,being:[3,6,14,44,46],below:[41,42],benchmark:[6,14,27,40,41,42,43,44],benchmark_arguments_dictionari:[6,14],benchmark_init:[6,14],benchmarkargumentdictionari:27,benchmarkargumentsdictionari:27,benielli:[],best:[27,40,41,42,43,45,46],better:[7,41,42,43],between:[27,42,43,44,45,46],biclass:[6,14,27],big:[27,47],bigger:42,bird:44,bird_785:44,bit:45,black:[40,41],blob:[],blue:[],bool:27,both:[40,42],bracket:42,brows:[27,45],build:[41,42],bypass:46,call:[41,45],callabl:7,can:[6,14,27,40,41,42,43,44,45,46],capac:46,car:44,car_369:44,carefulli:[],center:27,chang:27,check:[6,14,27],check_selected_label_nam:27,choic:42,choos:42,chose:44,cl_type:[6,14,27],clariti:44,classier:27,classif:[14,27,41,46],classifi:[3,6,12,14,27,40,42,43,44,46],classification_indic:[6,14],classifier_class_nam:45,classifier_config:[6,14,16,17,18,19,42],classifier_dict:27,classifier_nam:[6,14,16,17,18,19,27,42],classifiers_decis:[16,19],classifiers_nam:[6,14,42],classmethod:[30,39],clean:27,clear:41,clf_error:43,clone:[],close
:43,closer:42,code:[2,3,13,42,43,45],color:40,column:[40,41],combin:[16,19,27,42],command:48,comment:41,commentari:44,commun:45,compar:[40,45],compat:[27,42],compens:[42,46],complementar:41,complementari:41,complementary_:41,complex:[27,41,42,46],compos:45,comput:[27,42,43,46],conain:27,concat_view:27,concaten:[27,40],concern:40,confid:27,config:[6,14,27,40,41,42,43,44,46],config_fil:[27,43],config_path:[6,13,14],configstr:7,configur:[7,13,14,40,41,42,46],configurationmak:27,confirm:27,confus:41,confusion_matrix:[41,43],conjunct:27,consensu:45,consequ:[41,42],consist:[3,40,41,44,46],consol:27,constantli:3,contain:[27,40,41,44,45],content:[1,3,12],control:[41,42,43,46,47],convert:[27,44],copi:[27,41],copy_hdf5:27,copy_view:27,core:[3,6,14,41],correct:45,correctli:44,correspond:[27,44,45],costli:43,could:[40,41,45],count:41,counterpart:40,coupl:[6,14,27],couplediversityfusionclassifi:[17,18],cover:42,creat:[6,14,27,44,45],create_dataset:44,create_group:44,criterion:[41,42],crop:27,cross:[6,14,27,41,42],csv:[27,40,43,44],current:43,customrandint:[27,45],customuniform:[27,45],data:[27,41,44,45,46],data_file_path:44,databas:[27,41],dataset:[3,6,13,14,40,42,43,46,47],dataset_var:[6,14,27,45],dataseterror:27,datasetfil:27,datasetlength:44,datasets_already_exist:27,date:41,deal:27,debug:27,decemb:41,decis:[27,40,41,42,45,46],decision_funct:27,decision_tre:[41,42,43,46],decisiontre:41,decisiontreeclassifi:42,decod:27,deconstruct:[30,39],decreas:42,decrypt:[7,41],deep:[27,46],deeper:46,def:45,defin:[42,44,45],delet:[6,14,27],delete_hdf5:[6,14,27],delimit:[27,44],demand:42,depend:[42,43,46],depth:[42,46],describ:[3,7,41,44],descript:27,descriptor:45,design:3,detail:[],detect:[40,41],develop:[40,44,48],deviat:43,dict:[6,7,14,27],dictionari:[6,7,14,27],dictionnari:27,did:42,differ:[3,6,14,27,41,42,43,44,46],difficult:41,difficulty_fus:[13,14,15],difficultyfus:16,digit:47,digit_col_grad_0:40,dimension:42,direct:40,directli:27,directori:[6,14,27,40,41,43,45,48],disagree_fus:[13,14,15],disagreefus:17,disambigu:27,discov:3,disk:27,dispach:[6,14],displai:[41,43],distinguish:43,distrib:45,distribut:[27,45,46],divers:12,diversity_measur:[16,17,18,19],diversity_util:[16,17,18,19],divid:46,doc:45,doc_summit:[41,42,43],docstr:13,document:[1,45],docutil:[],doe:27,doe_562:44,dominiqu:[],done:[6,14,27,43],dot:27,double_fault_fus:[13,14,15],doublefaultfus:18,doublet:27,download:48,drag:40,draw:[40,42,43,46],drive:[],dtype:44,due:41,durat:[41,42],durations_datafram:[41,43],durations_stds_datafram:43,dure:[40,41,42,46],each:[6,14,27,40,41,42,43,44,45,46],earli:40,earlier:[42,45],earlyfus:[13,14,15,22],earlyfusionpackag:[13,14,15,22,23],easi:3,easier:42,easili:44,edg:45,effici:[43,46],either:[27,40,41,42],els:27,empti:41,enabl:46,encapsul:[27,41],encod:44,end:[27,46],enter:27,entropi:42,entropy_fus:[13,14,15],entropyfus:19,enumer:44,environ:48,equal:[27,46],equivalent_draw:[27,42,46],error:[40,41,43],error_analysis_2d:43,error_analysis_bar:43,estim:[3,27,42,46],estimator__param:27,etc:7,evalu:[6,14,27,41,46],even:[27,42,43],exact:42,exampl:[6,14,27,46,47],example4:[],example_1:41,example_2_1_1:42,example_2_1_2:42,example_:41,example_error:43,example_id:[27,44],example_ids_path:44,example_indic:[27,39,45],examples_indic:27,except:27,exec_benchmark:[6,14],exec_classif:[6,13],exec_one_benchmark_mono_cor:[6,14],exect:1,execut:[2,12,14,40,41,42,43],exeperi:[6,14],exercis:39,exist:27,experi:[6,14,41],experiment:27,explain:[7,40,41,45,47],explor:43,extern:44,extract:[27,42],extract_dict:[6,14],extract_subset:27,extrat:27,f1_sc
ore:[27,41,43],fact:[42,46],factor:42,fail:[40,41,43],failur:41,fair:46,fairest:46,fake:27,fakearg:39,fakedset:39,fakeestimn:39,fakemcestim:39,fakemvclassifi:39,fakemvclassifierprob:39,fakenonprobaestim:39,fakeprobaestim:39,fals:[7,14,27,42,44,45],familiar:42,famou:40,far:[41,42],fashion:45,faster:46,fat_late_fus:[13,14,15],fat_scm_late_fus:[13,14,15],featru:41,featur:[3,27,40,41,43,44,45],feature_import:[41,43],feature_importances_datafram:[41,43],feature_importances_dataframe_std:43,fie:42,figur:[40,41,42,43,45],figure_nam:45,file:[6,7,14,27,40,42,43,44,45,46],file_nam:27,file_path:44,file_typ:27,fill:44,filter:27,find:[27,41,44],find_dataset_nam:27,first:[6,14,27,42,45,46,47],first_classifier_decis:[17,18],fit:[27,39,42,44,45,46],fit_multiview:27,fit_param:27,five:42,fix:[27,41,44],fixtur:[29,30,39],flag:14,focu:42,focus:42,fold:[6,14,27,41,42,43],folds_list:27,follow:[7,40,41,42,44,45,47,48],format:[40,41,44],format_dataset:[],format_param:27,four:45,frac:46,framework:[6,12,14,27],from:[7,27,40,41,42,43,44,45,46],full:[6,14,27,41,45],full_pr:[41,43],fulli:44,further:[],fusion:[12,13,14,15,40,41,42],futur:42,gap:44,gen_argument_dictionari:27,gen_direcorties_nam:27,gen_heat_map:27,gen_k_fold:27,gen_report:27,gen_single_monoview_arg_dictionari:14,gen_single_multiview_arg_dictionari:14,gen_split:27,gener:[6,14,27,42,46],generated_view_1:[41,43],generated_view_1feature_import:[41,43],generated_view_2:[41,43],generated_view_2feature_import:[41,43],generated_view_3:[41,43],generated_view_3feature_import:[41,43],generated_view_4:[41,43],generated_view_4feature_import:[41,43],genfromtxt:44,get:[3,6,7,14,27,42,43,44,45,47],get_best_param:27,get_candidate_param:27,get_classic_db_csv:27,get_classic_db_hdf5:27,get_config:[7,27],get_database_funct:27,get_examples_views_indic:[27,45],get_interpret:[27,45],get_label:27,get_label_nam:27,get_mc_estim:27,get_metrics_scor:27,get_multiview_db:[13,14],get_nam:27,get_nb_class:27,get_nb_exampl:[27,39],get_nb_poss:27,get_param:27,get_param_distrib:27,get_path_dict:[6,14],get_plausible_db_hdf5:27,get_scor:[7,27],get_shap:27,get_the_arg:27,get_total_metric_scor:27,get_v:27,get_view_dict:27,get_view_nam:27,getdatabas:27,gini:[41,42],gitlab:[],give:42,given:[6,14,27,40,42,46],globaldiversityfusionclassifi:[16,19],gnu:[],goal:[27,44,45],good:46,gpl:[],gradiant:40,grai:[40,41,43],grant:42,graph:41,great:[42,43],greater:7,grid:27,gridsearchcv:27,ground:7,group:[27,44],groupkfold:27,guaussian:27,guidelin:45,h5py:[27,44],hard:41,harder:41,hardwar:42,has:[3,27,41,42,43,44,45,46],have:[6,14,27,40,42,43,44,45,46],hdd:27,hdf5:[6,14,27,43,45],hdf5_file:[27,44],hdf5dataset:27,heat:27,help:42,here:[40,41,42,43,44,45],hide:[27,42],high:[27,42],higher:[41,42],highli:[41,42],homm:[],hook:[29,30,39],horizont:41,hover:41,how:[7,41,42,46,47],howev:[40,41,42,43,46],hps_arg:[42,46],hps_iter:27,hps_kwarg:[6,14,27],hps_method:[6,14],hps_report:42,hps_type:[27,42,46],hpsearch:27,hte:27,html:[40,43],http:[],huge:46,human:44,hyper:[6,14,27,41,43,45,47],hyper_param_search:[14,27],hyper_parameter_search:[13,14,45],hyperparm:27,idea:46,ideal:42,ids:44,imag:[40,41,44],img:[],impact:43,implement:[27,42,45],improv:[42,43],includ:45,incorrectli:41,increas:[42,43],inde:[41,43,45],independ:42,index:[3,6,14,27,41,44],indic:[6,14,27,45],indices_multiclass:27,individu:43,inform:[27,41,42,45],inherit:[10,11,45],init:[6,14,27],init_argument_dictionari:14,init_attr:27,init_benchmark:[6,14],init_example_indc:27,init_kwarg:[6,14],init_kwargs_func:[6,14],init_log_fil:27,init_monoview_exp:[6,14],init_multipl
e_dataset:27,init_multiview_exp:14,init_random_st:27,init_stats_iter_random_st:27,init_view:27,init_view_nam:27,initi:[6,14,27,44,45],input:[6,14,43,44,45],input_:27,insid:42,instal:[3,13,47],instanc:27,instead:27,instruct:[3,42],integ:[6,7,14,27,44,45],integr:45,interact:[40,41],interest:40,interpret:41,interpret_str:45,interpretstr:45,introduc:[27,41],invers:27,investig:40,involv:46,is_dict_in:[6,14],is_just_numb:27,is_temp:27,issu:[27,41,43],ist:[6,14],iter:[6,14,27,46,47],iter_1:43,iter_2:43,iter_3:43,iter_4:43,iter_5:43,iter_:43,its:[3,27,42,43,44,46],itself:[6,14],joblib:[],john_115:44,join:45,just:[6,14,42,44,45],k_fold:[6,14,27],keep:[27,43,44,46],kei:[6,7,14,27,44],key1:[6,14],key1_1:[6,14],key2:[6,14],keyword:[6,14],kfold:27,know:[40,42,46],knowledg:[45,46],kwarg:[6,7,14,27],kwargs_init:[6,14],l18:41,l22:41,l26:41,l35:41,l37:42,l43:41,l45:[41,42],l47:[41,42],l49:42,l52:41,l54:42,lab:[],label:[3,6,7,14,27,40,41,44,45],label_1:41,label_2:41,label_3:41,label_4:41,label_5:41,label_6:41,label_7:41,label_8:41,label_nam:[27,44],labels_combin:27,labels_data:44,labels_dataset:44,labels_dictionari:[6,14,27],labels_dset:44,labels_file_path:44,labels_nam:[27,44],larger:43,lassifi:40,last:46,late:[40,41,42],latefus:[13,14,15,22],latefusionpackag:[13,14,15,22,23],later:[27,41],lead:27,learn:[27,40,41,43,45,46],learning_indic:27,least:41,left:41,len:44,less:41,let:[41,42,43,44,45,46],letter:27,level:27,librari:[],licens:[],light:[40,41],like:[7,27,42,43,46],limit:27,line:[41,42,43,44,46],lis:[],list:[6,14,27,44],list_x:45,listof:[6,14],load:[27,44,45],loadabl:41,loc:27,locat:[6,14],log:[27,43],logfil:27,longer:[27,42],look:[42,43],lot:[41,42],low:27,lucki:43,m2r:[],made:[27,40,42,46],mai:[40,41],main:[3,6,14,27,41,43,45],mainli:41,major:[40,41,42],majorityvot:[13,14,15,22,23],make:[41,44],make_file_config:[13,14],make_me_noisi:27,make_scor:7,mandatori:[7,44,45],mani:[40,42],manipul:[],map:[6,14,27],master:[],matplotlib:[],matric:44,matrix:[27,40,41],matter:42,max:46,max_depth:[41,42,45,46],max_length:44,maximum:42,mayb:[27,41],mean:[41,42,43,46],mean_on_5_it:43,meaning:43,member:[10,11],memori:[27,45],metadata:44,metadata_group:44,method:[13,14,15,22,27,29,30,39,42,45,46],methodnam:[29,30,39],metric:[6,12,14,27,40,41,42],metric_modul:29,metric_princ:[6,14,27,42,46],metric_scor:27,metrics_var:27,micro:41,min:42,mind:[42,46],minimum:44,minut:[27,41],mis:[40,41],miss:44,missclassifi:41,mixli:41,mod:42,model:46,model_select:[6,14,27],modif:42,modifi:[3,27,43,44,45],modul:[1,2,3,12,42,45],moment:[42,44],mono:[3,6,14,27,40,42],mono_multi_view_classifi:[13,45],monoview:[6,14,27,40,41,42,43,46],monoview_algo:[6,14],monoview_classifi:45,monoview_estim:[16,17,18,19],monoview_util:45,monoviewwrapp:27,more:[27,43,44,46],moreov:[42,45],most:[27,41,42,44,46],mous:40,mt19937:27,mtrand:41,much:[27,42],multi:[3,27,41,42],multi_class_label:[6,14],multiclass:[6,7,12,13,14,41,45],multiclass_label:27,multiclasswrapp:27,multicor:[6,14,27],multilabel:27,multimod:[],multipl:[27,43],multipli:27,multivew:42,multiview:[3,6,14,27,40,41,42,43,44,46],multiview_algo:[6,14],multiview_classifi:[13,14,45],multiview_classifier_arg:[6,14],multiview_decision_funct:27,multiview_platform:[3,40,41,42,43,45],multiview_result_analysi:[13,14],multiview_util:45,multiviewovowrapp:27,multiviewovrwrapp:27,multiviewwrapp:27,musch:46,must:[7,27,44,45],mutli:[6,14],mutlipli:27,mutliview:[27,42,45],mutual:41,mutual_error_:41,n_class:27,n_estim:[6,14,42],n_exampl:39,n_featur:27,n_iter:[27,42,46],n_job:27,n_output:27,n_sampl:[7,27],n_view:42,naiv:
42,name:[6,14,27,41,42,44,45],name_db:27,name_m:45,namedb:27,nativ:27,nb_class:[6,14,27],nb_core:[6,14,27],nb_exampl:27,nb_featur:27,nb_fold:[27,42,46],nb_label:[6,14,27],nb_view:27,nbclass:44,nbcore:27,nbview:44,ndarrai:[6,14,27],nearli:43,necess:42,necessari:27,need:[6,7,14,27,42,45,48],needed_input:45,new_mv_algo:45,new_mv_algo_modul:45,newmvalgo:45,newmvalgoclassifi:45,nice:27,nois:27,noise_std:27,noisi:27,none:[6,13,14,16,17,18,19,27,39,42,45,46],norm_typ:45,normal:27,notic:42,now:[42,43,44,45],number:[6,14,27,41,42,43,44,45,46],numer:44,numpi:[6,14,27,41,44],object:[6,7,14,27,39,41,44],obtain:42,off:42,onc:[40,45],one:[6,14,27,40,41,42,43,44,45,46],ones:[6,14,27,40,41,43,45,46],onevsoneclassifi:27,onevsrestclassifi:27,oni:27,onli:[27,40,42,44,45],optim:[6,14,27,41,45,47],option:[27,42],order:[6,7,14,27,41,42,43,44,45,46],org:[],organ:41,origin:27,other:[40,45],our:[42,45],outcom:27,outlier:[40,41],output:[41,42,43],output_file_nam:27,ov_wrapp:27,over:[27,42,43],overfit:[42,46],ovowrapp:27,ovrwrapp:27,own:47,packag:[3,6,7,13,48],page:[3,41,42],pair:27,panda:41,parallel:[27,43],param:27,param_1:45,param_2:45,param_distribut:27,param_grid:27,param_nam:45,paramet:[6,7,14,27,41,43,45,47],parametr:41,pars:[6,14,27,41],parse_the_arg:27,parsedargumentpars:[6,14],part:[41,42,45,46],partial:44,particular:41,pass:[6,14,27,46],pass_default_config:27,past:42,path:[6,14,27,44,45],path_f:27,path_for_new:27,path_to_config_fil:27,pathf:[27,44],pbject:27,peopl:44,per:[27,42],percentag:27,perdict_proba:27,perform:[3,6,14,27,41,42,43,46],perfrom:40,person:42,pickl:[27,43],pip:48,pipelin:[],plai:[],plane:44,plane_452:44,platform:[3,7,27,40,41,42,43,44,45,46,48],plausibl:[27,41],plot:[40,41,43],plotli:[],png:[40,43,45],point:27,poor:43,possibl:[27,40,41,42,45,46],potenti:40,precis:41,pred:27,predict:[7,27,39,45,46],predict_proba:39,pref:27,prefix:45,prerequisit:13,present:41,previou:[27,41,42,43,44],princip:[6,14,41],print:[41,43],print_metric_scor:27,prior:46,prioriti:42,probabl:45,problem:[27,41,42,46],procedur:3,process:[6,14,27,42,45,46],prod:[],profit:42,progress:[3,43,44],project:40,propos:46,provid:[27,41,42,44,45,46],pseudo:[42,43],pseudo_cq_fus:[13,14,15],publish:41,purpos:[6,14],pydata:[],pypi:[],python3:48,python:[42,44,45,48],pyyaml:[],quantiti:41,quick:40,quit:46,ram:27,ramdataset:27,randint:27,random:[27,41,43,45],random_s:45,random_st:[14,16,17,18,19,27,39,43,45,46],random_state_arg:27,randomisedsearchcv:42,randomizedsearchcv:27,randomli:[41,46],randomsatearg:27,randomsearch:27,randomst:[27,41],rang:27,rate:41,ratio:[27,41,42,46],read:3,readi:44,readm:5,recod:44,recommend:[41,48],rectangl:40,reduc:[42,45],redund:41,redundant_:41,ref:45,refer:[3,27,42,44],refit:27,regard:45,regress:27,regroup:[40,41],rel:[27,44],relat:[41,45],relev:[27,42,45,46],remot:42,remov:[3,27],repeat:46,report:[27,41],repositori:[],repres:[40,42,44],represent:41,reproduc:45,reproduct:[41,46],requir:[44,45,46,48],res_dir:[27,41],resourc:43,resp:27,rest:27,rest_of_the_arg:[6,14],restrain:45,result:[1,6,14,27,41,43,44,46],result_analysi:13,result_directori:27,results_directori:27,resum:[6,14,27],retriev:27,revers:[6,14],right:[6,14,27,41,44],robust:46,row:[40,41],run:[6,14,27,29,30,39,41,42,43,44,46,48],runtest:[29,30,39],rvs:27,s100:44,sai:[41,45],same:[27,40,41,42,44,45],sampl:[27,40],satisfi:44,sattist:27,save:[6,14,27,40,41,43,44,45],save_config:27,scalar:7,scale:27,scikit:45,scipi:[],scmforlinear:[13,14,15,22,23],score:[7,27,41,42,43],score_test:29,scorer:7,scores_arrai:27,script:44,search:3,second:[41,42],second_cla
ssifier_decis:[17,18],section:41,see:[27,40,41,42,45],seed:[27,41],seem:41,seen:[42,43],select:[27,46],select_label:27,select_views_and_label:27,selected_label_nam:27,self:[27,45],separ:27,serv:46,set:[6,14,27,29,30,39,40,41,42,43,44,45,46],set_el:[6,14],set_param:27,settl:43,setup:39,setupclass:[29,30,39],sever:[3,40,42,43,46],shade:43,shape:[7,27,41,44],share:27,shield:[],short_nam:39,shorter:46,should:[41,42,44,45],show:[27,40,41,43],sign_label:27,signal:27,similar:[40,41,45],similarli:[40,42,43,45],simplest:[41,46],simul:41,singl:[43,44],six:[],size:46,sklearn:[6,7,14,27,39,42,45,46],slice:27,slightli:43,small:42,solut:27,solv:[42,46],some:[27,40,41,42,43,45,47],soon:41,sore:44,sort:44,sotr:45,sould:41,sound:44,sourc:3,source_view_nam:27,space:[27,41,42],spare:[],spars:[27,44],spear_mint:27,spearmint:27,specif:[6,14,27,41,42,43],specifi:[6,7,14,27,41,42,46],spectacularli:41,spike:41,split:[6,14,27,41,43],split_ratio:27,splitter:[41,42],stabl:[],standard:43,star:41,start:[3,47],started_1560_12_25:[41,43],startl:41,state:[27,41,45],statist:[6,14,27,47],stats_it:[6,14,27,43],stats_iter_random_st:27,statsit:27,statu:[],std:[27,43],step:[42,45,47],stop:27,store:[6,7,14,27,41,42,43,44,45],str:[6,14,27],straight:43,stratifi:27,stratifiedkfold:27,stratifiedshufflesplit:46,string:[6,7,14,27,44,45],strongli:46,structur:[41,43],stuff:13,sub:46,submodul:15,subobject:27,subset:[27,42,46],succeed:43,succeerecd:40,success:41,sum:[27,48],summari:[41,43,45],summit:[3,42,43,45,46],summit_doc:41,supervis:[],supplementari:45,support:[27,44],suppos:[44,45,46],sure:44,svg:[],svm_jumbo_fus:27,svmforlinear:[13,14,15,22,23],symmetr:41,tabl:40,tabul:[],take:[41,42,46,47],target:[7,27,45],target_dataset:27,target_view_index:27,task:[40,42,46],teardown:39,teardownclass:[30,39],tell:27,temporari:27,term:[27,41,42,43],termin:[41,48],test:[3,6,14,27,40,41,42,43,45],test_accuracy_scor:[13,28],test_adaboost:[13,28],test_all_views_ask:39,test_argu:39,test_asked_the_whole_dataset:39,test_biclass:39,test_compat:[13,28],test_configur:[13,28],test_dict_format:39,test_difficultymeasur:[13,28,32],test_difficultymeasuremodul:[13,28,32],test_disagreefus:[13,28,32],test_disagreefusionmodul:[13,28,32],test_diversity_util:[13,28],test_doublefaultfus:[13,28,32],test_doublefaultfusionmodul:[13,28,32],test_empty_arg:39,test_entropyfus:[13,28,32],test_entropyfusionmodul:[13,28,32],test_execclassif:13,test_execclassifmonoview:[13,28],test_execut:[13,28],test_file_load:39,test_fit:39,test_fus:[13,28,32],test_fusionmodul:[13,28,32],test_genargumentdictionari:39,test_gendirecortiesnam:39,test_genkfold:39,test_genkfolds_it:39,test_gensplit:39,test_gensplits_no_it:39,test_gentestfoldspr:30,test_get_classic_db_csv:39,test_get_classic_db_hdf5:39,test_get_mc_estim:39,test_get_plausible_db_hdf5:39,test_get_the_arg:39,test_getdatabasefunct:39,test_gethp:30,test_getmultiviewdb:[13,28],test_hdf5:39,test_initconst:30,test_initrandomst:39,test_initstatsiterrandomst:39,test_inittraintest:30,test_label:[27,41,43],test_labels_fold_0:[41,43],test_labels_fold_1:[41,43],test_labels_fold_2:43,test_labels_fold_3:43,test_labels_fold_4:43,test_metr:[13,28,43],test_mono_view:[13,28],test_monoview_classifi:[13,28],test_monoviewutil:[13,28],test_multiclass:[13,28],test_multiclass_n:39,test_multiclass_ovo:39,test_multiclass_ovo_multiview:39,test_multiclass_ovr:39,test_multiclass_ovr_multiview:39,test_multiple_it:39,test_multiview_classifi:[13,28],test_multiviewovowrapper_fit:39,test_multiviewovrwrapper_fit:39,test_one_statit:39,test_ovo_no_it:39,test_parsethearg:
39,test_plausible_hdf5:39,test_predict:39,test_pseudocqfusionmodul:[13,28,32],test_pseudocqmeasur:[13,28,32],test_random_state_42:39,test_random_state_pickl:39,test_resultanalysi:13,test_simpl:[30,39],test_simple_ovo:39,test_two_class:39,test_util:[13,28],test_vers:13,testcas:[29,30,39],text:46,than:[27,41,42,43,46],thank:[6,14,40,46],thant:46,thei:[42,44,45,46],them:[27,43,44,45,46],theori:46,therefor:42,thi:[2,3,6,7,13,14,27,40,42,43,44,45,46],third:41,thoroughli:3,thread:[6,14,27,42],three:[40,42,44,45,46],through:[41,42,45],ths:7,thu:46,tie:27,time:[41,42,43,45,46],timeout:27,to_numpy_arrai:27,tobe:27,too:[27,46],took:41,tool:44,toolbox:47,top:43,total:[6,14],track_traceback:[6,14,27],trade:42,trade_off:45,train:[6,14,27,40,41,42,43,45],train_indic:[27,39,41,43,45],train_label:[27,41,43],train_metr:43,train_pr:[41,43],transform:[13,14,45],transform_data_if_need:45,transpar:27,tree:[40,41,42,46],triplet:[6,14,27],truth:7,tune:[6,14],tupl:27,turn:27,tutori:[3,40],two:[27,40,41,42,45,46],txt:[41,42,43],type:[6,7,14,27,41,42,44],type_var:27,unabl:40,unbalanc:46,under:43,underli:27,understand:[41,47],uniform:46,uniqu:[27,40,44],unittest:[29,30,39],unknown:42,unlucki:43,unseen:46,unsign_label:27,unsupervis:27,updat:3,update_hdf5_dataset:27,usabl:45,usag:45,use:[3,6,14,27,40,41,42,44,45,46,47,48],used:[2,6,13,14,27,41,42,44,45,46],used_indic:27,useful:[27,41,42,45,46],usefulness:[41,42],user:[6,14,27,42],uses:[41,46],using:[27,41,42,43,44,45,46],usual:44,util:[12,13,14,45],val_1:45,val_2:45,valid:[6,14,27,41,42],validation_indic:27,valu:[6,7,14,27,40,42,44,45,46],value1:[6,14],value2:[6,14],vanilla:41,variabl:[6,14,44,45],vector:27,veri:[41,45],verifi:40,version:[40,41,42],versu:27,vertic:43,view:[3,6,14,27,40,41,42,43,44],view_data:[27,44,45],view_dataset:44,view_dict:27,view_idx:27,view_index:[14,27,44,45],view_indic:[27,39,45],view_limit:27,view_nam:[14,27,44],views_dictionari:[6,14],views_indic:[14,27],views_list:45,virtual:48,visual:41,vote:[27,40,42],wai:[41,44,45,46],want:[42,45,46],weighted_linear_late_fus:[41,42,43],weightedlinear:[13,14,15,22,23],welcom:1,well:41,were:[40,42],what:48,when:[27,42,43,46],where:[27,41,45],whether:[7,27],which:[6,7,14,27,40,41,42,43,45,46,47],white:40,whole:[27,41,42,43],why:42,wil:[6,14,27,45],wip:3,wise:[27,46],witch:27,within:46,without:[3,45,46],won:46,work:[3,7,27,40,43,44],worst:41,would:42,wrapper:[27,45],written:44,www:[],y_pred:7,y_test:[27,45],y_true:7,yaml:[],yaml_config:27,yml:[27,43],you:[40,41,42,43,44,46,48],your:[40,47],your_file_nam:[],zero:27,zip:44,zoom:[40,41,43]},titles:["Result analysis module","Multiview Platform","Welcome to the exection documentation","Welcome to Supervised MultiModal Integration Tool\u2019s documentation","multiview_platform","&lt;no title&gt;","Classification execution module","Metrics framework","Classifiers","Diversity Fusion Classifiers","Utils execution module","Utils Multiclass module","Mono and mutliview classification","multiview_platform references","multiview_platform.mono_multi_view_classifiers package","multiview_platform.mono_multi_view_classifiers.multiview_classifiers package","multiview_platform.mono_multi_view_classifiers.multiview_classifiers.difficulty_fusion package","multiview_platform.mono_multi_view_classifiers.multiview_classifiers.disagree_fusion package","multiview_platform.mono_multi_view_classifiers.multiview_classifiers.double_fault_fusion package","multiview_platform.mono_multi_view_classifiers.multiview_classifiers.entropy_fusion 
package","multiview_platform.mono_multi_view_classifiers.multiview_classifiers.fat_late_fusion package","multiview_platform.mono_multi_view_classifiers.multiview_classifiers.fat_scm_late_fusion package","multiview_platform.mono_multi_view_classifiers.multiview_classifiers.fusion package","multiview_platform.mono_multi_view_classifiers.multiview_classifiers.fusion.Methods package","multiview_platform.mono_multi_view_classifiers.multiview_classifiers.fusion.Methods.EarlyFusionPackage package","multiview_platform.mono_multi_view_classifiers.multiview_classifiers.fusion.Methods.LateFusionPackage package","multiview_platform.mono_multi_view_classifiers.multiview_classifiers.pseudo_cq_fusion package","multiview_platform.mono_multi_view_classifiers.utils package","multiview_platform.tests package","multiview_platform.tests.test_metrics package","multiview_platform.tests.test_mono_view package","multiview_platform.tests.test_monoview_classifiers package","multiview_platform.tests.test_multiview_classifiers package","multiview_platform.tests.test_multiview_classifiers.Test_DifficultyMeasure package","multiview_platform.tests.test_multiview_classifiers.Test_DisagreeFusion package","multiview_platform.tests.test_multiview_classifiers.Test_DoubleFaultFusion package","multiview_platform.tests.test_multiview_classifiers.Test_EntropyFusion package","multiview_platform.tests.test_multiview_classifiers.Test_Fusion package","multiview_platform.tests.test_multiview_classifiers.Test_PseudoCQMeasure package","multiview_platform.tests.test_utils package","Example 0 : Getting started with SuMMIT on digits","Example 1 : First big step with SuMMIT","Example 2 : Understanding the hyper-parameter optimization","Example 3 : Understanding the statistical iterations","Taking control : Use your own dataset","Taking control : Use your own algorithms","Hyper-parameter 101","SuMMIT Tutorials","Install 
SuMMIT"],titleterms:{"1560_12_25":41,"15_42":41,Adding:[44,45],The:[42,44],Use:[44,45],accuracy_scor:41,addit:44,algorithm:45,all:[41,45],analysi:0,analyze_result:[16,17,18,19,20,21,22,26],arrai:45,bare:44,bayesianinfer:25,big:41,build:45,choic:46,classif:[6,12,40],classifi:[8,9,41,45],complex:45,conclus:[40,42],config_fil:41,configur:27,content:[13,14,15,16,17,18,19,20,21,22,23,24,25,26,27,28,29,30,31,32,33,34,35,36,37,38,39],context:43,control:[44,45],convers:44,cross:46,csv:41,dataset:[27,41,44,45],depend:41,difficulty_fus:16,digit:40,disagree_fus:17,discoveri:40,divers:9,document:[2,3],double_fault_fus:18,durat:43,earlyfus:23,earlyfusionpackag:24,entropy_fus:19,error_analysis_2d:41,error_analysis_bar:41,exampl:[40,41,42,43,44,45],exec_classif:14,exect:2,execut:[6,10,13,27],experi:42,fat_late_fus:20,fat_scm_late_fus:21,few:42,file:41,first:[40,41],fold:46,framework:7,fusion:[9,22,23,24,25],gener:41,get:[40,41],get_multiview_db:27,get_v:45,grid:[42,46],hand:42,hdf5:44,how:43,html:41,hyper:[42,46],hyper_parameter_search:27,impact:42,indic:3,inform:[40,44],instal:48,integr:3,interpret:45,introduct:41,intuit:46,iter:43,latefus:23,latefusionpackag:25,launch:48,learn:42,list:45,log:41,main:40,majorityvot:25,make:45,make_file_config:27,manipul:45,method:[23,24,25],metric:[7,46],modul:[0,6,10,11,13,14,15,16,17,18,19,20,21,22,23,24,25,26,27,28,29,30,31,32,33,34,35,36,37,38,39],mono:12,mono_multi_view_classifi:[14,15,16,17,18,19,20,21,22,23,24,25,26,27],monoview:45,more:[40,42,45],multiclass:[11,27],multimod:3,multiview:[1,45],multiview_classifi:[15,16,17,18,19,20,21,22,23,24,25,26],multiview_platform:[4,13,14,15,16,17,18,19,20,21,22,23,24,25,26,27,28,29,30,31,32,33,34,35,36,37,38,39],multiview_result_analysi:27,mutliview:12,necess:44,object:45,optim:[42,46],own:[44,45],packag:[14,15,16,17,18,19,20,21,22,23,24,25,26,27,28,29,30,31,32,33,34,35,36,37,38,39],paramet:[42,46],pickl:41,platform:1,png:41,pseudo_cq_fus:26,random:[42,46],random_st:41,refer:13,report:42,result:[0,40,42],result_analysi:14,rule:41,run:40,scmforlinear:25,score:40,search:[42,46],setup:48,simpl:45,size:42,split:[42,46],start:[40,41],statist:43,step:41,structur:44,submodul:[13,14,16,17,18,19,20,21,22,23,24,25,26,27,28,29,30,31,32,33,34,35,36,37,38,39],subpackag:[13,14,15,22,23,28,32,37],summit:[40,41,47,48],supervis:3,svmforlinear:25,tabl:3,take:[44,45],task:45,test:[13,28,29,30,31,32,33,34,35,36,37,38,39,46],test_accuracy_scor:29,test_adaboost:31,test_compat:31,test_configur:39,test_difficultymeasur:33,test_difficultymeasuremodul:33,test_disagreefus:34,test_disagreefusionmodul:34,test_diversity_util:32,test_doublefaultfus:35,test_doublefaultfusionmodul:35,test_entropyfus:36,test_entropyfusionmodul:36,test_execclassif:28,test_execclassifmonoview:30,test_execut:39,test_fus:37,test_fusionmodul:37,test_getmultiviewdb:39,test_metr:29,test_mono_view:30,test_monoview_classifi:31,test_monoviewutil:30,test_multiclass:39,test_multiview_classifi:[32,33,34,35,36,37,38],test_pseudocqfusionmodul:38,test_pseudocqmeasur:38,test_resultanalysi:28,test_util:39,them:41,thi:41,tool:[3,48],train:46,transform:27,tutori:[41,47],understand:[42,43,46],usag:42,use:43,util:[10,11,27],valid:46,version:13,view:45,weightedlinear:[24,25],welcom:[2,3],work:45,yml:41,your:[44,45]}})
\ No newline at end of file
+Search.setIndex({docnames:["analyzeresult","api","execution","index","modules","readme_link","references/monomulti/exec_classif","references/monomulti/metrics","references/monomulti/multiview_classifiers/classifiers","references/monomulti/multiview_classifiers/diversity_fusion","references/monomulti/utils/execution","references/monomulti/utils/multiclass","references/monomultidoc","references/multiview_platform","references/multiview_platform.mono_multi_view_classifiers","references/multiview_platform.mono_multi_view_classifiers.multiview_classifiers","references/multiview_platform.mono_multi_view_classifiers.multiview_classifiers.difficulty_fusion","references/multiview_platform.mono_multi_view_classifiers.multiview_classifiers.disagree_fusion","references/multiview_platform.mono_multi_view_classifiers.multiview_classifiers.double_fault_fusion","references/multiview_platform.mono_multi_view_classifiers.multiview_classifiers.entropy_fusion","references/multiview_platform.mono_multi_view_classifiers.multiview_classifiers.fat_late_fusion","references/multiview_platform.mono_multi_view_classifiers.multiview_classifiers.fat_scm_late_fusion","references/multiview_platform.mono_multi_view_classifiers.multiview_classifiers.fusion","references/multiview_platform.mono_multi_view_classifiers.multiview_classifiers.fusion.Methods","references/multiview_platform.mono_multi_view_classifiers.multiview_classifiers.fusion.Methods.EarlyFusionPackage","references/multiview_platform.mono_multi_view_classifiers.multiview_classifiers.fusion.Methods.LateFusionPackage","references/multiview_platform.mono_multi_view_classifiers.multiview_classifiers.pseudo_cq_fusion","references/multiview_platform.mono_multi_view_classifiers.utils","references/multiview_platform.tests","references/multiview_platform.tests.test_metrics","references/multiview_platform.tests.test_mono_view","references/multiview_platform.tests.test_monoview_classifiers","references/multiview_platform.tests.test_multiview_classifiers","references/multiview_platform.tests.test_multiview_classifiers.Test_DifficultyMeasure","references/multiview_platform.tests.test_multiview_classifiers.Test_DisagreeFusion","references/multiview_platform.tests.test_multiview_classifiers.Test_DoubleFaultFusion","references/multiview_platform.tests.test_multiview_classifiers.Test_EntropyFusion","references/multiview_platform.tests.test_multiview_classifiers.Test_Fusion","references/multiview_platform.tests.test_multiview_classifiers.Test_PseudoCQMeasure","references/multiview_platform.tests.test_utils","tutorials/example0","tutorials/example1","tutorials/example2","tutorials/example3","tutorials/example4","tutorials/example5","tutorials/hps_theory","tutorials/index","tutorials/installation"],envversion:{"sphinx.domains.c":2,"sphinx.domains.changeset":1,"sphinx.domains.citation":1,"sphinx.domains.cpp":2,"sphinx.domains.index":1,"sphinx.domains.javascript":2,"sphinx.domains.math":2,"sphinx.domains.python":2,"sphinx.domains.rst":2,"sphinx.domains.std":1,sphinx:56},filenames:["analyzeresult.rst","api.rst","execution.rst","index.rst","modules.rst","readme_link.rst","references/monomulti/exec_classif.rst","references/monomulti/metrics.rst","references/monomulti/multiview_classifiers/classifiers.rst","references/monomulti/multiview_classifiers/diversity_fusion.rst","references/monomulti/utils/execution.rst","references/monomulti/utils/multiclass.rst","references/monomultidoc.rst","references/multiview_platform.rst","references/multiview_platform.mono_multi_view_classifiers.rst","
references/multiview_platform.mono_multi_view_classifiers.multiview_classifiers.rst","references/multiview_platform.mono_multi_view_classifiers.multiview_classifiers.difficulty_fusion.rst","references/multiview_platform.mono_multi_view_classifiers.multiview_classifiers.disagree_fusion.rst","references/multiview_platform.mono_multi_view_classifiers.multiview_classifiers.double_fault_fusion.rst","references/multiview_platform.mono_multi_view_classifiers.multiview_classifiers.entropy_fusion.rst","references/multiview_platform.mono_multi_view_classifiers.multiview_classifiers.fat_late_fusion.rst","references/multiview_platform.mono_multi_view_classifiers.multiview_classifiers.fat_scm_late_fusion.rst","references/multiview_platform.mono_multi_view_classifiers.multiview_classifiers.fusion.rst","references/multiview_platform.mono_multi_view_classifiers.multiview_classifiers.fusion.Methods.rst","references/multiview_platform.mono_multi_view_classifiers.multiview_classifiers.fusion.Methods.EarlyFusionPackage.rst","references/multiview_platform.mono_multi_view_classifiers.multiview_classifiers.fusion.Methods.LateFusionPackage.rst","references/multiview_platform.mono_multi_view_classifiers.multiview_classifiers.pseudo_cq_fusion.rst","references/multiview_platform.mono_multi_view_classifiers.utils.rst","references/multiview_platform.tests.rst","references/multiview_platform.tests.test_metrics.rst","references/multiview_platform.tests.test_mono_view.rst","references/multiview_platform.tests.test_monoview_classifiers.rst","references/multiview_platform.tests.test_multiview_classifiers.rst","references/multiview_platform.tests.test_multiview_classifiers.Test_DifficultyMeasure.rst","references/multiview_platform.tests.test_multiview_classifiers.Test_DisagreeFusion.rst","references/multiview_platform.tests.test_multiview_classifiers.Test_DoubleFaultFusion.rst","references/multiview_platform.tests.test_multiview_classifiers.Test_EntropyFusion.rst","references/multiview_platform.tests.test_multiview_classifiers.Test_Fusion.rst","references/multiview_platform.tests.test_multiview_classifiers.Test_PseudoCQMeasure.rst","references/multiview_platform.tests.test_utils.rst","tutorials/example0.rst","tutorials/example1.rst","tutorials/example2.rst","tutorials/example3.rst","tutorials/example4.rst","tutorials/example5.rst","tutorials/hps_theory.rst","tutorials/index.rst","tutorials/installation.rst"],objects:{"":{multiview_platform:[13,0,0,"-"]},"summit.multiview_platform":{result_analysis:[0,0,0,"-"]},summit:{execute:[2,0,0,"-"]}},objnames:{"0":["py","module","Python 
module"]},objtypes:{"0":"py:module"},terms:{"0x7f01ddfc24c8":41,"0x7fc3cbffd678":[],"0x7fc7d2dfb048":[],"0x7ff2cf1b7048":[],"14_12":43,"15_42":43,"200x100":44,"200x40":44,"200x55":44,"2020_04_02":43,"20v3":[],"25th":41,"2d_plot_data":[41,43],"abstract":[],"boolean":45,"case":[3,45],"class":[41,43,44,45,46],"default":42,"final":41,"float":[45,46],"function":[41,45],"import":[40,41,42,43,44,45],"int":[],"long":[44,46],"new":[43,45],"return":[40,43,45],"short":3,"super":45,"true":[41,42,45,46],"try":[42,44,46],"while":[42,43,44,46],And:[42,44],DTs:40,For:[40,41,42,43,45,46],IDs:44,One:[45,46],THe:[],That:40,The:[3,40,41,43,45,46,47],Then:[41,44,45,48],These:[40,41,42,45],Ths:46,Use:47,Used:[],With:40,Yes:[],__init__:45,_gen:[],_iter:46,_search:[],_view:46,abl:[40,42,44,45],about:45,absolut:[],accepts_multi_class:[],access:45,accord:42,accuraci:[40,41,42,46],accuracy_scor:[42,43],adaboost:[42,43],add:[3,42,44,45],add_gaussian_nois:[],add_nois:[],added:45,adding:44,addit:[],advantag:3,after:[41,43],aggreg:40,agrument:[],aim:[],aks:[],alexi:[],algo:45,algo_modul:45,algoclassifi:45,algorithm:[40,41,42,46,47],algos_monoview:[41,42],algos_multiview:[41,42],all:[3,40,42,43,44],all_view:[],allow:[3,41,42,44,45,46],along:[],alreadi:[40,42],also:[41,42,43,44],alter:41,among:41,amongst:41,amount:42,analysi:[1,40,43,44],analyz:[41,42,44,46],analyze_iter:[],analyze_result:[13,14,15],ani:[40,46],anim:44,anoth:40,answer:[],appear:43,append:45,approxim:42,arange_metr:[],are_spars:[],arg:[],arg_view:[],argmax:[],argument:[41,42,43,45,46],argument_dictionari:[],around:[],arrai:44,arrang:[],artifact:44,ask:[40,45],asked_labels_nam:[],askedlabelsnam:[],assess:40,astyp:44,atm:44,attr:44,attribut:[44,45],author:[],automat:[],avail:[40,41,42,44,45],averag:41,avoid:[41,42,45],axi:40,badg:[],balanc:41,baptist:[],bar:[40,41,43],bar_plot_data:[41,43],base:42,base_estim:42,base_file_nam:45,baseestim:[],basemonoviewclassifi:45,basemultiviewclassifi:45,basic:[40,41],bauvin:[],bayesianinfer:[13,14,15,22,23],bear:44,bear_112:44,becaus:41,becom:42,been:[3,41,44],befor:45,begin:41,behavior:40,being:[3,44,46],below:[41,42],benchmark:[40,41,42,43,44],benchmark_arguments_dictionari:[],benchmark_init:[],benchmarkargumentdictionari:[],benchmarkargumentsdictionari:[],benielli:[],best:[40,41,42,43,45,46],better:[41,42,43],between:[42,43,44,45,46],biclass:[],big:47,bigger:42,bird:44,bird_785:44,bit:45,black:[40,41],blob:[],blue:[],bool:[],both:[40,42],bracket:42,brows:45,build:[41,42],bypass:46,call:[41,45],callabl:[],can:[40,41,42,43,44,45,46],capac:46,car:44,car_369:44,carefulli:[],center:[],chang:[],check:[],check_selected_label_nam:[],choic:42,choos:42,chose:44,cl_type:[],clariti:44,classier:[],classif:[41,46],classifi:[3,12,40,42,43,44,46],classification_indic:[],classifier_class_nam:45,classifier_config:42,classifier_dict:[],classifier_nam:42,classifiers_decis:[],classifiers_nam:42,classmethod:[],clean:[],clear:41,clf_error:43,clone:[],close:43,closer:42,code:[2,3,42,43,45],color:40,column:[40,41],combin:42,command:48,comment:41,commentari:44,commun:45,compar:[40,45],compat:42,compens:[42,46],complementar:41,complementari:41,complementary_:41,complex:[41,42,46],compos:45,comput:[42,43,46],conain:[],concat_view:[],concaten:40,concern:40,confid:[],config:[40,41,42,43,44,46],config_fil:43,config_path:[],configstr:[],configur:[13,14,40,41,42,46],configurationmak:[],confirm:[],confus:41,confusion_matrix:[41,43],conjunct:[],consensu:45,consequ:[41,42],consist:[3,40,41,44,46],consol:[],constantli:3,contain:[40,41,44,45],content:[1,3,12
],control:[41,42,43,46,47],convert:44,copi:41,copy_hdf5:[],copy_view:[],core:[3,41],correct:45,correctli:44,correspond:[44,45],costli:43,could:[40,41,45],count:41,counterpart:40,coupl:[],couplediversityfusionclassifi:[],cover:42,creat:[44,45],create_dataset:44,create_group:44,criterion:[41,42],crop:[],cross:[41,42],csv:[40,43,44],current:43,customrandint:45,customuniform:45,data:[41,44,45,46],data_file_path:44,databas:41,dataset:[3,13,14,40,42,43,46,47],dataset_var:45,dataseterror:[],datasetfil:[],datasetlength:44,datasets_already_exist:[],date:41,deal:[],debug:[],decemb:41,decis:[40,41,42,45,46],decision_funct:[],decision_tre:[41,42,43,46],decisiontre:41,decisiontreeclassifi:42,decod:[],deconstruct:[],decreas:42,decrypt:41,deep:46,deeper:46,def:45,defin:[42,44,45],delet:[],delete_hdf5:[],delimit:44,demand:42,depend:[42,43,46],depth:[42,46],describ:[3,41,44],descript:[],descriptor:45,design:3,detail:[],detect:[40,41],develop:[40,44,48],deviat:43,dict:[],dictionari:[],dictionnari:[],did:42,differ:[3,41,42,43,44,46],difficult:41,difficulty_fus:[13,14,15],difficultyfus:[],digit:47,digit_col_grad_0:40,dimension:42,direct:40,directli:[],directori:[40,41,43,45,48],disagree_fus:[13,14,15],disagreefus:[],disambigu:[],discov:3,disk:[],dispach:[],displai:[41,43],distinguish:43,distrib:45,distribut:[45,46],divers:12,diversity_measur:[],diversity_util:[],divid:46,doc:45,doc_summit:[41,42,43],docstr:[],document:[1,45],docutil:[],doe:[],doe_562:44,dominiqu:[],done:43,dot:[],double_fault_fus:[13,14,15],doublefaultfus:[],doublet:[],download:48,drag:40,draw:[40,42,43,46],drive:[],dtype:44,due:41,durat:[41,42],durations_datafram:[41,43],durations_stds_datafram:43,dure:[40,41,42,46],each:[40,41,42,43,44,45,46],earli:40,earlier:[42,45],earlyfus:[13,14,15,22],earlyfusionpackag:[13,14,15,22,23],easi:3,easier:42,easili:44,edg:45,effici:[43,46],either:[40,41,42],els:[],empti:41,enabl:46,encapsul:41,encod:44,end:46,enter:[],entropi:42,entropy_fus:[13,14,15],entropyfus:[],enumer:44,environ:48,equal:46,equivalent_draw:[42,46],error:[40,41,43],error_analysis_2d:43,error_analysis_bar:43,estim:[3,42,46],estimator__param:[],etc:[],evalu:[41,46],even:[42,43],exact:42,exampl:[46,47],example4:[],example_1:41,example_2_1_1:42,example_2_1_2:42,example_:41,example_error:43,example_id:44,example_ids_path:44,example_indic:45,examples_indic:[],except:[],exec_benchmark:[],exec_classif:13,exec_one_benchmark_mono_cor:[],exect:1,execut:[2,12,14,40,41,42,43],exeperi:[],exercis:[],exist:[],experi:41,experiment:[],explain:[40,41,45,47],explor:43,extern:44,extract:42,extract_dict:[],extract_subset:[],extrat:[],f1_score:[41,43],fact:[42,46],factor:42,fail:[40,41,43],failur:41,fair:46,fairest:46,fake:[],fakearg:[],fakedset:[],fakeestimn:[],fakemcestim:[],fakemvclassifi:[],fakemvclassifierprob:[],fakenonprobaestim:[],fakeprobaestim:[],fals:[42,44,45],familiar:42,famou:40,far:[41,42],fashion:45,faster:46,fat_late_fus:[13,14,15],fat_scm_late_fus:[13,14,15],featru:41,featur:[3,40,41,43,44,45],feature_import:[41,43],feature_importances_datafram:[41,43],feature_importances_dataframe_std:43,fie:42,figur:[40,41,42,43,45],figure_nam:45,file:[40,42,43,44,45,46],file_nam:[],file_path:44,file_typ:[],fill:44,filter:[],find:[41,44],find_dataset_nam:[],first:[42,45,46,47],first_classifier_decis:[],fit:[42,44,45,46],fit_multiview:[],fit_param:[],five:42,fix:[41,44],fixtur:[],flag:[],focu:42,focus:42,fold:[41,42,43],folds_list:[],follow:[40,41,42,44,45,47,48],format:[40,41,44],format_dataset:[],format_param:[],four:45,frac:46,framework:12,from:[40,41,42,43
,44,45,46],full:[41,45],full_pr:[41,43],fulli:44,further:[],fusion:[12,13,14,15,40,41,42],futur:42,gap:44,gen_argument_dictionari:[],gen_direcorties_nam:[],gen_heat_map:[],gen_k_fold:[],gen_report:[],gen_single_monoview_arg_dictionari:[],gen_single_multiview_arg_dictionari:[],gen_split:[],gener:[42,46],generated_view_1:[41,43],generated_view_1feature_import:[41,43],generated_view_2:[41,43],generated_view_2feature_import:[41,43],generated_view_3:[41,43],generated_view_3feature_import:[41,43],generated_view_4:[41,43],generated_view_4feature_import:[41,43],genfromtxt:44,get:[3,42,43,44,45,47],get_best_param:[],get_candidate_param:[],get_classic_db_csv:[],get_classic_db_hdf5:[],get_config:[],get_database_funct:[],get_examples_views_indic:45,get_interpret:45,get_label:[],get_label_nam:[],get_mc_estim:[],get_metrics_scor:[],get_multiview_db:[13,14],get_nam:[],get_nb_class:[],get_nb_exampl:[],get_nb_poss:[],get_param:[],get_param_distrib:[],get_path_dict:[],get_plausible_db_hdf5:[],get_scor:[],get_shap:[],get_the_arg:[],get_total_metric_scor:[],get_v:[],get_view_dict:[],get_view_nam:[],getdatabas:[],gini:[41,42],gitlab:[],give:42,given:[40,42,46],globaldiversityfusionclassifi:[],gnu:[],goal:[44,45],good:46,gpl:[],gradiant:40,grai:[40,41,43],grant:42,graph:41,great:[42,43],greater:[],grid:[],gridsearchcv:[],ground:[],group:44,groupkfold:[],guaussian:[],guidelin:45,h5py:44,hard:41,harder:41,hardwar:42,has:[3,41,42,43,44,45,46],have:[40,42,43,44,45,46],hdd:[],hdf5:[43,45],hdf5_file:44,hdf5dataset:[],heat:[],help:42,here:[40,41,42,43,44,45],hide:42,high:42,higher:[41,42],highli:[41,42],homm:[],hook:[],horizont:41,hover:41,how:[41,42,46,47],howev:[40,41,42,43,46],hps_arg:[42,46],hps_iter:[],hps_kwarg:[],hps_method:[],hps_report:42,hps_type:[42,46],hpsearch:[],hte:[],html:[40,43],http:[],huge:46,human:44,hyper:[41,43,45,47],hyper_param_search:[],hyper_parameter_search:[13,14,45],hyperparm:[],idea:46,ideal:42,ids:44,imag:[40,41,44],img:[],impact:43,implement:[42,45],improv:[42,43],includ:45,incorrectli:41,increas:[42,43],inde:[41,43,45],independ:42,index:[3,41,44],indic:45,indices_multiclass:[],individu:43,inform:[41,42,45],inherit:[10,11,45],init:[],init_argument_dictionari:[],init_attr:[],init_benchmark:[],init_example_indc:[],init_kwarg:[],init_kwargs_func:[],init_log_fil:[],init_monoview_exp:[],init_multiple_dataset:[],init_multiview_exp:[],init_random_st:[],init_stats_iter_random_st:[],init_view:[],init_view_nam:[],initi:[44,45],input:[43,44,45],input_:[],insid:42,instal:[3,47],instanc:[],instead:[],instruct:[3,42],integ:[44,45],integr:45,interact:[40,41],interest:40,interpret:41,interpret_str:45,interpretstr:45,introduc:41,invers:[],investig:40,involv:46,is_dict_in:[],is_just_numb:[],is_temp:[],issu:[41,43],ist:[],iter:[46,47],iter_1:43,iter_2:43,iter_3:43,iter_4:43,iter_5:43,iter_:43,its:[3,42,43,44,46],itself:[],joblib:[],john_115:44,join:45,just:[42,44,45],k_fold:[],keep:[43,44,46],kei:44,key1:[],key1_1:[],key2:[],keyword:[],kfold:[],know:[40,42,46],knowledg:[45,46],kwarg:[],kwargs_init:[],l18:41,l22:41,l26:41,l35:41,l37:42,l43:41,l45:[41,42],l47:[41,42],l49:42,l52:41,l54:42,lab:[],label:[3,40,41,44,45],label_1:41,label_2:41,label_3:41,label_4:41,label_5:41,label_6:41,label_7:41,label_8:41,label_nam:44,labels_combin:[],labels_data:44,labels_dataset:44,labels_dictionari:[],labels_dset:44,labels_file_path:44,labels_nam:44,larger:43,lassifi:40,last:46,late:[40,41,42],latefus:[13,14,15,22],latefusionpackag:[13,14,15,22,23],later:41,lead:[],learn:[40,41,43,45,46],learning_indic:[],least:41,left:41,le
n:44,less:41,let:[41,42,43,44,45,46],letter:[],level:[],librari:[],licens:[],light:[40,41],like:[42,43,46],limit:[],line:[41,42,43,44,46],lis:[],list:44,list_x:45,listof:[],load:[44,45],loadabl:41,loc:[],locat:[],log:43,logfil:[],longer:42,look:[42,43],lot:[41,42],low:[],lucki:43,m2r:[],made:[40,42,46],mai:[40,41],main:[3,41,43,45],mainli:41,major:[40,41,42],majorityvot:[13,14,15,22,23],make:[41,44],make_file_config:[13,14],make_me_noisi:[],make_scor:[],mandatori:[44,45],mani:[40,42],manipul:[],map:[],master:[],matplotlib:[],matric:44,matrix:[40,41],matter:42,max:46,max_depth:[41,42,45,46],max_length:44,maximum:42,mayb:41,mean:[41,42,43,46],mean_on_5_it:43,meaning:43,member:[10,11],memori:45,metadata:44,metadata_group:44,method:[13,14,15,22,42,45,46],methodnam:[],metric:[12,40,41,42],metric_modul:[],metric_princ:[42,46],metric_scor:[],metrics_var:[],micro:41,min:42,mind:[42,46],minimum:44,minut:41,mis:[40,41],miss:44,missclassifi:41,mixli:41,mod:42,model:46,model_select:[],modif:42,modifi:[3,43,44,45],modul:[1,2,3,12,42,45],moment:[42,44],mono:[3,40,42],mono_multi_view_classifi:[13,45],monoview:[40,41,42,43,46],monoview_algo:[],monoview_classifi:45,monoview_estim:[],monoview_util:45,monoviewwrapp:[],more:[43,44,46],moreov:[42,45],most:[41,42,44,46],mous:40,mt19937:[],mtrand:41,much:42,multi:[3,41,42],multi_class_label:[],multiclass:[12,13,14,41,45],multiclass_label:[],multiclasswrapp:[],multicor:[],multilabel:[],multimod:[],multipl:43,multipli:[],multivew:42,multiview:[3,40,41,42,43,44,46],multiview_algo:[],multiview_classifi:[13,14,45],multiview_classifier_arg:[],multiview_decision_funct:[],multiview_platform:[3,40,41,42,43,45],multiview_result_analysi:[13,14],multiview_util:45,multiviewovowrapp:[],multiviewovrwrapp:[],multiviewwrapp:[],musch:46,must:[44,45],mutli:[],mutlipli:[],mutliview:[42,45],mutual:41,mutual_error_:41,n_class:[],n_estim:42,n_exampl:[],n_featur:[],n_iter:[42,46],n_job:[],n_output:[],n_sampl:[],n_view:42,naiv:42,name:[41,42,44,45],name_db:[],name_m:45,namedb:[],nativ:[],nb_class:[],nb_core:[],nb_exampl:[],nb_featur:[],nb_fold:[42,46],nb_label:[],nb_view:[],nbclass:44,nbcore:[],nbview:44,ndarrai:[],nearli:43,necess:42,necessari:[],need:[42,45,48],needed_input:45,new_mv_algo:45,new_mv_algo_modul:45,newmvalgo:45,newmvalgoclassifi:45,nice:[],nois:[],noise_std:[],noisi:[],none:[42,45,46],norm_typ:45,normal:[],notic:42,now:[42,43,44,45],number:[41,42,43,44,45,46],numer:44,numpi:[41,44],object:[41,44],obtain:42,off:42,onc:[40,45],one:[40,41,42,43,44,45,46],ones:[40,41,43,45,46],onevsoneclassifi:[],onevsrestclassifi:[],oni:[],onli:[40,42,44,45],optim:[41,45,47],option:42,order:[41,42,43,44,45,46],org:[],organ:41,origin:[],other:[40,45],our:[42,45],outcom:[],outlier:[40,41],output:[41,42,43],output_file_nam:[],ov_wrapp:[],over:[42,43],overfit:[42,46],ovowrapp:[],ovrwrapp:[],own:47,packag:[3,13,48],page:[3,41,42],pair:[],panda:41,parallel:43,param:[],param_1:45,param_2:45,param_distribut:[],param_grid:[],param_nam:45,paramet:[41,43,45,47],parametr:41,pars:41,parse_the_arg:[],parsedargumentpars:[],part:[41,42,45,46],partial:44,particular:41,pass:46,pass_default_config:[],past:42,path:[44,45],path_f:[],path_for_new:[],path_to_config_fil:[],pathf:44,pbject:[],peopl:44,per:42,percentag:[],perdict_proba:[],perform:[3,41,42,43,46],perfrom:40,person:42,pickl:43,pip:48,pipelin:[],plai:[],plane:44,plane_452:44,platform:[3,40,41,42,43,44,45,46,48],plausibl:41,plot:[40,41,43],plotli:[],png:[40,43,45],point:[],poor:43,possibl:[40,41,42,45,46],potenti:40,precis:41,pred:[],predict:[45,46],pred
ict_proba:[],pref:[],prefix:45,prerequisit:[],present:41,previou:[41,42,43,44],princip:41,print:[41,43],print_metric_scor:[],prior:46,prioriti:42,probabl:45,problem:[41,42,46],procedur:3,process:[42,45,46],prod:[],profit:42,progress:[3,43,44],project:40,propos:46,provid:[41,42,44,45,46],pseudo:[42,43],pseudo_cq_fus:[13,14,15],publish:41,purpos:[],pydata:[],pypi:[],python3:48,python:[42,44,45,48],pyyaml:[],quantiti:41,quick:40,quit:46,ram:[],ramdataset:[],randint:[],random:[41,43,45],random_s:45,random_st:[43,45,46],random_state_arg:[],randomisedsearchcv:42,randomizedsearchcv:[],randomli:[41,46],randomsatearg:[],randomsearch:[],randomst:41,rang:[],rate:41,ratio:[41,42,46],read:3,readi:44,readm:5,recod:44,recommend:[41,48],rectangl:40,reduc:[42,45],redund:41,redundant_:41,ref:45,refer:[3,42,44],refit:[],regard:45,regress:[],regroup:[40,41],rel:44,relat:[41,45],relev:[42,45,46],remot:42,remov:3,repeat:46,report:41,repositori:[],repres:[40,42,44],represent:41,reproduc:45,reproduct:[41,46],requir:[44,45,46,48],res_dir:41,resourc:43,resp:[],rest:[],rest_of_the_arg:[],restrain:45,result:[1,41,43,44,46],result_analysi:13,result_directori:[],results_directori:[],resum:[],retriev:[],revers:[],right:[41,44],robust:46,row:[40,41],run:[41,42,43,44,46,48],runtest:[],rvs:[],s100:44,sai:[41,45],same:[40,41,42,44,45],sampl:40,satisfi:44,sattist:[],save:[40,41,43,44,45],save_config:[],scalar:[],scale:[],scikit:45,scipi:[],scmforlinear:[13,14,15,22,23],score:[41,42,43],score_test:[],scorer:[],scores_arrai:[],script:44,search:3,second:[41,42],second_classifier_decis:[],section:41,see:[40,41,42,45],seed:41,seem:41,seen:[42,43],select:46,select_label:[],select_views_and_label:[],selected_label_nam:[],self:45,separ:[],serv:46,set:[40,41,42,43,44,45,46],set_el:[],set_param:[],settl:43,setup:[],setupclass:[],sever:[3,40,42,43,46],shade:43,shape:[41,44],share:[],shield:[],short_nam:[],shorter:46,should:[41,42,44,45],show:[40,41,43],sign_label:[],signal:[],similar:[40,41,45],similarli:[40,42,43,45],simplest:[41,46],simul:41,singl:[43,44],six:[],size:46,sklearn:[42,45,46],slice:[],slightli:43,small:42,solut:[],solv:[42,46],some:[40,41,42,43,45,47],soon:41,sore:44,sort:44,sotr:45,sould:41,sound:44,sourc:3,source_view_nam:[],space:[41,42],spare:[],spars:44,spear_mint:[],spearmint:[],specif:[41,42,43],specifi:[41,42,46],spectacularli:41,spike:41,split:[41,43],split_ratio:[],splitter:[41,42],stabl:[],standard:43,star:41,start:[3,47],started_1560_12_25:[41,43],startl:41,state:[41,45],statist:47,stats_it:43,stats_iter_random_st:[],statsit:[],statu:[],std:43,step:[42,45,47],stop:[],store:[41,42,43,44,45],str:[],straight:43,stratifi:[],stratifiedkfold:[],stratifiedshufflesplit:46,string:[44,45],strongli:46,structur:[41,43],stuff:[],sub:46,submodul:15,subobject:[],subset:[42,46],succeed:43,succeerecd:40,success:41,sum:48,summari:[41,43,45],summit:[3,42,43,45,46],summit_doc:41,supervis:[],supplementari:45,support:44,suppos:[44,45,46],sure:44,svg:[],svm_jumbo_fus:[],svmforlinear:[13,14,15,22,23],symmetr:41,tabl:40,tabul:[],take:[41,42,46,47],target:45,target_dataset:[],target_view_index:[],task:[40,42,46],teardown:[],teardownclass:[],tell:[],temporari:[],term:[41,42,43],termin:[41,48],test:[3,40,41,42,43,45],test_accuracy_scor:[13,28],test_adaboost:[13,28],test_all_views_ask:[],test_argu:[],test_asked_the_whole_dataset:[],test_biclass:[],test_compat:[13,28],test_configur:[13,28],test_dict_format:[],test_difficultymeasur:[13,28,32],test_difficultymeasuremodul:[13,28,32],test_disagreefus:[13,28,32],test_disagreefusionmodul:[13,28,
32],test_diversity_util:[13,28],test_doublefaultfus:[13,28,32],test_doublefaultfusionmodul:[13,28,32],test_empty_arg:[],test_entropyfus:[13,28,32],test_entropyfusionmodul:[13,28,32],test_execclassif:13,test_execclassifmonoview:[13,28],test_execut:[13,28],test_file_load:[],test_fit:[],test_fus:[13,28,32],test_fusionmodul:[13,28,32],test_genargumentdictionari:[],test_gendirecortiesnam:[],test_genkfold:[],test_genkfolds_it:[],test_gensplit:[],test_gensplits_no_it:[],test_gentestfoldspr:[],test_get_classic_db_csv:[],test_get_classic_db_hdf5:[],test_get_mc_estim:[],test_get_plausible_db_hdf5:[],test_get_the_arg:[],test_getdatabasefunct:[],test_gethp:[],test_getmultiviewdb:[13,28],test_hdf5:[],test_initconst:[],test_initrandomst:[],test_initstatsiterrandomst:[],test_inittraintest:[],test_label:[41,43],test_labels_fold_0:[41,43],test_labels_fold_1:[41,43],test_labels_fold_2:43,test_labels_fold_3:43,test_labels_fold_4:43,test_metr:[13,28,43],test_mono_view:[13,28],test_monoview_classifi:[13,28],test_monoviewutil:[13,28],test_multiclass:[13,28],test_multiclass_n:[],test_multiclass_ovo:[],test_multiclass_ovo_multiview:[],test_multiclass_ovr:[],test_multiclass_ovr_multiview:[],test_multiple_it:[],test_multiview_classifi:[13,28],test_multiviewovowrapper_fit:[],test_multiviewovrwrapper_fit:[],test_one_statit:[],test_ovo_no_it:[],test_parsethearg:[],test_plausible_hdf5:[],test_predict:[],test_pseudocqfusionmodul:[13,28,32],test_pseudocqmeasur:[13,28,32],test_random_state_42:[],test_random_state_pickl:[],test_resultanalysi:13,test_simpl:[],test_simple_ovo:[],test_two_class:[],test_util:[13,28],test_vers:[],testcas:[],text:46,than:[41,42,43,46],thank:[40,46],thant:46,thei:[42,44,45,46],them:[43,44,45,46],theori:46,therefor:42,thi:[2,3,40,42,43,44,45,46],third:41,thoroughli:3,thread:42,three:[40,42,44,45,46],through:[41,42,45],ths:[],thu:46,tie:[],time:[41,42,43,45,46],timeout:[],to_numpy_arrai:[],tobe:[],too:46,took:41,tool:44,toolbox:47,top:43,total:[],track_traceback:[],trade:42,trade_off:45,train:[40,41,42,43,45],train_indic:[41,43,45],train_label:[41,43],train_metr:43,train_pr:[41,43],transform:[13,14,45],transform_data_if_need:45,transpar:[],tree:[40,41,42,46],triplet:[],truth:[],tune:[],tupl:[],turn:[],tutori:[3,40],two:[40,41,42,45,46],txt:[41,42,43],type:[41,42,44],type_var:[],unabl:40,unbalanc:46,under:43,underli:[],understand:[41,47],uniform:46,uniqu:[40,44],unittest:[],unknown:42,unlucki:43,unseen:46,unsign_label:[],unsupervis:[],updat:3,update_hdf5_dataset:[],usabl:45,usag:45,use:[3,40,41,42,44,45,46,47,48],used:[2,41,42,44,45,46],used_indic:[],useful:[41,42,45,46],usefulness:[41,42],user:42,uses:[41,46],using:[41,42,43,44,45,46],usual:44,util:[12,13,14,45],val_1:45,val_2:45,valid:[41,42],validation_indic:[],valu:[40,42,44,45,46],value1:[],value2:[],vanilla:41,variabl:[44,45],vector:[],veri:[41,45],verifi:40,version:[40,41,42],versu:[],vertic:43,view:[3,40,41,42,43,44],view_data:[44,45],view_dataset:44,view_dict:[],view_idx:[],view_index:[44,45],view_indic:45,view_limit:[],view_nam:44,views_dictionari:[],views_indic:[],views_list:45,virtual:48,visual:41,vote:[40,42],wai:[41,44,45,46],want:[42,45,46],weighted_linear_late_fus:[41,42,43],weightedlinear:[13,14,15,22,23],welcom:1,well:41,were:[40,42],what:48,when:[42,43,46],where:[41,45],whether:[],which:[40,41,42,43,45,46,47],white:40,whole:[41,42,43],why:42,wil:45,wip:3,wise:46,witch:[],within:46,without:[3,45,46],won:46,work:[3,40,43,44],worst:41,would:42,wrapper:45,written:44,www:[],y_pred:[],y_test:45,y_true:[],yaml:[],yaml_config:[],yml:43,you
:[40,41,42,43,44,46,48],your:[40,47],your_file_nam:[],zero:[],zip:44,zoom:[40,41,43]},titles:["Result analysis module","Multiview Platform","Welcome to the exection documentation","Welcome to Supervised MultiModal Integration Tool\u2019s documentation","summit","&lt;no title&gt;","Classification execution module","Metrics framework","Classifiers","Diversity Fusion Classifiers","Utils execution module","Utils Multiclass module","Mono and mutliview classification","multiview_platform references","multiview_platform.mono_multi_view_classifiers package","multiview_platform.mono_multi_view_classifiers.multiview_classifiers package","multiview_platform.mono_multi_view_classifiers.multiview_classifiers.difficulty_fusion package","multiview_platform.mono_multi_view_classifiers.multiview_classifiers.disagree_fusion package","multiview_platform.mono_multi_view_classifiers.multiview_classifiers.double_fault_fusion package","multiview_platform.mono_multi_view_classifiers.multiview_classifiers.entropy_fusion package","multiview_platform.mono_multi_view_classifiers.multiview_classifiers.fat_late_fusion package","multiview_platform.mono_multi_view_classifiers.multiview_classifiers.fat_scm_late_fusion package","multiview_platform.mono_multi_view_classifiers.multiview_classifiers.fusion package","multiview_platform.mono_multi_view_classifiers.multiview_classifiers.fusion.Methods package","multiview_platform.mono_multi_view_classifiers.multiview_classifiers.fusion.Methods.EarlyFusionPackage package","multiview_platform.mono_multi_view_classifiers.multiview_classifiers.fusion.Methods.LateFusionPackage package","multiview_platform.mono_multi_view_classifiers.multiview_classifiers.pseudo_cq_fusion package","multiview_platform.mono_multi_view_classifiers.utils package","multiview_platform.tests package","multiview_platform.tests.test_metrics package","multiview_platform.tests.test_mono_view package","multiview_platform.tests.test_monoview_classifiers package","multiview_platform.tests.test_multiview_classifiers package","multiview_platform.tests.test_multiview_classifiers.Test_DifficultyMeasure package","multiview_platform.tests.test_multiview_classifiers.Test_DisagreeFusion package","multiview_platform.tests.test_multiview_classifiers.Test_DoubleFaultFusion package","multiview_platform.tests.test_multiview_classifiers.Test_EntropyFusion package","multiview_platform.tests.test_multiview_classifiers.Test_Fusion package","multiview_platform.tests.test_multiview_classifiers.Test_PseudoCQMeasure package","multiview_platform.tests.test_utils package","Example 0 : Getting started with SuMMIT on digits","Example 1 : First big step with SuMMIT","Example 2 : Understanding the hyper-parameter optimization","Example 3 : Understanding the statistical iterations","Taking control : Use your own dataset","Taking control : Use your own algorithms","Hyper-parameter 101","SuMMIT Tutorials","Install 
SuMMIT"],titleterms:{"1560_12_25":41,"15_42":41,Adding:[44,45],The:[42,44],Use:[44,45],accuracy_scor:41,addit:44,algorithm:45,all:[41,45],analysi:0,analyze_result:[16,17,18,19,20,21,22,26],arrai:45,bare:44,bayesianinfer:25,big:41,build:45,choic:46,classif:[6,12,40],classifi:[8,9,41,45],complex:45,conclus:[40,42],config_fil:41,configur:27,content:[13,14,15,16,17,18,19,20,21,22,23,24,25,26,27,28,29,30,31,32,33,34,35,36,37,38,39],context:43,control:[44,45],convers:44,cross:46,csv:41,dataset:[27,41,44,45],depend:41,difficulty_fus:16,digit:40,disagree_fus:17,discoveri:40,divers:9,document:[2,3],double_fault_fus:18,durat:43,earlyfus:23,earlyfusionpackag:24,entropy_fus:19,error_analysis_2d:41,error_analysis_bar:41,exampl:[40,41,42,43,44,45],exec_classif:14,exect:2,execut:[6,10,13,27],experi:42,fat_late_fus:20,fat_scm_late_fus:21,few:42,file:41,first:[40,41],fold:46,framework:7,fusion:[9,22,23,24,25],gener:41,get:[40,41],get_multiview_db:27,get_v:45,grid:[42,46],hand:42,hdf5:44,how:43,html:41,hyper:[42,46],hyper_parameter_search:27,impact:42,indic:3,inform:[40,44],instal:48,integr:3,interpret:45,introduct:41,intuit:46,iter:43,latefus:23,latefusionpackag:25,launch:48,learn:42,list:45,log:41,main:40,majorityvot:25,make:45,make_file_config:27,manipul:45,method:[23,24,25],metric:[7,46],modul:[0,6,10,11,13,14,15,16,17,18,19,20,21,22,23,24,25,26,27,28,29,30,31,32,33,34,35,36,37,38,39],mono:12,mono_multi_view_classifi:[14,15,16,17,18,19,20,21,22,23,24,25,26,27],monoview:45,more:[40,42,45],multiclass:[11,27],multimod:3,multiview:[1,45],multiview_classifi:[15,16,17,18,19,20,21,22,23,24,25,26],multiview_platform:[13,14,15,16,17,18,19,20,21,22,23,24,25,26,27,28,29,30,31,32,33,34,35,36,37,38,39],multiview_result_analysi:27,mutliview:12,necess:44,object:45,optim:[42,46],own:[44,45],packag:[14,15,16,17,18,19,20,21,22,23,24,25,26,27,28,29,30,31,32,33,34,35,36,37,38,39],paramet:[42,46],pickl:41,platform:1,png:41,pseudo_cq_fus:26,random:[42,46],random_st:41,refer:13,report:42,result:[0,40,42],result_analysi:14,rule:41,run:40,scmforlinear:25,score:40,search:[42,46],setup:48,simpl:45,size:42,split:[42,46],start:[40,41],statist:43,step:41,structur:44,submodul:[13,14,16,17,18,19,20,21,22,23,24,25,26,27,28,29,30,31,32,33,34,35,36,37,38,39],subpackag:[13,14,15,22,23,28,32,37],summit:[4,40,41,47,48],supervis:3,svmforlinear:25,tabl:3,take:[44,45],task:45,test:[13,28,29,30,31,32,33,34,35,36,37,38,39,46],test_accuracy_scor:29,test_adaboost:31,test_compat:31,test_configur:39,test_difficultymeasur:33,test_difficultymeasuremodul:33,test_disagreefus:34,test_disagreefusionmodul:34,test_diversity_util:32,test_doublefaultfus:35,test_doublefaultfusionmodul:35,test_entropyfus:36,test_entropyfusionmodul:36,test_execclassif:28,test_execclassifmonoview:30,test_execut:39,test_fus:37,test_fusionmodul:37,test_getmultiviewdb:39,test_metr:29,test_mono_view:30,test_monoview_classifi:31,test_monoviewutil:30,test_multiclass:39,test_multiview_classifi:[32,33,34,35,36,37,38],test_pseudocqfusionmodul:38,test_pseudocqmeasur:38,test_resultanalysi:28,test_util:39,them:41,thi:41,tool:[3,48],train:46,transform:27,tutori:[41,47],understand:[42,43,46],usag:42,use:43,util:[10,11,27],valid:46,version:13,view:45,weightedlinear:[24,25],welcom:[2,3],work:45,yml:41,your:[44,45]}})
\ No newline at end of file
diff --git a/docs/build/tutorials/example1.html b/docs/build/tutorials/example1.html
index 3597be4042ecd18df2c96ff089fb1e8db6c4c2bf..4e2d6b8c49def718b4364a9d812a3038c14b9b1f 100644
--- a/docs/build/tutorials/example1.html
+++ b/docs/build/tutorials/example1.html
@@ -1,9 +1,11 @@
 
-<!DOCTYPE html>
+<!DOCTYPE html PUBLIC "-//W3C//DTD XHTML 1.0 Transitional//EN"
+  "http://www.w3.org/TR/xhtml1/DTD/xhtml1-transitional.dtd">
 
-<html>
+<html xmlns="http://www.w3.org/1999/xhtml">
   <head>
-    <meta charset="utf-8" />
+    <meta http-equiv="X-UA-Compatible" content="IE=Edge" />
+    <meta http-equiv="Content-Type" content="text/html; charset=utf-8" />
     <title>Example 1 : First big step with SuMMIT &#8212; SuMMIT 0 documentation</title>
     <link rel="stylesheet" href="../_static/classic.css" type="text/css" />
     <link rel="stylesheet" href="../_static/pygments.css" type="text/css" />
@@ -56,91 +58,92 @@
 <div class="section" id="a-generated-dataset-to-rule-them-all">
 <h2>A generated dataset to rule them all<a class="headerlink" href="#a-generated-dataset-to-rule-them-all" title="Permalink to this headline">¶</a></h2>
 <p>The <a class="reference external" href="https://gitlab.lis-lab.fr/baptiste.bauvin/summit/-/tree/master/multiview_platform/examples/data/doc_summit.hdf5">dataset</a> that will be used in the examples consists of</p>
-<ul class="simple">
-<li><dl class="simple">
-<dt>500 examples that are either</dt><dd><ul>
-<li><p>mis-described by all the views (labelled <code class="docutils literal notranslate"><span class="pre">Mutual_error_*</span></code>),</p></li>
-<li><p>well-described by all the views (labelled <code class="docutils literal notranslate"><span class="pre">Redundant_*</span></code>),</p></li>
-<li><p>well-described by the majority of the views (labelled <code class="docutils literal notranslate"><span class="pre">Complementary_*</span></code>),</p></li>
-<li><p>randomly well- or mis-described by the views (labelled <code class="docutils literal notranslate"><span class="pre">example_*</span></code>).</p></li>
+<ul>
+<li><dl class="first docutils">
+<dt>500 examples that are either</dt><dd><ul class="simple">
+<li>mis-described by all the views (labelled <code class="docutils literal notranslate"><span class="pre">Mutual_error_*</span></code>),</li>
+<li>well-described by all the views (labelled <code class="docutils literal notranslate"><span class="pre">Redundant_*</span></code>),</li>
+<li>well-described by the majority of the views (labelled <code class="docutils literal notranslate"><span class="pre">Complementary_*</span></code>),</li>
+<li>randomly well- or mis-described by the views (labelled <code class="docutils literal notranslate"><span class="pre">example_*</span></code>).</li>
 </ul>
 </dd>
 </dl>
 </li>
-<li><p>8 balanced classes named <code class="docutils literal notranslate"><span class="pre">'label_1'</span></code>, …, <code class="docutils literal notranslate"><span class="pre">'label_8'</span></code>,</p></li>
-<li><dl class="simple">
-<dt>4 views named <code class="docutils literal notranslate"><span class="pre">'generated_view_1'</span></code>, …, <code class="docutils literal notranslate"><span class="pre">'generated_view_4'</span></code>,</dt><dd><ul>
-<li><p>each view consisting in 3 features.</p></li>
+<li><p class="first">8 balanced classes named <code class="docutils literal notranslate"><span class="pre">'label_1'</span></code>, …, <code class="docutils literal notranslate"><span class="pre">'label_8'</span></code>,</p>
+</li>
+<li><dl class="first docutils">
+<dt>4 views named <code class="docutils literal notranslate"><span class="pre">'generated_view_1'</span></code>, …, <code class="docutils literal notranslate"><span class="pre">'generated_view_4'</span></code>,</dt><dd><ul class="simple">
+<li>each view consisting of 3 features.</li>
 </ul>
 </dd>
 </dl>
 </li>
 </ul>
 <p>It has been parametrized with the following error matrix that encapsulates the quantity of information available in each view for each label :</p>
-<table class="docutils align-default">
+<table border="1" class="docutils">
 <colgroup>
-<col style="width: 22%" />
-<col style="width: 20%" />
-<col style="width: 20%" />
-<col style="width: 20%" />
-<col style="width: 20%" />
+<col width="22%" />
+<col width="20%" />
+<col width="20%" />
+<col width="20%" />
+<col width="20%" />
 </colgroup>
-<thead>
-<tr class="row-odd"><th class="head"></th>
-<th class="head"><p>View 1</p></th>
-<th class="head"><p>View 2</p></th>
-<th class="head"><p>View 3</p></th>
-<th class="head"><p>View 4</p></th>
+<thead valign="bottom">
+<tr class="row-odd"><th class="head">&#160;</th>
+<th class="head">View 1</th>
+<th class="head">View 2</th>
+<th class="head">View 3</th>
+<th class="head">View 4</th>
 </tr>
 </thead>
-<tbody>
-<tr class="row-even"><td><p>label_1</p></td>
-<td><p>0.40</p></td>
-<td><p>0.40</p></td>
-<td><p>0.40</p></td>
-<td><p>0.40</p></td>
+<tbody valign="top">
+<tr class="row-even"><td>label_1</td>
+<td>0.40</td>
+<td>0.40</td>
+<td>0.40</td>
+<td>0.40</td>
 </tr>
-<tr class="row-odd"><td><p>label_2</p></td>
-<td><p>0.55</p></td>
-<td><p>0.40</p></td>
-<td><p>0.40</p></td>
-<td><p>0.40</p></td>
+<tr class="row-odd"><td>label_2</td>
+<td>0.55</td>
+<td>0.40</td>
+<td>0.40</td>
+<td>0.40</td>
 </tr>
-<tr class="row-even"><td><p>label_3</p></td>
-<td><p>0.40</p></td>
-<td><p>0.50</p></td>
-<td><p>0.60</p></td>
-<td><p>0.55</p></td>
+<tr class="row-even"><td>label_3</td>
+<td>0.40</td>
+<td>0.50</td>
+<td>0.60</td>
+<td>0.55</td>
 </tr>
-<tr class="row-odd"><td><p>label_4</p></td>
-<td><p>0.40</p></td>
-<td><p>0.50</p></td>
-<td><p>0.50</p></td>
-<td><p>0.40</p></td>
+<tr class="row-odd"><td>label_4</td>
+<td>0.40</td>
+<td>0.50</td>
+<td>0.50</td>
+<td>0.40</td>
 </tr>
-<tr class="row-even"><td><p>label_5</p></td>
-<td><p>0.40</p></td>
-<td><p>0.40</p></td>
-<td><p>0.40</p></td>
-<td><p>0.40</p></td>
+<tr class="row-even"><td>label_5</td>
+<td>0.40</td>
+<td>0.40</td>
+<td>0.40</td>
+<td>0.40</td>
 </tr>
-<tr class="row-odd"><td><p>label_6</p></td>
-<td><p>0.40</p></td>
-<td><p>0.40</p></td>
-<td><p>0.40</p></td>
-<td><p>0.40</p></td>
+<tr class="row-odd"><td>label_6</td>
+<td>0.40</td>
+<td>0.40</td>
+<td>0.40</td>
+<td>0.40</td>
 </tr>
-<tr class="row-even"><td><p>label_7</p></td>
-<td><p>0.40</p></td>
-<td><p>0.40</p></td>
-<td><p>0.40</p></td>
-<td><p>0.40</p></td>
+<tr class="row-even"><td>label_7</td>
+<td>0.40</td>
+<td>0.40</td>
+<td>0.40</td>
+<td>0.40</td>
 </tr>
-<tr class="row-odd"><td><p>label_7</p></td>
-<td><p>0.40</p></td>
-<td><p>0.40</p></td>
-<td><p>0.40</p></td>
-<td><p>0.40</p></td>
+<tr class="row-odd"><td>label_7</td>
+<td>0.40</td>
+<td>0.40</td>
+<td>0.40</td>
+<td>0.40</td>
 </tr>
 </tbody>
 </table>
@@ -155,25 +158,29 @@
 <p><strong>Understanding the config file</strong></p>
 <p>The config file that will be used in this example is available <a class="reference external" href="https://gitlab.lis-lab.fr/baptiste.bauvin/summit/-/tree/master/multiview_platform/examples/config_files/config_example_1.yml">here</a>; let us decipher the main arguments :</p>
 <ul>
-<li><p>The first part regroups the basics :</p>
+<li><p class="first">The first part regroups the basics :</p>
 <blockquote>
 <div><ul class="simple">
-<li><p><code class="code yaml docutils literal notranslate"><span class="name tag"><span class="pre">log</span></span><span class="punctuation"><span class="pre">:</span></span> <span class="literal scalar plain"><span class="pre">True</span></span></code> (<a class="reference external" href="https://gitlab.lis-lab.fr/baptiste.bauvin/summit/-/tree/master/multiview_platform/examples/config_files/config_example_1.yml#L4">l4</a>) allows to print the log in the terminal,</p></li>
-<li><p><code class="code yaml docutils literal notranslate"><span class="name tag"><span class="pre">name</span></span><span class="punctuation"><span class="pre">:</span></span> <span class="punctuation indicator"><span class="pre">[</span></span><span class="literal string"><span class="pre">“summit_doc”</span></span><span class="punctuation indicator"><span class="pre">]</span></span></code> (<a class="reference external" href="https://gitlab.lis-lab.fr/baptiste.bauvin/summit/-/tree/master/multiview_platform/examples/config_files/config_example_1.yml#L6">l6</a>) uses the plausible simulated dataset,</p></li>
-<li><p><code class="code yaml docutils literal notranslate"><span class="name tag"><span class="pre">random_state</span></span><span class="punctuation"><span class="pre">:</span></span> <span class="literal scalar plain"><span class="pre">42</span></span></code> (<a class="reference external" href="https://gitlab.lis-lab.fr/baptiste.bauvin/summit/-/tree/master/multiview_platform/examples/config_files/config_example_1.yml#L18">l18</a>) fixes the seed of the random state for this benchmark, it is useful for reproductibility,</p></li>
-<li><p><code class="code yaml docutils literal notranslate"><span class="name tag"><span class="pre">full</span></span><span class="punctuation"><span class="pre">:</span></span> <span class="literal scalar plain"><span class="pre">True</span></span></code>  (<a class="reference external" href="https://gitlab.lis-lab.fr/baptiste.bauvin/summit/-/tree/master/multiview_platform/examples/config_files/config_example_1.yml#L22">l22</a>) means the benchmark will use the full dataset,</p></li>
-<li><p><code class="code yaml docutils literal notranslate"><span class="name tag"><span class="pre">res_dir</span></span><span class="punctuation"><span class="pre">:</span></span> <span class="literal string"><span class="pre">“examples/results/example_1/”</span></span></code> (<a class="reference external" href="https://gitlab.lis-lab.fr/baptiste.bauvin/summit/-/tree/master/multiview_platform/examples/config_files/config_example_1.yml#L26">l26</a>) saves the results in <code class="docutils literal notranslate"><span class="pre">summit/multiview_platform/examples/results/example_1</span></code></p></li>
+<li><code class="code yaml docutils literal notranslate"><span class="name tag"><span class="pre">log</span></span><span class="punctuation"><span class="pre">:</span></span> <span class="literal scalar plain"><span class="pre">True</span></span></code> (<a class="reference external" href="https://gitlab.lis-lab.fr/baptiste.bauvin/summit/-/tree/master/multiview_platform/examples/config_files/config_example_1.yml#L4">l4</a>) prints the log in the terminal,</li>
+<li><code class="code yaml docutils literal notranslate"><span class="name tag"><span class="pre">name</span></span><span class="punctuation"><span class="pre">:</span></span> <span class="punctuation indicator"><span class="pre">[</span></span><span class="literal string"><span class="pre">“summit_doc”</span></span><span class="punctuation indicator"><span class="pre">]</span></span></code> (<a class="reference external" href="https://gitlab.lis-lab.fr/baptiste.bauvin/summit/-/tree/master/multiview_platform/examples/config_files/config_example_1.yml#L6">l6</a>) uses the plausible simulated dataset,</li>
+<li><code class="code yaml docutils literal notranslate"><span class="name tag"><span class="pre">random_state</span></span><span class="punctuation"><span class="pre">:</span></span> <span class="literal scalar plain"><span class="pre">42</span></span></code> (<a class="reference external" href="https://gitlab.lis-lab.fr/baptiste.bauvin/summit/-/tree/master/multiview_platform/examples/config_files/config_example_1.yml#L18">l18</a>) fixes the seed of the random state for this benchmark, which is useful for reproducibility,</li>
+<li><code class="code yaml docutils literal notranslate"><span class="name tag"><span class="pre">full</span></span><span class="punctuation"><span class="pre">:</span></span> <span class="literal scalar plain"><span class="pre">True</span></span></code>  (<a class="reference external" href="https://gitlab.lis-lab.fr/baptiste.bauvin/summit/-/tree/master/multiview_platform/examples/config_files/config_example_1.yml#L22">l22</a>) means the benchmark will use the full dataset,</li>
+<li><code class="code yaml docutils literal notranslate"><span class="name tag"><span class="pre">res_dir</span></span><span class="punctuation"><span class="pre">:</span></span> <span class="literal string"><span class="pre">“examples/results/example_1/”</span></span></code> (<a class="reference external" href="https://gitlab.lis-lab.fr/baptiste.bauvin/summit/-/tree/master/multiview_platform/examples/config_files/config_example_1.yml#L26">l26</a>) saves the results in <code class="docutils literal notranslate"><span class="pre">summit/multiview_platform/examples/results/example_1</span></code></li>
 </ul>
 </div></blockquote>
 </li>
-<li><p>Then the classification-related arguments :</p>
+<li><p class="first">Then the classification-related arguments :</p>
 <blockquote>
 <div><ul>
-<li><p><code class="code yaml docutils literal notranslate"><span class="name tag"><span class="pre">split</span></span><span class="punctuation"><span class="pre">:</span></span> <span class="literal scalar plain"><span class="pre">0.25</span></span></code> (<a class="reference external" href="https://gitlab.lis-lab.fr/baptiste.bauvin/summit/-/tree/master/multiview_platform/examples/config_files/config_example_1.yml#L35">l35</a>) means that 75% of the dataset will be used to test the different classifiers and 25% to train them,</p></li>
-<li><p><code class="code yaml docutils literal notranslate"><span class="name tag"><span class="pre">type</span></span><span class="punctuation"><span class="pre">:</span></span> <span class="punctuation indicator"><span class="pre">[</span></span><span class="literal string"><span class="pre">“monoview”</span></span><span class="punctuation indicator"><span class="pre">,</span></span> <span class="literal string"><span class="pre">“multiview”</span></span><span class="punctuation indicator"><span class="pre">]</span></span></code> (<a class="reference external" href="https://gitlab.lis-lab.fr/baptiste.bauvin/summit/-/tree/master/multiview_platform/examples/config_files/config_example_1.yml#L43">l43</a>) allows for monoview and multiview algorithms to be used in the benchmark,</p></li>
-<li><p><code class="code yaml docutils literal notranslate"><span class="name tag"><span class="pre">algos_monoview</span></span><span class="punctuation"><span class="pre">:</span></span> <span class="punctuation indicator"><span class="pre">[</span></span><span class="literal string"><span class="pre">“decision_tree”</span></span><span class="punctuation indicator"><span class="pre">]</span></span></code> (<a class="reference external" href="https://gitlab.lis-lab.fr/baptiste.bauvin/summit/-/tree/master/multiview_platform/examples/config_files/config_example_1.yml#L45">l45</a>) runs a Decision tree on each view,</p></li>
-<li><p><code class="code yaml docutils literal notranslate"><span class="name tag"><span class="pre">algos_multiview</span></span><span class="punctuation"><span class="pre">:</span></span> <span class="punctuation indicator"><span class="pre">[</span></span><span class="literal string"><span class="pre">“weighted_linear_late_fusion”</span></span><span class="punctuation indicator"><span class="pre">]</span></span></code> (<a class="reference external" href="https://gitlab.lis-lab.fr/baptiste.bauvin/summit/-/tree/master/multiview_platform/examples/config_files/config_example_1.yml#L47">l47</a>) runs a late fusion,</p></li>
-<li><p>The metrics configuration (<a class="reference external" href="https://gitlab.lis-lab.fr/baptiste.bauvin/summit/-/tree/master/multiview_platform/examples/config_files/config_example_1.yml#L52">l52-55</a>)</p>
+<li><p class="first"><code class="code yaml docutils literal notranslate"><span class="name tag"><span class="pre">split</span></span><span class="punctuation"><span class="pre">:</span></span> <span class="literal scalar plain"><span class="pre">0.25</span></span></code> (<a class="reference external" href="https://gitlab.lis-lab.fr/baptiste.bauvin/summit/-/tree/master/multiview_platform/examples/config_files/config_example_1.yml#L35">l35</a>) means that 75% of the dataset will be used to test the different classifiers and 25% to train them,</p>
+</li>
+<li><p class="first"><code class="code yaml docutils literal notranslate"><span class="name tag"><span class="pre">type</span></span><span class="punctuation"><span class="pre">:</span></span> <span class="punctuation indicator"><span class="pre">[</span></span><span class="literal string"><span class="pre">“monoview”</span></span><span class="punctuation indicator"><span class="pre">,</span></span> <span class="literal string"><span class="pre">“multiview”</span></span><span class="punctuation indicator"><span class="pre">]</span></span></code> (<a class="reference external" href="https://gitlab.lis-lab.fr/baptiste.bauvin/summit/-/tree/master/multiview_platform/examples/config_files/config_example_1.yml#L43">l43</a>) allows for monoview and multiview algorithms to be used in the benchmark,</p>
+</li>
+<li><p class="first"><code class="code yaml docutils literal notranslate"><span class="name tag"><span class="pre">algos_monoview</span></span><span class="punctuation"><span class="pre">:</span></span> <span class="punctuation indicator"><span class="pre">[</span></span><span class="literal string"><span class="pre">“decision_tree”</span></span><span class="punctuation indicator"><span class="pre">]</span></span></code> (<a class="reference external" href="https://gitlab.lis-lab.fr/baptiste.bauvin/summit/-/tree/master/multiview_platform/examples/config_files/config_example_1.yml#L45">l45</a>) runs a Decision tree on each view,</p>
+</li>
+<li><p class="first"><code class="code yaml docutils literal notranslate"><span class="name tag"><span class="pre">algos_multiview</span></span><span class="punctuation"><span class="pre">:</span></span> <span class="punctuation indicator"><span class="pre">[</span></span><span class="literal string"><span class="pre">“weighted_linear_late_fusion”</span></span><span class="punctuation indicator"><span class="pre">]</span></span></code> (<a class="reference external" href="https://gitlab.lis-lab.fr/baptiste.bauvin/summit/-/tree/master/multiview_platform/examples/config_files/config_example_1.yml#L47">l47</a>) runs a late fusion,</p>
+</li>
+<li><p class="first">The metrics configuration (<a class="reference external" href="https://gitlab.lis-lab.fr/baptiste.bauvin/summit/-/tree/master/multiview_platform/examples/config_files/config_example_1.yml#L52">l52-55</a>)</p>
 <div class="highlight-default notranslate"><div class="highlight"><pre><span></span><span class="n">metrics</span><span class="p">:</span>
   <span class="n">accuracy_score</span><span class="p">:{}</span>
   <span class="n">f1_score</span><span class="p">:</span>
@@ -428,8 +435,8 @@ The html version is as follows :</p>
 <p>It is the representation of a matrix, where the rows are the examples, and the columns are the classifiers.</p>
 <p>The examples labelled as <code class="docutils literal notranslate"><span class="pre">Mutual_error_*</span></code> are mis-classified by most of the algorithms, the redundant ones are well-classified, and the complementary ones are classified with mixed success.</p>
 <div class="admonition note">
-<p class="admonition-title">Note</p>
-<p>It is highly recommended to zoom in the html figure to see each row.</p>
+<p class="first admonition-title">Note</p>
+<p class="last">It is highly recommended to zoom in the html figure to see each row.</p>
 </div>
 <html>
 <head><meta charset="utf-8" /></head>
@@ -510,7 +517,8 @@ It could mean that the example is incorrectly labeled in the dataset or is very
 <div class="section" id="classifier-dependant-files">
 <h3>Classifier-dependant files<a class="headerlink" href="#classifier-dependant-files" title="Permalink to this headline">¶</a></h3>
 <p>For each classifier, at least one file is generated, called <code class="docutils literal notranslate"><span class="pre">*-summary.txt</span></code>.</p>
-<pre class="literal-block">Classification on doc_summit for generated_view_1 with decision_tree.
+<pre class="literal-block">
+Classification on doc_summit for generated_view_1 with decision_tree.
 
 Database configuration : 
         - Database name : doc_summit
@@ -565,6 +573,7 @@ Feature importances :
 - Feature index : 0, feature importance : 0.42752552366689905
 - Feature index : 1, feature importance : 0.4031352121725926
 - Feature index : 2, feature importance : 0.1693392641605082
+
 </pre>
 <p>This regroups the useful information on the classifier’s configuration and its performance. An interpretation section is available for classifiers that present some interpretation-related information (such as feature importance).</p>
 </div>
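
The `*-summary.txt` excerpt above ends with per-feature importances. As a hedged illustration (assuming SuMMIT's `decision_tree` wraps scikit-learn's `DecisionTreeClassifier`, and using random stand-in data instead of `generated_view_1`), this is roughly how such report lines can be produced:

```python
import numpy as np
from sklearn.tree import DecisionTreeClassifier

# Stand-in data: one 3-feature view with 500 examples and 8 labels,
# mimicking the doc_summit dataset's shape (not its content).
rng = np.random.RandomState(42)
X_train = rng.rand(500, 3)
y_train = rng.randint(0, 8, 500)

clf = DecisionTreeClassifier(max_depth=3, random_state=42).fit(X_train, y_train)
for index, importance in enumerate(clf.feature_importances_):
    print("- Feature index : {}, feature importance : {}".format(index, importance))
```
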
diff --git a/docs/build/tutorials/example2.html b/docs/build/tutorials/example2.html
index 5636698a2d5e01c5dfbee1dd9a479522f88f7776..7788053210b9265fff95a30df2ed219949f5f86c 100644
--- a/docs/build/tutorials/example2.html
+++ b/docs/build/tutorials/example2.html
@@ -1,9 +1,11 @@
 
-<!DOCTYPE html>
+<!DOCTYPE html PUBLIC "-//W3C//DTD XHTML 1.0 Transitional//EN"
+  "http://www.w3.org/TR/xhtml1/DTD/xhtml1-transitional.dtd">
 
-<html>
+<html xmlns="http://www.w3.org/1999/xhtml">
   <head>
-    <meta charset="utf-8" />
+    <meta http-equiv="X-UA-Compatible" content="IE=Edge" />
+    <meta http-equiv="Content-Type" content="text/html; charset=utf-8" />
     <title>Example 2 : Understanding the hyper-parameter optimization &#8212; SuMMIT 0 documentation</title>
     <link rel="stylesheet" href="../_static/classic.css" type="text/css" />
     <link rel="stylesheet" href="../_static/pygments.css" type="text/css" />
@@ -53,10 +55,10 @@
 <p>In order to understand the process and its usefulness, let’s run some configurations and analyze the results.</p>
 <p>This example will focus only on some lines of the configuration file :</p>
 <ul class="simple">
-<li><p><code class="code yaml docutils literal notranslate"><span class="name tag"><span class="pre">split</span></span><span class="punctuation"><span class="pre">:</span></span></code>, controlling the ratio between the testing set and the training set,</p></li>
-<li><p><code class="code yaml docutils literal notranslate"><span class="name tag"><span class="pre">hps_type</span></span><span class="punctuation"><span class="pre">:</span></span></code>, controlling the type of hyper-parameter search,</p></li>
-<li><p><code class="code yaml docutils literal notranslate"><span class="name tag"><span class="pre">hps_args</span></span><span class="punctuation"><span class="pre">:</span></span></code>, controlling the parameters of the hyper-parameters search method,</p></li>
-<li><p><code class="code yaml docutils literal notranslate"><span class="name tag"><span class="pre">nb_folds</span></span><span class="punctuation"><span class="pre">:</span></span></code>, controlling the number of folds in the cross-validation process.</p></li>
+<li><code class="code yaml docutils literal notranslate"><span class="name tag"><span class="pre">split</span></span><span class="punctuation"><span class="pre">:</span></span></code>, controlling the ratio between the testing set and the training set,</li>
+<li><code class="code yaml docutils literal notranslate"><span class="name tag"><span class="pre">hps_type</span></span><span class="punctuation"><span class="pre">:</span></span></code>, controlling the type of hyper-parameter search,</li>
+<li><code class="code yaml docutils literal notranslate"><span class="name tag"><span class="pre">hps_args</span></span><span class="punctuation"><span class="pre">:</span></span></code>, controlling the parameters of the hyper-parameter search method,</li>
+<li><code class="code yaml docutils literal notranslate"><span class="name tag"><span class="pre">nb_folds</span></span><span class="punctuation"><span class="pre">:</span></span></code>, controlling the number of folds in the cross-validation process.</li>
 </ul>
 <div class="section" id="example-2-1-no-hyper-parameter-optimization-impact-of-split-size">
 <h3>Example 2.1 : No hyper-parameter optimization, impact of split size<a class="headerlink" href="#example-2-1-no-hyper-parameter-optimization-impact-of-split-size" title="Permalink to this headline">¶</a></h3>
@@ -65,13 +67,13 @@
 and the multiview classifier is a late fusion majority vote.</p>
 <p>In order to use only a subset of the available classifiers, three lines in the configuration file are useful :</p>
 <ul class="simple">
-<li><p><code class="code yaml docutils literal notranslate"><span class="name tag"><span class="pre">type</span></span><span class="punctuation"><span class="pre">:</span></span></code> (<a class="reference external" href="https://gitlab.lis-lab.fr/baptiste.bauvin/summit/-/tree/master/multiview_platform/examples/config_files/config_example_2_1_1.yml#L45">l45</a>) in which one has to specify which type of algorithms are needed, here we used  <code class="docutils literal notranslate"><span class="pre">type:</span> <span class="pre">[&quot;monoview&quot;,&quot;multiview&quot;]</span></code>,</p></li>
-<li><p><code class="code yaml docutils literal notranslate"><span class="name tag"><span class="pre">algos_monoview</span></span><span class="punctuation"><span class="pre">:</span></span></code> (<a class="reference external" href="https://gitlab.lis-lab.fr/baptiste.bauvin/summit/-/tree/master/multiview_platform/examples/config_files/config_example_2_1_1.yml#L45">l47</a>) in which one specifies the names of the monoview algorithms to run, here we used : <code class="code yaml docutils literal notranslate"><span class="name tag"><span class="pre">algos_monoview</span></span><span class="punctuation"><span class="pre">:</span></span> <span class="punctuation indicator"><span class="pre">[</span></span><span class="literal string"><span class="pre">“decision_tree”</span></span><span class="punctuation indicator"><span class="pre">,</span></span> <span class="literal string"><span class="pre">“adaboost”</span></span><span class="punctuation indicator"><span class="pre">,</span></span> <span class="punctuation indicator"><span class="pre">]</span></span></code></p></li>
-<li><p><code class="code yaml docutils literal notranslate"><span class="name tag"><span class="pre">algos_multiview</span></span><span class="punctuation"><span class="pre">:</span></span></code> (<a class="reference external" href="https://gitlab.lis-lab.fr/baptiste.bauvin/summit/-/tree/master/multiview_platform/examples/config_files/config_example_2_1_1.yml#L45">l49</a>) is the same but with multiview algorithms, here we used : <code class="code yaml docutils literal notranslate"><span class="name tag"><span class="pre">algos_multiview</span></span><span class="punctuation"><span class="pre">:</span></span> <span class="punctuation indicator"><span class="pre">[</span></span><span class="literal string"><span class="pre">“weighted_linear_late_fusion”</span></span><span class="punctuation indicator"><span class="pre">,</span></span> <span class="punctuation indicator"><span class="pre">]</span></span></code></p></li>
+<li><code class="code yaml docutils literal notranslate"><span class="name tag"><span class="pre">type</span></span><span class="punctuation"><span class="pre">:</span></span></code> (<a class="reference external" href="https://gitlab.lis-lab.fr/baptiste.bauvin/summit/-/tree/master/multiview_platform/examples/config_files/config_example_2_1_1.yml#L45">l45</a>) in which one has to specify which types of algorithms are needed; here we used <code class="docutils literal notranslate"><span class="pre">type:</span> <span class="pre">[&quot;monoview&quot;,&quot;multiview&quot;]</span></code>,</li>
+<li><code class="code yaml docutils literal notranslate"><span class="name tag"><span class="pre">algos_monoview</span></span><span class="punctuation"><span class="pre">:</span></span></code> (<a class="reference external" href="https://gitlab.lis-lab.fr/baptiste.bauvin/summit/-/tree/master/multiview_platform/examples/config_files/config_example_2_1_1.yml#L45">l47</a>) in which one specifies the names of the monoview algorithms to run; here we used : <code class="code yaml docutils literal notranslate"><span class="name tag"><span class="pre">algos_monoview</span></span><span class="punctuation"><span class="pre">:</span></span> <span class="punctuation indicator"><span class="pre">[</span></span><span class="literal string"><span class="pre">“decision_tree”</span></span><span class="punctuation indicator"><span class="pre">,</span></span> <span class="literal string"><span class="pre">“adaboost”</span></span><span class="punctuation indicator"><span class="pre">,</span></span> <span class="punctuation indicator"><span class="pre">]</span></span></code></li>
+<li><code class="code yaml docutils literal notranslate"><span class="name tag"><span class="pre">algos_multiview</span></span><span class="punctuation"><span class="pre">:</span></span></code> (<a class="reference external" href="https://gitlab.lis-lab.fr/baptiste.bauvin/summit/-/tree/master/multiview_platform/examples/config_files/config_example_2_1_1.yml#L45">l49</a>) is the same but with multiview algorithms; here we used : <code class="code yaml docutils literal notranslate"><span class="name tag"><span class="pre">algos_multiview</span></span><span class="punctuation"><span class="pre">:</span></span> <span class="punctuation indicator"><span class="pre">[</span></span><span class="literal string"><span class="pre">“weighted_linear_late_fusion”</span></span><span class="punctuation indicator"><span class="pre">,</span></span> <span class="punctuation indicator"><span class="pre">]</span></span></code></li>
 </ul>
 <div class="admonition note">
-<p class="admonition-title">Note</p>
-<p>For the platform to understand the names, the user has to give the <strong>name of the python module</strong> in which the classifier is implemented in the platform.</p>
+<p class="first admonition-title">Note</p>
+<p class="last">For the platform to understand the names, the user has to give the <strong>name of the python module</strong> in which the classifier is implemented in the platform.</p>
 </div>
 <p>In the config file, the default values for Adaboost’s hyper-parameters are :</p>
 <div class="highlight-yaml notranslate"><div class="highlight"><pre><span></span><span class="nt">adaboost</span><span class="p">:</span>
@@ -103,7 +105,7 @@ and the multivew classifier is a late fusion majority vote.</p>
 <h4>Learning on a few examples<a class="headerlink" href="#learning-on-a-few-examples" title="Permalink to this headline">¶</a></h4>
 <p>This example focuses on one line of the config file :</p>
 <ul class="simple">
-<li><p><code class="code yaml docutils literal notranslate"><span class="name tag"><span class="pre">split</span></span><span class="punctuation"><span class="pre">:</span></span> <span class="literal scalar plain"><span class="pre">0.80</span></span></code> (<a class="reference external" href="https://gitlab.lis-lab.fr/baptiste.bauvin/summit/-/tree/master/multiview_platform/examples/config_files/config_example_2_1_1.yml#L37">l37</a>).</p></li>
+<li><code class="code yaml docutils literal notranslate"><span class="name tag"><span class="pre">split</span></span><span class="punctuation"><span class="pre">:</span></span> <span class="literal scalar plain"><span class="pre">0.80</span></span></code> (<a class="reference external" href="https://gitlab.lis-lab.fr/baptiste.bauvin/summit/-/tree/master/multiview_platform/examples/config_files/config_example_2_1_1.yml#L37">l37</a>).</li>
 </ul>
 <p>To run the first part of this example, run :</p>
 <div class="highlight-python notranslate"><div class="highlight"><pre><span></span><span class="gp">&gt;&gt;&gt; </span><span class="kn">from</span> <span class="nn">multiview_platform.execute</span> <span class="kn">import</span> <span class="n">execute</span>
@@ -176,8 +178,8 @@ and the multivew classifier is a late fusion majority vote.</p>
 <h4>Conclusion<a class="headerlink" href="#conclusion" title="Permalink to this headline">¶</a></h4>
 <p>The split ratio has two consequences :</p>
 <ul class="simple">
-<li><p>Increasing the test set size decreases the information available in the train set size so either it helps to avoid overfitting (Adaboost) or it can hide useful information to the classifier and therefor decrease its performance (decision tree),</p></li>
-<li><p>The second consequence is that increasing train size will increase the benchmark duration as the classifiers will have to learn  on more examples, this duration modification is higher if the dataset has high dimensionality and if the algorithms are complex.</p></li>
+<li>Increasing the test set size decreases the information available in the train set, so either it helps to avoid overfitting (Adaboost) or it can hide useful information from the classifier and therefore decrease its performance (decision tree),</li>
+<li>The second consequence is that increasing the train size will increase the benchmark duration, as the classifiers will have to learn on more examples; this increase in duration is larger if the dataset has high dimensionality and if the algorithms are complex.</li>
 </ul>
 </div>
 </div>
@@ -190,11 +192,11 @@ This is only useful if one knows the optimal combination of hyper-parameter for
 <p>In this example, we will use a randomized search, one of the two hyper-parameter optimization methods implemented in SuMMIT.</p>
 <p>To do so we will go through five lines of the config file :</p>
 <ul class="simple">
-<li><p><code class="code yaml docutils literal notranslate"><span class="name tag"><span class="pre">hps_type</span></span><span class="punctuation"><span class="pre">:</span></span></code>, controlling the type of hyper-parameter search,</p></li>
-<li><p><code class="code yaml docutils literal notranslate"><span class="name tag"><span class="pre">n_iter</span></span><span class="punctuation"><span class="pre">:</span></span></code>, controlling the number of random draws during the hyper-parameter search,</p></li>
-<li><p><code class="code yaml docutils literal notranslate"><span class="name tag"><span class="pre">equivalent_draws</span></span><span class="punctuation"><span class="pre">:</span></span></code>, controlling the number fo draws for multiview algorithms,</p></li>
-<li><p><code class="code yaml docutils literal notranslate"><span class="name tag"><span class="pre">nb_folds</span></span><span class="punctuation"><span class="pre">:</span></span></code>, controlling the number of folds in the cross-validation process,</p></li>
-<li><p><code class="code yaml docutils literal notranslate"><span class="name tag"><span class="pre">metric_princ</span></span><span class="punctuation"><span class="pre">:</span></span></code>, controlling which metric will be used in the cross-validation.</p></li>
+<li><code class="code yaml docutils literal notranslate"><span class="name tag"><span class="pre">hps_type</span></span><span class="punctuation"><span class="pre">:</span></span></code>, controlling the type of hyper-parameter search,</li>
+<li><code class="code yaml docutils literal notranslate"><span class="name tag"><span class="pre">n_iter</span></span><span class="punctuation"><span class="pre">:</span></span></code>, controlling the number of random draws during the hyper-parameter search,</li>
+<li><code class="code yaml docutils literal notranslate"><span class="name tag"><span class="pre">equivalent_draws</span></span><span class="punctuation"><span class="pre">:</span></span></code>, controlling the number of draws for multiview algorithms,</li>
+<li><code class="code yaml docutils literal notranslate"><span class="name tag"><span class="pre">nb_folds</span></span><span class="punctuation"><span class="pre">:</span></span></code>, controlling the number of folds in the cross-validation process,</li>
+<li><code class="code yaml docutils literal notranslate"><span class="name tag"><span class="pre">metric_princ</span></span><span class="punctuation"><span class="pre">:</span></span></code>, controlling which metric will be used in the cross-validation.</li>
 </ul>
 <p>So if you run SuMMIT with :</p>
 <div class="highlight-python notranslate"><div class="highlight"><pre><span></span><span class="gp">&gt;&gt;&gt; </span><span class="kn">from</span> <span class="nn">multiview_platform.execute</span> <span class="kn">import</span> <span class="n">execute</span>
@@ -238,8 +240,8 @@ for each multiview classifier:
 <p>The instructions inside the brackets are the ones that the hyper-parameter
 optimization adds.</p>
 <div class="admonition note">
-<p class="admonition-title">Note</p>
-<p>As the randomized search has independent steps, it profits a lot from multi-threading, however, it is not available at the moment, but is one of our priorities.</p>
+<p class="first admonition-title">Note</p>
+<p class="last">As the randomized search has independent steps, it profits a lot from multi-threading, however, it is not available at the moment, but is one of our priorities.</p>
 </div>
 <div class="section" id="the-results">
 <h4>The results<a class="headerlink" href="#the-results" title="Permalink to this headline">¶</a></h4>
@@ -270,8 +272,8 @@ optimization adds.</p>
 </body>
 </html><p>The choice made here is to allow a different number of draws for mono and multiview classifiers. However, allowing the same number of draws for both is also possible by setting <code class="code yaml docutils literal notranslate"><span class="name tag"><span class="pre">equivalent_draws</span></span><span class="punctuation"><span class="pre">:</span></span> <span class="literal scalar plain"><span class="pre">False</span></span></code>.</p>
 <div class="admonition note">
-<p class="admonition-title">Note</p>
-<p>The mutliview algorithm used here is late fusion, which means it learns a monoview classifier on each view and then build a naive majority vote. In terms of hyper parameters, the late fusion classifier has to choose one monoview classifier and its hyper-parameter <strong>for each view</strong>. This is why the <code class="code yaml docutils literal notranslate"><span class="name tag"><span class="pre">equivalent_draws</span></span><span class="punctuation"><span class="pre">:</span></span></code> parameter is implemented, as with only 5 draws, the late fusion classifier is not able to remotely cover its hyper-parameter space, while the monoview algorithms have a much easier problem to solve.</p>
+<p class="first admonition-title">Note</p>
+<p class="last">The mutliview algorithm used here is late fusion, which means it learns a monoview classifier on each view and then build a naive majority vote. In terms of hyper parameters, the late fusion classifier has to choose one monoview classifier and its hyper-parameter <strong>for each view</strong>. This is why the <code class="code yaml docutils literal notranslate"><span class="name tag"><span class="pre">equivalent_draws</span></span><span class="punctuation"><span class="pre">:</span></span></code> parameter is implemented, as with only 5 draws, the late fusion classifier is not able to remotely cover its hyper-parameter space, while the monoview algorithms have a much easier problem to solve.</p>
 </div>
 </div>
 <div class="section" id="id2">
@@ -309,8 +311,8 @@ with different fold/draws settings :</p>
         </div>
 </body>
 </html><div class="admonition note">
-<p class="admonition-title">Note</p>
-<p>The durations are for reference only as they highly depend on hardware.</p>
+<p class="first admonition-title">Note</p>
+<p class="last">The durations are for reference only as they highly depend on hardware.</p>
 </div>
 </div>
 </div>
@@ -344,8 +346,8 @@ several depths for a decision tree, and several <code class="code yaml docutils
 </div>
 <p>This will run the late fusion classifier with either</p>
 <ul class="simple">
-<li><p>one decision tree per view, with a maximum depth of 3,</p></li>
-<li><p>one Adaboost per view with 10 base estimators.</p></li>
+<li>one decision tree per view, with a maximum depth of 3,</li>
+<li>one Adaboost per view with 10 base estimators.</li>
 </ul>
 <p>To run a grid search with this configuration, run :</p>
 <div class="highlight-python notranslate"><div class="highlight"><pre><span></span><span class="gp">&gt;&gt;&gt; </span><span class="kn">from</span> <span class="nn">multiview_platform.execute</span> <span class="kn">import</span> <span class="n">execute</span>
@@ -363,6 +365,7 @@ same dataset.</p>
 <p>For most of the algorithms, it is possible to paste the report in the config file,
 for example for the decision tree on the first view the <code class="docutils literal notranslate"><span class="pre">*-hps_report.txt</span></code> file generated by the randomized search of <a class="reference external" href="http://baptiste.bauvin.pages.lis-lab.fr/summit/tutorials/example2.html#example-2-2-usage-of-randomized-hyper-parameter-optimization">example 2.2</a> looks like :</p>
 <pre class="literal-block">
+
 criterion: gini
 max_depth: 202
 splitter: random
@@ -387,7 +390,8 @@ criterion: entropy
 max_depth: 182
 splitter: best
 
-                0.20454545454545453</pre>
+                0.20454545454545453
+</pre>
 <p>This means that the cross-validation score of the decision tree on the first view, when using the following hyper-parameters, is 0.2879.</p>
 <div class="highlight-yaml notranslate"><div class="highlight"><pre><span></span><span class="nt">criterion</span><span class="p">:</span> <span class="l l-Scalar l-Scalar-Plain">gini</span>
 <span class="nt">max_depth</span><span class="p">:</span> <span class="l l-Scalar l-Scalar-Plain">202</span>
diff --git a/docs/build/tutorials/index.html b/docs/build/tutorials/index.html
index aeacef1140c223b635916168b27714555134a64a..7a9efe1ba31e185236483dfc299224a7145baf44 100644
--- a/docs/build/tutorials/index.html
+++ b/docs/build/tutorials/index.html
@@ -1,9 +1,11 @@
 
-<!DOCTYPE html>
+<!DOCTYPE html PUBLIC "-//W3C//DTD XHTML 1.0 Transitional//EN"
+  "http://www.w3.org/TR/xhtml1/DTD/xhtml1-transitional.dtd">
 
-<html>
+<html xmlns="http://www.w3.org/1999/xhtml">
   <head>
-    <meta charset="utf-8" />
+    <meta http-equiv="X-UA-Compatible" content="IE=Edge" />
+    <meta http-equiv="Content-Type" content="text/html; charset=utf-8" />
     <title>SuMMIT Tutorials &#8212; SuMMIT 0 documentation</title>
     <link rel="stylesheet" href="../_static/classic.css" type="text/css" />
     <link rel="stylesheet" href="../_static/pygments.css" type="text/css" />
@@ -18,7 +20,7 @@
     <link rel="index" title="Index" href="../genindex.html" />
     <link rel="search" title="Search" href="../search.html" />
     <link rel="next" title="Install SuMMIT" href="installation.html" />
-    <link rel="prev" title="&lt;no title&gt;" href="../readme_link.html" /> 
+    <link rel="prev" title="Welcome to Supervised MultiModal Integration Tool’s documentation" href="../index.html" /> 
   </head><body>
     <div class="related" role="navigation" aria-label="related navigation">
       <h3>Navigation</h3>
@@ -33,7 +35,7 @@
           <a href="installation.html" title="Install SuMMIT"
              accesskey="N">next</a> |</li>
         <li class="right" >
-          <a href="../readme_link.html" title="&lt;no title&gt;"
+          <a href="../index.html" title="Welcome to Supervised MultiModal Integration Tool’s documentation"
              accesskey="P">previous</a> |</li>
         <li class="nav-item nav-item-0"><a href="../index.html">SuMMIT 0 documentation</a> &#187;</li> 
       </ul>
@@ -67,8 +69,8 @@
       <div class="sphinxsidebar" role="navigation" aria-label="main navigation">
         <div class="sphinxsidebarwrapper">
   <h4>Previous topic</h4>
-  <p class="topless"><a href="../readme_link.html"
-                        title="previous chapter">&lt;no title&gt;</a></p>
+  <p class="topless"><a href="../index.html"
+                        title="previous chapter">Welcome to Supervised MultiModal Integration Tool’s documentation</a></p>
   <h4>Next topic</h4>
   <p class="topless"><a href="installation.html"
                         title="next chapter">Install SuMMIT</a></p>
@@ -106,7 +108,7 @@
           <a href="installation.html" title="Install SuMMIT"
              >next</a> |</li>
         <li class="right" >
-          <a href="../readme_link.html" title="&lt;no title&gt;"
+          <a href="../index.html" title="Welcome to Supervised MultiModal Integration Tool’s documentation"
              >previous</a> |</li>
         <li class="nav-item nav-item-0"><a href="../index.html">SuMMIT 0 documentation</a> &#187;</li> 
       </ul>
diff --git a/docs/source/analyzeresult.rst b/docs/source/analyzeresult.rst
index 1477e2efa0768f3d3dc09bbb52e974d931d65391..69a31ddb9eef2530ceec13c0d4b59be48945260c 100644
--- a/docs/source/analyzeresult.rst
+++ b/docs/source/analyzeresult.rst
@@ -1,5 +1,5 @@
 Result analysis module
 ======================
 
-.. automodule:: multiview_platform.mono_multi_view_classifiers.result_analysis
+.. automodule:: summit.multiview_platform.result_analysis
    :members:
diff --git a/docs/source/conf.py b/docs/source/conf.py
index 01c4690c87846617580ecf3d26d6be7b778d329b..2ab75d491a5e0f55b7fa97b147eaabf0c8e183ac 100644
--- a/docs/source/conf.py
+++ b/docs/source/conf.py
@@ -22,11 +22,11 @@ import os
 import sys
 
 sys.path.insert(0, os.path.abspath('.'))
-sys.path.insert(0, os.path.abspath('../../multiview_platform'))
+sys.path.insert(0, os.path.abspath('../../summit'))
 sys.path.insert(0, os.path.abspath('../..'))
 file_loc = os.path.split(__file__)[0]
 sys.path.insert(0, os.path.abspath(os.path.join(os.path.dirname(file_loc), '.')))
-# import multiview_platform
+# import summit
 # -- General configuration ------------------------------------------------
 
 # If your documentation needs a minimal Sphinx version, state it here.
diff --git a/docs/source/execution.rst b/docs/source/execution.rst
index b465e63f09e2175ba485d45e2d82b79bab573baa..50fe10ef726303cedc3125bab3fa2ab77581bc21 100644
--- a/docs/source/execution.rst
+++ b/docs/source/execution.rst
@@ -1,6 +1,6 @@
 Welcome to the execution documentation
 ======================================
 
-.. automodule:: multiview_platform.execute
+.. automodule:: summit.execute
    :members:
 
diff --git a/docs/source/modules.rst b/docs/source/modules.rst
index 684fd7ceb985e96b2dd74692d4406610c49b7c92..994afd41fa2423a09dba70c484bb776ce986396b 100644
--- a/docs/source/modules.rst
+++ b/docs/source/modules.rst
@@ -1,7 +1,7 @@
-multiview_platform
-==================
+summit
+======
 
 .. toctree::
    :maxdepth: 4
 
-   multiview_platform
+   summit
diff --git a/multiview_platform/__init__.py b/multiview_platform/__init__.py
deleted file mode 100644
index f51f7a705fcb8d3f34a1083b6f1753b2098d4520..0000000000000000000000000000000000000000
--- a/multiview_platform/__init__.py
+++ /dev/null
@@ -1,5 +0,0 @@
-"""This is a test docstring to test stuff"""
-
-__version__ = "0.0.0.0"
-
-from . import mono_multi_view_classifiers, execute
diff --git a/multiview_platform/examples/__init__.py b/multiview_platform/examples/__init__.py
deleted file mode 100644
index e69de29bb2d1d6434b8b29ae775ad8c2e48c5391..0000000000000000000000000000000000000000
diff --git a/multiview_platform/examples/config_files/config_example_0.yml b/multiview_platform/examples/config_files/config_example_0.yml
deleted file mode 100644
index d16f5843c1a5ddfd8e564e874f7b02f94b4d8f08..0000000000000000000000000000000000000000
--- a/multiview_platform/examples/config_files/config_example_0.yml
+++ /dev/null
@@ -1,78 +0,0 @@
-# The base configuration of the benchmark
-
-# Enable logging
-log: True
-# The name of each dataset in the directory on which the benchmark should be run
-name: "digits_doc"
-# A label for the resul directory
-label: "example_0"
-# The type of dataset, currently supported ".hdf5", and ".csv"
-file_type: ".hdf5"
-# The views to use in the banchmark, an empty value will result in using all the views
-views:
-# The path to the directory where the datasets are stored, an absolute path is advised
-pathf: "examples/data/"
-# The niceness of the processes, useful to lower their priority
-nice: 0
-# The random state of the benchmark, useful for reproducibility
-random_state: 42
-# The number of parallel computing threads
-nb_cores: 1
-# Used to run the benchmark on the full dataset
-full: True
-# Used to be able to run more than one benchmark per minute
-debug: False
-# The directory in which the results will be stored, an absolute path is advised
-res_dir: "examples/results/example_0/"
-# If an error occurs in a classifier, if track_tracebacks is set to True, the
-# benchmark saves the traceback and continues, if it is set to False, it will
-# stop the benchmark and raise the error
-track_tracebacks: True
-
-# All the classification-realted configuration options
-
-# The ratio of test examples/number of train examples
-split: 0.25
-# The nubmer of folds in the cross validation process when hyper-paramter optimization is performed
-nb_folds: 2
-# The number of classes to select in the dataset
-nb_class:
-# The name of the classes to select in the dataset
-classes:
-# The type of algorithms to run during the benchmark (monoview and/or multiview)
-type: ["monoview","multiview"]
-# The name of the monoview algorithms to run, ["all"] to run all the available classifiers
-algos_monoview: ["decision_tree"]
-# The names of the multiview algorithms to run, ["all"] to run all the available classifiers
-algos_multiview: ["weighted_linear_early_fusion", "weighted_linear_late_fusion",]
-# The number of times the benchamrk is repeated with different train/test
-# split, to have more statistically significant results
-stats_iter: 1
-# The metrics that will be use din the result analysis
-metrics:
-  accuracy_score: {}
-  f1_score:
-    average: "micro"
-# The metric that will be used in the hyper-parameter optimization process
-metric_princ: "accuracy_score"
-# The type of hyper-parameter optimization method
-hps_type: "None"
-# The number of iteration in the hyper-parameter optimization process
-hps_args: {}
-
-### Configuring the hyper-parameters for the classifiers
-
-decision_tree:
-  max_depth: 3
-
-weighted_linear_early_fusion:
-  monoview_classifier_name: "decision_tree"
-  monoview_classifier_config:
-    decision_tree:
-      max_depth: 6
-
-weighted_linear_late_fusion:
-  classifiers_names: "decision_tree"
-  classifier_configs:
-    decision_tree:
-      max_depth: 3
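
The config files above are plain YAML, so they can be loaded and inspected programmatically before a run. A minimal sketch, assuming PyYAML is installed and the relative path used above resolves:

```python
# Minimal sketch: inspect a benchmark config before running it.
# Assumes PyYAML is installed; the path mirrors the example layout above.
import yaml

with open("examples/config_files/config_example_0.yml") as config_file:
    config = yaml.safe_load(config_file)

print(config["name"])            # digits_doc
print(config["algos_monoview"])  # ['decision_tree']
print(config["decision_tree"])   # {'max_depth': 3}
```
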
diff --git a/multiview_platform/examples/config_files/config_example_1.yml b/multiview_platform/examples/config_files/config_example_1.yml
deleted file mode 100644
index fb9ab405aa0015885cccc08941b50c1c0188e9b7..0000000000000000000000000000000000000000
--- a/multiview_platform/examples/config_files/config_example_1.yml
+++ /dev/null
@@ -1,78 +0,0 @@
-# The base configuration of the benchmark
-
-# Enable logging
-log: True
-# The name of each dataset in the directory on which the benchmark should be run
-name: "doc_summit"
-# A label for the result directory
-label: "example_1"
-# The type of dataset, currently supported ".hdf5", and ".csv"
-file_type: ".hdf5"
-# The views to use in the benchmark; an empty value will result in using all the views
-views:
-# The path to the directory where the datasets are stored, an absolute path is advised
-pathf: "examples/data/mkljlj"
-# The niceness of the processes, useful to lower their priority
-nice: 0
-# The random state of the benchmark, useful for reproducibility
-random_state: 42
-# The number of parallel computing threads
-nb_cores: 1
-# Used to run the benchmark on the full dataset
-full: True
-# Used to be able to run more than one benchmark per minute
-debug: False
-# The directory in which the results will be stored, an absolute path is advised
-res_dir: "examples/results/example_1/"
-# If an error occurs in a classifier, if track_tracebacks is set to True, the
-# benchmark saves the traceback and continues, if it is set to False, it will
-# stop the benchmark and raise the error
-track_tracebacks: True
-
-# All the classification-related configuration options
-
-# The ratio of the number of test examples to the number of train examples
-split: 0.35
-# The number of folds in the cross-validation process when hyper-parameter optimization is performed
-nb_folds: 2
-# The number of classes to select in the dataset
-nb_class:
-# The name of the classes to select in the dataset
-classes:
-# The type of algorithms to run during the benchmark (monoview and/or multiview)
-type: ["monoview","multiview"]
-# The names of the monoview algorithms to run, ["all"] to run all the available classifiers
-algos_monoview: ["decision_tree"]
-# The names of the multiview algorithms to run, ["all"] to run all the available classifiers
-algos_multiview: ["weighted_linear_late_fusion",]
-# The number of times the benchmark is repeated with different train/test
-# splits, to have more statistically significant results
-stats_iter: 1
-# The metrics that will be used in the result analysis
-metrics:
-  accuracy_score: {}
-  f1_score:
-    average: "micro"
-# The metric that will be used in the hyper-parameter optimization process
-metric_princ: "accuracy_score"
-# The type of hyper-parameter optimization method
-hps_type: "None"
-# The arguments of the hyper-parameter optimization process
-hps_args: {}
-
-### Configuring the hyper-parameters for the classifiers
-
-decision_tree:
-  max_depth: 3
-
-weighted_linear_early_fusion:
-  monoview_classifier_name: "decision_tree"
-  monoview_classifier_config:
-    decision_tree:
-      max_depth: 6
-
-weighted_linear_late_fusion:
-  classifiers_names: "decision_tree"
-  classifier_configs:
-    decision_tree:
-      max_depth: 3
diff --git a/multiview_platform/examples/config_files/config_example_2_1_1.yml b/multiview_platform/examples/config_files/config_example_2_1_1.yml
deleted file mode 100644
index b1a9e2e7379828d8b5a7ef1c1fd22c80862e2b6e..0000000000000000000000000000000000000000
--- a/multiview_platform/examples/config_files/config_example_2_1_1.yml
+++ /dev/null
@@ -1,83 +0,0 @@
-# The base configuration of the benchmark
-
-# Enable logging
-log: True
-# The name of each dataset in the directory on which the benchmark should be run
-name: "doc_summit"
-# A label for the result directory
-label: "example_2_1_1"
-# The type of dataset, currently supported ".hdf5", and ".csv"
-file_type: ".hdf5"
-# The views to use in the benchmark; an empty value will result in using all the views
-views:
-# The path to the directory where the datasets are stored, an absolute path is advised
-pathf: "examples/data/"
-# The niceness of the processes, useful to lower their priority
-nice: 0
-# The random state of the benchmark, useful for reproducibility
-random_state: 42
-# The number of parallel computing threads
-nb_cores: 1
-# Used to run the benchmark on the full dataset
-full: True
-# Used to be able to run more than one benchmark per minute
-debug: False
-# The directory in which the results will be stored, an absolute path is advised
-res_dir: "examples/results/example_2_1_1/"
-# If an error occurs in a classifier, if track_tracebacks is set to True, the
-# benchmark saves the traceback and continues, if it is set to False, it will
-# stop the benchmark and raise the error
-track_tracebacks: True
-
-# All the classification-related configuration options
-
-# If the dataset is multiclass, will use this multiclass-to-biclass method
-multiclass_method: "oneVersusOne"
-# The ratio of the number of test examples to the number of train examples
-split: 0.8
-# The number of folds in the cross-validation process when hyper-parameter optimization is performed
-nb_folds: 2
-# The number of classes to select in the dataset
-nb_class: 2
-# The name of the classes to select in the dataset
-classes:
-# The type of algorithms to run during the benchmark (monoview and/or multiview)
-type: ["monoview","multiview"]
-# The names of the monoview algorithms to run, ["all"] to run all the available classifiers
-algos_monoview: ["decision_tree", "adaboost", ]
-# The names of the multiview algorithms to run, ["all"] to run all the available classifiers
-algos_multiview: ["weighted_linear_late_fusion", ]
-# The number of times the benchmark is repeated with different train/test
-# splits, to have more statistically significant results
-stats_iter: 1
-# The metrics that will be used in the result analysis
-metrics:
-  accuracy_score: {}
-  f1_score:
-    average: "micro"
-# The metric that will be used in the hyper-parameter optimization process
-metric_princ: "accuracy_score"
-# The type of hyper-parameter optimization method
-hps_type: "None"
-# The arguments of the hyper-parameter optimization process
-hps_args: {}
-
-decision_tree:
-  max_depth: 3
-
-adaboost:
-  base_estimator: "DecisionTreeClassifier"
-  n_estimators: 50
-
-weighted_linear_late_fusion:
-  classifiers_names: "decision_tree"
-  classifier_configs:
-    decision_tree:
-      max_depth: 2
-
-
-# The following arguments are classifier-specific, and are documented in each
-# of the corresponding modules.
-
-# In order to run multiple sets of parameters, use multiple values in the
-# following lists, and set hps_type to None.
diff --git a/multiview_platform/examples/config_files/config_example_2_1_2.yml b/multiview_platform/examples/config_files/config_example_2_1_2.yml
deleted file mode 100644
index 256e18a8ade378f29c32ead31fcbba28b1db62b3..0000000000000000000000000000000000000000
--- a/multiview_platform/examples/config_files/config_example_2_1_2.yml
+++ /dev/null
@@ -1,83 +0,0 @@
-# The base configuration of the benchmark
-
-# Enable logging
-log: True
-# The name of each dataset in the directory on which the benchmark should be run
-name: "doc_summit"
-# A label for the result directory
-label: "example_2_1_2"
-# The type of dataset, currently supported ".hdf5", and ".csv"
-file_type: ".hdf5"
-# The views to use in the benchmark; an empty value will result in using all the views
-views:
-# The path to the directory where the datasets are stored, an absolute path is advised
-pathf: "examples/data/"
-# The niceness of the processes, useful to lower their priority
-nice: 0
-# The random state of the benchmark, useful for reproducibility
-random_state: 42
-# The number of parallel computing threads
-nb_cores: 1
-# Used to run the benchmark on the full dataset
-full: True
-# Used to be able to run more than one benchmark per minute
-debug: False
-# The directory in which the results will be stored, an absolute path is advised
-res_dir: "examples/results/example_2_1_2/"
-# If an error occurs in a classifier, if track_tracebacks is set to True, the
-# benchmark saves the traceback and continues, if it is set to False, it will
-# stop the benchmark and raise the error
-track_tracebacks: True
-
-# All the classification-related configuration options
-
-# If the dataset is multiclass, will use this multiclass-to-biclass method
-multiclass_method: "oneVersusOne"
-# The ratio of the number of test examples to the number of train examples
-split: 0.2
-# The number of folds in the cross-validation process when hyper-parameter optimization is performed
-nb_folds: 2
-# The number of classes to select in the dataset
-nb_class: 2
-# The name of the classes to select in the dataset
-classes:
-# The type of algorithms to run during the benchmark (monoview and/or multiview)
-type: ["monoview","multiview"]
-# The names of the monoview algorithms to run, ["all"] to run all the available classifiers
-algos_monoview: ["decision_tree", "adaboost", ]
-# The names of the multiview algorithms to run, ["all"] to run all the available classifiers
-algos_multiview: ["weighted_linear_late_fusion", ]
-# The number of times the benchmark is repeated with different train/test
-# splits, to have more statistically significant results
-stats_iter: 1
-# The metrics that will be used in the result analysis
-metrics:
-  accuracy_score: {}
-  f1_score:
-    average: "micro"
-# The metric that will be used in the hyper-parameter optimization process
-metric_princ: "accuracy_score"
-# The type of hyper-parameter optimization method
-hps_type: "None"
-# The arguments of the hyper-parameter optimization process
-hps_args: {}
-
-decision_tree:
-  max_depth: 3
-
-adaboost:
-  base_estimator: "DecisionTreeClassifier"
-  n_estimators: 50
-
-weighted_linear_late_fusion:
-  classifiers_names: "decision_tree"
-  classifier_configs:
-    decision_tree:
-      max_depth: 2
-
-
-# The following arguments are classifier-specific, and are documented in each
-# of the corresponding modules.
-
-# In order to run multiple sets of parameters, use multiple values in the
-# following lists, and set hps_type to None.
diff --git a/multiview_platform/examples/config_files/config_example_2_2_1.yml b/multiview_platform/examples/config_files/config_example_2_2_1.yml
deleted file mode 100644
index d462bee16fcf44ecf939c14776435ef15ef8c3ae..0000000000000000000000000000000000000000
--- a/multiview_platform/examples/config_files/config_example_2_2_1.yml
+++ /dev/null
@@ -1,71 +0,0 @@
-# The base configuration of the benchmark
-
-# Enable logging
-log: True
-# The name of each dataset in the directory on which the benchmark should be run
-name: "doc_summit"
-# A label for the result directory
-label: "example_2_2_1"
-# The type of dataset, currently supported ".hdf5", and ".csv"
-file_type: ".hdf5"
-# The views to use in the benchmark; an empty value will result in using all the views
-views:
-# The path to the directory where the datasets are stored, an absolute path is advised
-pathf: "examples/data/"
-# The niceness of the processes, useful to lower their priority
-nice: 0
-# The random state of the benchmark, useful for reproducibility
-random_state: 42
-# The number of parallel computing threads
-nb_cores: 1
-# Used to run the benchmark on the full dataset
-full: True
-# Used to be able to run more than one benchmark per minute
-debug: False
-# The directory in which the results will be stored, an absolute path is advised
-res_dir: "examples/results/example_2_2_1/"
-# If an error occurs in a classifier, if track_tracebacks is set to True, the
-# benchmark saves the traceback and continues, if it is set to False, it will
-# stop the benchmark and raise the error
-track_tracebacks: True
-
-# All the classification-related configuration options
-
-# If the dataset is multiclass, will use this multiclass-to-biclass method
-multiclass_method: "oneVersusOne"
-# The ratio of the number of test examples to the number of train examples
-split: 0.8
-# The number of folds in the cross-validation process when hyper-parameter optimization is performed
-nb_folds: 5
-# The number of classes to select in the dataset
-nb_class: 2
-# The name of the classes to select in the dataset
-classes:
-# The type of algorithms to run during the benchmark (monoview and/or multiview)
-type: ["monoview","multiview"]
-# The names of the monoview algorithms to run, ["all"] to run all the available classifiers
-algos_monoview: ["decision_tree", "adaboost", ]
-# The names of the multiview algorithms to run, ["all"] to run all the available classifiers
-algos_multiview: ["weighted_linear_late_fusion", ]
-# The number of times the benchmark is repeated with different train/test
-# splits, to have more statistically significant results
-stats_iter: 1
-# The metrics that will be used in the result analysis
-metrics:
-  accuracy_score: {}
-  f1_score:
-    average: "micro"
-# The metric that will be used in the hyper-parameter optimization process
-metric_princ: "accuracy_score"
-# The type of hyper-parameter optimization method
-hps_type: 'Random'
-# The arguments of the hyper-parameter optimization process
-hps_args:
-  n_iter: 5
-  equivalent_draws: True
-
-# The following arguments are classifier-specific, and are documented in each
-# of the corresponding modules.
-
-# In order to run multiple sets of parameters, use multiple values in the
-# following lists, and set hps_type to None.
diff --git a/multiview_platform/examples/config_files/config_example_2_3.yml b/multiview_platform/examples/config_files/config_example_2_3.yml
deleted file mode 100644
index bb8fb31ef226ae547d01a909b9eef108df0ba998..0000000000000000000000000000000000000000
--- a/multiview_platform/examples/config_files/config_example_2_3.yml
+++ /dev/null
@@ -1,87 +0,0 @@
-# The base configuration of the benchmark
-
-# Enable logging
-log: True
-# The name of each dataset in the directory on which the benchmark should be run
-name: "doc_summit"
-# A label for the result directory
-label: "example_2_3"
-# The type of dataset, currently supported ".hdf5", and ".csv"
-file_type: ".hdf5"
-# The views to use in the benchmark; an empty value will result in using all the views
-views:
-# The path to the directory where the datasets are stored, an absolute path is advised
-pathf: "examples/data/"
-# The niceness of the processes, useful to lower their priority
-nice: 0
-# The random state of the benchmark, useful for reproducibility
-random_state: 42
-# The number of parallel computing threads
-nb_cores: 1
-# Used to run the benchmark on the full dataset
-full: True
-# Used to be able to run more than one benchmark per minute
-debug: False
-# The directory in which the results will be stored, an absolute path is advised
-res_dir: "examples/results/example_2_3/"
-# If an error occurs in a classifier, if track_tracebacks is set to True, the
-# benchmark saves the traceback and continues, if it is set to False, it will
-# stop the benchmark and raise the error
-track_tracebacks: True
-
-# All the classification-related configuration options
-
-# If the dataset is multiclass, will use this multiclass-to-biclass method
-multiclass_method: "oneVersusOne"
-# The ratio of the number of test examples to the number of train examples
-split: 0.8
-# The number of folds in the cross-validation process when hyper-parameter optimization is performed
-nb_folds: 5
-# The number of classes to select in the dataset
-nb_class: 2
-# The name of the classes to select in the dataset
-classes:
-# The type of algorithms to run during the benchmark (monoview and/or multiview)
-type: ["monoview","multiview"]
-# The names of the monoview algorithms to run, ["all"] to run all the available classifiers
-algos_monoview: ["decision_tree", "adaboost", ]
-# The names of the multiview algorithms to run, ["all"] to run all the available classifiers
-algos_multiview: ["weighted_linear_late_fusion", ]
-# The number of times the benchmark is repeated with different train/test
-# splits, to have more statistically significant results
-stats_iter: 1
-# The metrics that will be used in the result analysis
-metrics:
-  accuracy_score: {}
-  f1_score:
-    average: "micro"
-# The metric that will be used in the hyper-parameter optimization process
-metric_princ: "accuracy_score"
-# The type of hyper-parameter optimization method
-hps_type: 'Grid'
-# The arguments of the hyper-parameter optimization process
-hps_args:
-  decision_tree:
-    max_depth: [1,2,3,4,5]
-
-  adaboost:
-    n_estimators: [10,15,20,25]
-
-  weighted_linear_late_fusion:
-    classifiers_names:
-      - ["decision_tree", "decision_tree", "decision_tree", "decision_tree"]
-      - ["adaboost", "adaboost", "adaboost", "adaboost",]
-
-    classifier_configs:
-      - decision_tree:
-          max_depth: 3
-        adaboost:
-          n_estimators: 10
-
-
-
-# The following arguments are classifier-specific, and are documented in each
-# of the corresponding modules.
-
-# In order to run multiple sets of parameters, use multiple values in the
-# following lists, and set hps_type to None.
diff --git a/multiview_platform/examples/config_files/config_example_3.yml b/multiview_platform/examples/config_files/config_example_3.yml
deleted file mode 100644
index 67ef06ea9b4b9da10e781133d20e0bb0fcaf1abb..0000000000000000000000000000000000000000
--- a/multiview_platform/examples/config_files/config_example_3.yml
+++ /dev/null
@@ -1,84 +0,0 @@
-# The base configuration of the benchmark
-
-# Enable logging
-log: True
-# The name of each dataset in the directory on which the benchmark should be run
-name: "doc_summit"
-# A label for the result directory
-label: "example_3"
-# The type of dataset, currently supported ".hdf5", and ".csv"
-file_type: ".hdf5"
-# The views to use in the benchmark; an empty value will result in using all the views
-views:
-# The path to the directory where the datasets are stored, an absolute path is advised
-pathf: "examples/data/"
-# The niceness of the processes, useful to lower their priority
-nice: 0
-# The random state of the benchmark, useful for reproducibility
-random_state: 42
-# The number of parallel computing threads
-nb_cores: 1
-# Used to run the benchmark on the full dataset
-full: True
-# Used to be able to run more than one benchmark per minute
-debug: False
-# The directory in which the results will be stored, an absolute path is advised
-res_dir: "examples/results/example_3/"
-# If an error occurs in a classifier, if track_tracebacks is set to True, the
-# benchmark saves the traceback and continues, if it is set to False, it will
-# stop the benchmark and raise the error
-track_tracebacks: True
-
-# All the classification-related configuration options
-
-# If the dataset is multiclass, will use this multiclass-to-biclass method
-multiclass_method: "oneVersusOne"
-# The ratio of the number of test examples to the number of train examples
-split: 0.8
-# The number of folds in the cross-validation process when hyper-parameter optimization is performed
-nb_folds: 5
-# The number of classes to select in the dataset
-nb_class: 2
-# The name of the classes to select in the dataset
-classes:
-# The type of algorithms to run during the benchmark (monoview and/or multiview)
-type: ["monoview","multiview"]
-# The names of the monoview algorithms to run, ["all"] to run all the available classifiers
-algos_monoview: ["decision_tree", "adaboost", ]
-# The names of the multiview algorithms to run, ["all"] to run all the available classifiers
-algos_multiview: ["weighted_linear_late_fusion", ]
-# The number of times the benchmark is repeated with different train/test
-# splits, to have more statistically significant results
-stats_iter: 5
-# The metrics that will be used in the result analysis
-metrics:
-  accuracy_score: {}
-  f1_score:
-    average: "micro"
-# The metric that will be used in the hyper-parameter optimization process
-metric_princ: "accuracy_score"
-# The type of hyper-parameter optimization method
-hps_type: 'None'
-# The arguments of the hyper-parameter optimization process
-hps_args: {}
-
-decision_tree:
-  max_depth: 3
-
-adaboost:
-  base_estimator: "DecisionTreeClassifier"
-  n_estimators: 10
-
-weighted_linear_late_fusion:
-  classifiers_names: "decision_tree"
-  classifier_configs:
-    decision_tree:
-      max_depth: 2
-
-
-
-# The following arguments are classifier-specific, and are documented in each
-# of the corresponding modules.
-
-# In order to run multiple sets of parameters, use multiple values in the
-# following lists, and set hps_type to None.
diff --git a/multiview_platform/examples/data/digits_doc.hdf5 b/multiview_platform/examples/data/digits_doc.hdf5
deleted file mode 100644
index 61e452b9118eeabc4972c11803a8bba775dc3301..0000000000000000000000000000000000000000
Binary files a/multiview_platform/examples/data/digits_doc.hdf5 and /dev/null differ
diff --git a/multiview_platform/examples/data/doc_summit.hdf5 b/multiview_platform/examples/data/doc_summit.hdf5
deleted file mode 100644
index 8400dd06429e58d67c98d7b9b1689a534b42e0d8..0000000000000000000000000000000000000000
Binary files a/multiview_platform/examples/data/doc_summit.hdf5 and /dev/null differ
diff --git a/multiview_platform/execute.py b/multiview_platform/execute.py
deleted file mode 100644
index c43c4362c9a7f2755be31da611348d22e6cb2b12..0000000000000000000000000000000000000000
--- a/multiview_platform/execute.py
+++ /dev/null
@@ -1,31 +0,0 @@
-"""This is the execution module, used to execute the code"""
-
-import os
-
-
-def execute(config_path=None):  # pragma: no cover
-    import sys
-
-    from multiview_platform.mono_multi_view_classifiers import exec_classif
-    if config_path is None:
-        exec_classif.exec_classif(sys.argv[1:])
-    else:
-        if config_path == "example 0":
-            config_path = os.path.join(os.path.dirname(os.path.realpath(__file__)), "examples", "config_files", "config_example_0.yml")
-        elif config_path == "example 1":
-            config_path = os.path.join(os.path.dirname(os.path.realpath(__file__)), "examples", "config_files", "config_example_1.yml")
-        elif config_path == "example 2.1.1":
-            config_path = os.path.join(os.path.dirname(os.path.realpath(__file__)), "examples", "config_files", "config_example_2_1_1.yml")
-        elif config_path == "example 2.1.2":
-            config_path = os.path.join(os.path.dirname(os.path.realpath(__file__)), "examples", "config_files", "config_example_2_1_2.yml")
-        elif config_path == "example 2.2":
-            config_path = os.path.join(os.path.dirname(os.path.realpath(__file__)), "examples", "config_files", "config_example_2_2.yml")
-        elif config_path == "example 2.3":
-            config_path = os.path.join(os.path.dirname(os.path.realpath(__file__)), "examples", "config_files", "config_example_2_3.yml")
-        elif config_path == "example 3":
-            config_path = os.path.join(os.path.dirname(os.path.realpath(__file__)), "examples", "config_files", "config_example_3.yml")
-        exec_classif.exec_classif(["--config_path", config_path])
-
-
-if __name__ == "__main__":
-    execute()
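
Besides absolute paths, the removed `execute` entry point also accepted keyword shortcuts that resolved to the bundled example configs, as the mapping above shows. A minimal usage sketch of that behaviour, under the old package name:

```python
# Sketch: the "example N" shortcuts resolved to the bundled config files
# (see the mapping in execute() above) before running the benchmark.
from multiview_platform.execute import execute

execute(config_path="example 0")  # runs config_example_0.yml
```
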
diff --git a/multiview_platform/mono_multi_view_classifiers/__init__.py b/multiview_platform/mono_multi_view_classifiers/__init__.py
deleted file mode 100644
index 9e2c30f3a193aff8b3a8c59f345c15dcff74c7ed..0000000000000000000000000000000000000000
--- a/multiview_platform/mono_multi_view_classifiers/__init__.py
+++ /dev/null
@@ -1,4 +0,0 @@
-from . import exec_classif, result_analysis, metrics, monoview_classifiers, \
-    monoview, multiview, utils, multiview_classifiers
-
-__all__ = ['metrics', 'monoview', 'monoview_classifiers', 'multiview', 'utils']
diff --git a/multiview_platform/mono_multi_view_classifiers/exec_classif.py b/multiview_platform/mono_multi_view_classifiers/exec_classif.py
deleted file mode 100644
index 91d931bec67a0a77b9da9732402c180f8784257c..0000000000000000000000000000000000000000
--- a/multiview_platform/mono_multi_view_classifiers/exec_classif.py
+++ /dev/null
@@ -1,814 +0,0 @@
-import itertools
-import logging
-import os
-import pkgutil
-import time
-import traceback
-
-import matplotlib
-import numpy as np
-from sklearn.tree import DecisionTreeClassifier
-
-# Import own modules
-from . import monoview_classifiers
-from . import multiview_classifiers
-from .monoview.exec_classif_mono_view import exec_monoview
-from .multiview.exec_multiview import exec_multiview
-from .result_analysis.execution import analyze_iterations, analyze
-from .utils import execution, dataset, configuration
-from .utils.organization import secure_file_path
-from .utils.dataset import delete_HDF5
-
-matplotlib.use(
-    'Agg')  # Anti-Grain Geometry C++ library to make a raster (pixel) image of the figure
-
-# Author-Info
-__author__ = "Baptiste Bauvin"
-__status__ = "Prototype"  # Production, Development, Prototype
-
-
-def init_benchmark(cl_type, monoview_algos, multiview_algos):
-    r"""Used to create a list of all the algorithm packages names used for the benchmark.
-
-    First this function will check if the benchmark need mono- or/and multiview
-    algorithms and adds to the right
-    dictionary the asked algorithms. If none is asked by the user, all will be added.
-
-    If the keyword `"Benchmark"` is used, all mono- and multiview algorithms will be added.
-
-    Parameters
-    ----------
-    cl_type : list of strings
-        List of the types of benchmark to run ("monoview" and/or "multiview")
-    monoview_algos : list of strings
-        List of monoview algorithms needed for the benchmark
-    multiview_algos : list of strings
-        List of multiview algorithms needed for the benchmark
-
-    Returns
-    -------
-    benchmark : dictionary of dictionaries
-        Dictionary summarizing which mono- and multiview algorithms will be used in the benchmark.
-    """
-    benchmark = {"monoview": {}, "multiview": {}}
-
-    if "monoview" in cl_type:
-        if monoview_algos == ['all']:  # pragma: no cover
-            benchmark["monoview"] = [name for _, name, isPackage in
-                                     pkgutil.iter_modules(
-                                         monoview_classifiers.__path__)
-                                     if not isPackage]
-
-        else:
-            benchmark["monoview"] = monoview_algos
-
-    if "multiview" in cl_type:
-        if multiview_algos == ["all"]:  # pragma: no cover
-            benchmark["multiview"] = [name for _, name, isPackage in
-                                      pkgutil.iter_modules(
-                                          multiview_classifiers.__path__)
-                                      if not isPackage]
-        else:
-            benchmark["multiview"] = multiview_algos
-    return benchmark
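-
-
-# Illustrative call (hypothetical arguments, mirroring the logic above):
-#     init_benchmark(["monoview", "multiview"], ["decision_tree"],
-#                    ["weighted_linear_late_fusion"])
-# returns
-#     {"monoview": ["decision_tree"],
-#      "multiview": ["weighted_linear_late_fusion"]}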
-
-
-def init_argument_dictionaries(benchmark, views_dictionary,
-                               nb_class, init_kwargs, hps_method,
-                               hps_kwargs):  # pragma: no cover
-    argument_dictionaries = {"monoview": [], "multiview": []}
-    if benchmark["monoview"]:
-        argument_dictionaries["monoview"] = init_monoview_exps(
-            benchmark["monoview"],
-            views_dictionary,
-            nb_class,
-            init_kwargs["monoview"], hps_method, hps_kwargs)
-    if benchmark["multiview"]:
-        argument_dictionaries["multiview"] = init_multiview_exps(
-            benchmark["multiview"],
-            views_dictionary,
-            nb_class,
-            init_kwargs["multiview"], hps_method, hps_kwargs)
-    return argument_dictionaries
-
-
-def init_multiview_exps(classifier_names, views_dictionary, nb_class,
-                        kwargs_init, hps_method, hps_kwargs): # pragma: no cover
-    multiview_arguments = []
-    for classifier_name in classifier_names:
-        arguments = get_path_dict(kwargs_init[classifier_name])
-        if hps_method == "Grid":
-            multiview_arguments += [
-                gen_single_multiview_arg_dictionary(classifier_name,
-                                                    arguments,
-                                                    nb_class,
-                                                    {"param_grid": hps_kwargs[classifier_name]},
-                                                    views_dictionary=views_dictionary)]
-        elif hps_method == "Random":
-            hps_kwargs = dict((key, value)
-                              for key, value in hps_kwargs.items()
-                              if key in ["n_iter", "equivalent_draws"])
-            multiview_arguments += [
-                gen_single_multiview_arg_dictionary(classifier_name,
-                                                    arguments,
-                                                    nb_class,
-                                                    hps_kwargs,
-                                                    views_dictionary=views_dictionary)]
-        elif hps_method == "None":
-            multiview_arguments += [
-                gen_single_multiview_arg_dictionary(classifier_name,
-                                                    arguments,
-                                                    nb_class,
-                                                    hps_kwargs,
-                                                    views_dictionary=views_dictionary)]
-        else:
-            raise ValueError('At the moment only "None", "Random" or "Grid" '
-                             'are available as hyper-parameter search '
-                             'methods, sadly "{}" is not'.format(hps_method)
-                             )
-
-    return multiview_arguments
-
-
-def init_monoview_exps(classifier_names,
-                       views_dictionary, nb_class, kwargs_init, hps_method,
-                       hps_kwargs): # pragma: no cover
-    r"""Used to add each monoview exeperience args to the list of monoview experiences args.
-
-    First this function will check if the benchmark need mono- or/and multiview algorithms and adds to the right
-    dictionary the asked algorithms. If none is asked by the user, all will be added.
-
-    If the keyword `"Benchmark"` is used, all mono- and multiview algorithms will be added.
-
-    Parameters
-    ----------
-    classifier_names : dictionary
-        All types of monoview and multiview experiments that have to be benchmarked
-    argument_dictionaries : dictionary
-        Maps monoview and multiview experiments arguments.
-    views_dictionary : dictionary
-        Maps the view names to their index in the HDF5 dataset
-    nb_class : integer
-        Number of different labels in the classification
-
-    Returns
-    -------
-    benchmark : Dictionary of dictionaries
-        Dictionary resuming which mono- and multiview algorithms which will be used in the benchmark.
-    """
-    monoview_arguments = []
-    for view_name, view_index in views_dictionary.items():
-        for classifier_name in classifier_names:
-            if hps_method == "Grid":
-                arguments = gen_single_monoview_arg_dictionary(classifier_name,
-                                                               kwargs_init,
-                                                               nb_class,
-                                                               view_index,
-                                                               view_name,
-                                                               {"param_grid": hps_kwargs[classifier_name]})
-            elif hps_method == "Random":
-                hps_kwargs = dict((key, value)
-                                  for key, value in hps_kwargs.items()
-                                  if key in ["n_iter", "equivalent_draws"])
-                arguments = gen_single_monoview_arg_dictionary(classifier_name,
-                                                               kwargs_init,
-                                                               nb_class,
-                                                               view_index,
-                                                               view_name,
-                                                               hps_kwargs)
-            elif hps_method == "None":
-                arguments = gen_single_monoview_arg_dictionary(classifier_name,
-                                                               kwargs_init,
-                                                               nb_class,
-                                                               view_index,
-                                                               view_name,
-                                                               hps_kwargs)
-
-            else:
-                raise ValueError('At the moment only "None", "Random" or "Grid" '
-                                 'are available as hyper-parameter search '
-                                 'methods, sadly "{}" is not'.format(hps_method)
-                                 )
-            monoview_arguments.append(arguments)
-    return monoview_arguments
-
-
-def gen_single_monoview_arg_dictionary(classifier_name, arguments, nb_class,
-                                       view_index, view_name, hps_kwargs):
-    if classifier_name in arguments:
-        classifier_config = dict((key, value) for key, value in arguments[
-            classifier_name].items())
-    else:
-        classifier_config = {}
-    return {classifier_name: classifier_config,
-            "view_name": view_name,
-            "view_index": view_index,
-            "classifier_name": classifier_name,
-            "nb_class": nb_class,
-            "hps_kwargs":hps_kwargs }
-
-
-def gen_single_multiview_arg_dictionary(classifier_name, arguments, nb_class,
-                                        hps_kwargs, views_dictionary=None):
-    return {"classifier_name": classifier_name,
-            "view_names": list(views_dictionary.keys()),
-            'view_indices': list(views_dictionary.values()),
-            "nb_class": nb_class,
-            "labels_names": None,
-            "hps_kwargs": hps_kwargs,
-            classifier_name: extract_dict(arguments)
-            }
-
-
-def extract_dict(classifier_config):
-    """Reverse function of get_path_dict"""
-    extracted_dict = {}
-    for key, value in classifier_config.items():
-        extracted_dict = set_element(extracted_dict, key, value)
-    return extracted_dict
-
-
-def set_element(dictionary, path, value):
-    """Set value in dictionary at the location indicated by path"""
-    existing_keys = path.split(".")[:-1]
-    dict_state = dictionary
-    for existing_key in existing_keys:
-        if existing_key in dict_state:
-            dict_state = dict_state[existing_key]
-        else:
-            dict_state[existing_key] = {}
-            dict_state = dict_state[existing_key]
-    dict_state[path.split(".")[-1]] = value
-    return dictionary
-
-
-def get_path_dict(multiview_classifier_args):
-    """This function is used to generate a dictionary with each key being
-    the path to the value.
-    If given {"key1":{"key1_1":value1}, "key2":value2}, it will return
-    {"key1.key1_1":value1, "key2":value2}"""
-    path_dict = dict(
-        (key, value) for key, value in multiview_classifier_args.items())
-    paths = is_dict_in(path_dict)
-    while paths:
-        for path in paths:
-            for key, value in path_dict[path].items():
-                path_dict[".".join([path, key])] = value
-            path_dict.pop(path)
-        paths = is_dict_in(path_dict)
-    return path_dict
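-
-
-# Round-trip sketch (hypothetical values): get_path_dict flattens a nested
-# configuration and extract_dict rebuilds it:
-#     get_path_dict({"key1": {"key1_1": 1}, "key2": 2})
-#     -> {"key2": 2, "key1.key1_1": 1}
-#     extract_dict({"key2": 2, "key1.key1_1": 1})
-#     -> {"key2": 2, "key1": {"key1_1": 1}}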
-
-
-def is_dict_in(dictionary):
-    """
-    Returns the list of keys whose value is itself a dictionary
-    (an empty list if there is none).
-
-    Parameters
-    ----------
-    dictionary : dict
-        The dictionary to inspect.
-
-    Returns
-    -------
-    paths : list
-        The keys whose values are dictionaries.
-    """
-    paths = []
-    for key, value in dictionary.items():
-        if isinstance(value, dict):
-            paths.append(key)
-    return paths
-
-
-def init_kwargs(args, classifiers_names, framework="monoview"):
-    r"""Used to init kwargs thanks to a function in each monoview classifier package.
-
-    Parameters
-    ----------
-    args : parsed args objects
-        All the args passed by the user.
-    classifiers_names : list of strings
-        List of the benchmark's monoview classifier names.
-
-    Returns
-    -------
-    kwargs : Dictionary
-        Dictionary summarizing all the specific arguments for the benchmark, one dictionary for each classifier.
-
-        For example, for Adaboost, the KWARGS will be `{"n_estimators":<value>, "base_estimator":<value>}`"""
-
-    logging.debug("Start:\t Initializing monoview classifiers arguments")
-    kwargs = {}
-    for classifiers_name in classifiers_names:
-        try:
-            if framework == "monoview":
-                getattr(monoview_classifiers, classifiers_name)
-            else:
-                getattr(multiview_classifiers, classifiers_name)
-        except AttributeError:
-            raise AttributeError(
-                classifiers_name + " is not implemented in " + framework +
-                "_classifiers, please specify the name of the file in "
-                + framework + "_classifiers")
-        if classifiers_name in args:
-            kwargs[classifiers_name] = args[classifiers_name]
-        else:
-            kwargs[classifiers_name] = {}
-    logging.debug("Done:\t Initializing monoview classifiers arguments")
-
-    return kwargs
-
-
-def init_kwargs_func(args, benchmark):
-    """
-    Dispatches the kwargs initialization to the monoview and multiview
-    classifiers and creates the kwargs variable
-
-    Parameters
-    ----------
-    args : parsed args objects
-        All the args passed by the user.
-
-    benchmark : dict
-        The names of the mono- and multi-view classifiers to run in the benchmark
-
-    Returns
-    -------
-
-    kwargs : dict
-        The arguments for each mono- and multiview algorithm
-    """
-    monoview_kwargs = init_kwargs(args, benchmark["monoview"],
-                                  framework="monoview")
-    multiview_kwargs = init_kwargs(args, benchmark["multiview"],
-                                   framework="multiview")
-    kwargs = {"monoview": monoview_kwargs, "multiview": multiview_kwargs}
-    return kwargs
-
-
-def arange_metrics(metrics, metric_princ):
-    """Used to get the metrics list in the right order so that
-    the first one is the principal metric specified in args
-
-    Parameters
-    ----------
-    metrics : dict
-        The metrics that will be used in the benchmark
-
-    metric_princ : str
-        The name of the metric that needs to be used for the hyper-parameter
-        optimization process
-
-    Returns
-    -------
-    metrics : dict
-        The metrics dictionary, with the principal metric's key suffixed with "*"."""
-    if metric_princ in metrics:
-        metrics = dict((key, value) if not key == metric_princ else (key+"*", value) for key, value in metrics.items())
-    else:
-        raise ValueError("{} not in metric pool ({})".format(metric_princ,
-                                                             metrics))
-    return metrics
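-
-
-# Example (hypothetical metrics dict, mirroring the logic above): with
-#     metrics = {"accuracy_score": {}, "f1_score": {"average": "micro"}}
-# and metric_princ = "accuracy_score", the principal metric is starred:
-#     {"accuracy_score*": {}, "f1_score": {"average": "micro"}}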
-
-
-def benchmark_init(directory, classification_indices, labels, labels_dictionary,
-                   k_folds, dataset_var):
-    """
-    Initializes the benchmark, by saving the indices of the train
-    examples and the cross validation folds.
-
-    Parameters
-    ----------
-    directory : str
-        The benchmark's result directory
-
-    classification_indices : numpy array
-        The indices of the examples, split into train and test sets
-
-    labels : numpy array
-        The labels of the dataset
-
-    labels_dictionary : dict
-        The dictionary with labels as keys and their names as values
-
-    k_folds : sklearn.model_selection fold object (e.g. StratifiedKFold)
-        The folds for the cross-validation process
-
-    dataset_var : dataset object
-        The dataset on which the benchmark is run
-
-    Returns
-    -------
-    results_monoview : list
-        An empty list, to be filled with the monoview results
-    labels_names : list
-        The names of the labels
-    """
-    logging.debug("Start:\t Benchmark initialization")
-    secure_file_path(os.path.join(directory, "train_labels.csv"))
-    train_indices = classification_indices[0]
-    train_labels = dataset_var.get_labels(example_indices=train_indices)
-    np.savetxt(os.path.join(directory, "train_labels.csv"), train_labels,
-               delimiter=",")
-    np.savetxt(os.path.join(directory, "train_indices.csv"),
-               classification_indices[0],
-               delimiter=",")
-    results_monoview = []
-    folds = k_folds.split(np.arange(len(train_labels)), train_labels)
-    min_fold_len = int(len(train_labels) / k_folds.n_splits)
-    for fold_index, (train_cv_indices, test_cv_indices) in enumerate(folds):
-        file_name = os.path.join(directory, "folds", "test_labels_fold_" + str(
-            fold_index) + ".csv")
-        secure_file_path(file_name)
-        np.savetxt(file_name, train_labels[test_cv_indices[:min_fold_len]],
-                   delimiter=",")
-    labels_names = list(labels_dictionary.values())
-    logging.debug("Done:\t Benchmark initialization")
-    return results_monoview, labels_names
-
-
-# def exec_one_benchmark(core_index=-1, labels_dictionary=None, directory=None,
-#                      classification_indices=None, args=None,
-#                      k_folds=None, random_state=None, hyper_param_search=None,
-#                      metrics=None, argument_dictionaries=None,
-#                      benchmark=None, views=None, views_indices=None, flag=None,
-#                      labels=None,
-#                      exec_monoview_multicore=exec_monoview_multicore,
-#                      exec_multiview_multicore=exec_multiview_multicore,):
-#     """Used to run a benchmark using one core. ExecMonoview_multicore, initMultiviewArguments and
-#      exec_multiview_multicore args are only used for tests"""
-#
-#     results_monoview, labels_names = benchmark_init(directory,
-#                                                     classification_indices, labels,
-#                                                     labels_dictionary, k_folds)
-#
-#     logging.debug("Start:\t monoview benchmark")
-#     results_monoview += [
-#         exec_monoview_multicore(directory, args["name"], labels_names,
-#                                classification_indices, k_folds,
-#                                core_index, args["file_type"], args["pathf"], random_state,
-#                                labels,
-#                                hyper_param_search=hyper_param_search,
-#                                metrics=metrics,
-#                                n_iter=args["hps_iter"], **argument)
-#         for argument in argument_dictionaries["Monoview"]]
-#     logging.debug("Done:\t monoview benchmark")
-#
-#
-#     logging.debug("Start:\t multiview benchmark")
-#     results_multiview = [
-#         exec_multiview_multicore(directory, core_index, args["name"],
-#                                 classification_indices, k_folds, args["file_type"],
-#                                 args["pathf"], labels_dictionary, random_state,
-#                                 labels, hyper_param_search=hyper_param_search,
-#                                 metrics=metrics, n_iter=args["hps_iter"],
-#                                 **arguments)
-#         for arguments in argument_dictionaries["multiview"]]
-#     logging.debug("Done:\t multiview benchmark")
-#
-#     return [flag, results_monoview + results_multiview]
-#
-#
-# def exec_one_benchmark_multicore(nb_cores=-1, labels_dictionary=None,
-#                                  directory=None, classification_indices=None,
-#                                  args=None,
-#                                  k_folds=None, random_state=None,
-#                                  hyper_param_search=None, metrics=None,
-#                                  argument_dictionaries=None,
-#                                  benchmark=None, views=None, views_indices=None,
-#                                  flag=None, labels=None,
-#                                  exec_monoview_multicore=exec_monoview_multicore,
-#                                  exec_multiview_multicore=exec_multiview_multicore,):
-#     """Used to run a benchmark using multiple cores. ExecMonoview_multicore, initMultiviewArguments and
-#      exec_multiview_multicore args are only used for tests"""
-#
-#     results_monoview, labels_names = benchmark_init(directory,
-#                                                     classification_indices, labels,
-#                                                     labels_dictionary, k_folds)
-#
-#     logging.debug("Start:\t monoview benchmark")
-#     nb_experiments = len(argument_dictionaries["monoview"])
-#     nb_multicore_to_do = int(math.ceil(float(nb_experiments) / nb_cores))
-#     for step_index in range(nb_multicore_to_do):
-#         results_monoview += (Parallel(n_jobs=nb_cores)(
-#             delayed(exec_monoview_multicore)(directory, args["name"], labels_names,
-#                                             classification_indices, k_folds,
-#                                             core_index, args["file_type"], args["pathf"],
-#                                             random_state, labels,
-#                                             hyper_param_search=hyper_param_search,
-#                                             metrics=metrics,
-#                                             n_iter=args["hps_iter"],
-#                                             **argument_dictionaries["monoview"][
-#                                             core_index + step_index * nb_cores])
-#             for core_index in
-#             range(min(nb_cores, nb_experiments - step_index * nb_cores))))
-#     logging.debug("Done:\t monoview benchmark")
-#
-#     logging.debug("Start:\t multiview arguments initialization")
-#     # argument_dictionaries = initMultiviewArguments(args, benchmark, views,
-#     #                                               views_indices,
-#     #                                               argument_dictionaries,
-#     #                                               random_state, directory,
-#     #                                               resultsMonoview,
-#     #                                               classification_indices)
-#     logging.debug("Done:\t multiview arguments initialization")
-#
-#     logging.debug("Start:\t multiview benchmark")
-#     results_multiview = []
-#     nb_experiments = len(argument_dictionaries["multiview"])
-#     nb_multicore_to_do = int(math.ceil(float(nb_experiments) / nb_cores))
-#     for step_index in range(nb_multicore_to_do):
-#         results_multiview += Parallel(n_jobs=nb_cores)(
-#             delayed(exec_multiview_multicore)(directory, core_index, args["name"],
-#                                               classification_indices, k_folds,
-#                                               args["file_type"], args["Base"]["pathf"],
-#                                               labels_dictionary, random_state,
-#                                               labels,
-#                                               hyper_param_search=hyper_param_search,
-#                                               metrics=metrics,
-#                                               n_iter=args["hps_iter"],
-#                                               **
-#                                              argument_dictionaries["multiview"][
-#                                                  step_index * nb_cores + core_index])
-#             for core_index in
-#             range(min(nb_cores, nb_experiments - step_index * nb_cores)))
-#     logging.debug("Done:\t multiview benchmark")
-#
-#     return [flag, results_monoview + results_multiview]
-
-
-def exec_one_benchmark_mono_core(dataset_var=None, labels_dictionary=None,
-                                 directory=None, classification_indices=None,
-                                 args=None,
-                                 k_folds=None, random_state=None,
-                                 hyper_param_search=None, metrics=None,
-                                 argument_dictionaries=None,
-                                 benchmark=None, views=None, views_indices=None,
-                                 flag=None, labels=None,
-                                 track_tracebacks=False):  # pragma: no cover
-    results_monoview, labels_names = benchmark_init(directory,
-                                                    classification_indices,
-                                                    labels,
-                                                    labels_dictionary, k_folds,
-                                                    dataset_var)
-    logging.getLogger('matplotlib.font_manager').disabled = True
-    logging.debug("Start:\t monoview benchmark")
-    traceback_outputs = {}
-    for arguments in argument_dictionaries["monoview"]:
-        try:
-            X = dataset_var.get_v(arguments["view_index"])
-            Y = dataset_var.get_labels()
-            results_monoview += [
-                exec_monoview(directory, X, Y, args["name"], labels_names,
-                              classification_indices, k_folds,
-                              1, args["file_type"], args["pathf"], random_state,
-                              hyper_param_search=hyper_param_search,
-                              metrics=metrics,
-                              **arguments)]
-        except Exception:
-            if track_tracebacks:
-                traceback_outputs[
-                    arguments["classifier_name"] + "-" + arguments[
-                        "view_name"]] = traceback.format_exc()
-            else:
-                raise
-
-    logging.debug("Done:\t monoview benchmark")
-
-    logging.debug("Start:\t multiview arguments initialization")
-
-    # argument_dictionaries = initMultiviewArguments(args, benchmark, views,
-    #                                               views_indices,
-    #                                               argument_dictionaries,
-    #                                               random_state, directory,
-    #                                               resultsMonoview,
-    #                                               classification_indices)
-    logging.debug("Done:\t multiview arguments initialization")
-
-    logging.debug("Start:\t multiview benchmark")
-    results_multiview = []
-    for arguments in argument_dictionaries["multiview"]:
-        try:
-            results_multiview += [
-                exec_multiview(directory, dataset_var, args["name"],
-                               classification_indices,
-                               k_folds, 1, args["file_type"],
-                               args["pathf"], labels_dictionary, random_state,
-                               labels,
-                               hps_method=hyper_param_search,
-                               metrics=metrics, n_iter=args["hps_iter"],
-                               **arguments)]
-        except Exception:
-            if track_tracebacks:
-                traceback_outputs[
-                    arguments["classifier_name"]] = traceback.format_exc()
-            else:
-                raise
-    logging.debug("Done:\t multiview benchmark")
-
-    return [flag, results_monoview + results_multiview, traceback_outputs]
-
-
-def exec_benchmark(nb_cores, stats_iter,
-                   benchmark_arguments_dictionaries,
-                   directory, metrics, dataset_var, track_tracebacks,
-                   exec_one_benchmark_mono_core=exec_one_benchmark_mono_core,
-                   analyze=analyze, delete=delete_HDF5,
-                   analyze_iterations=analyze_iterations):  # pragma: no cover
-    r"""Used to execute the needed benchmark(s) on multicore or mono-core functions.
-
-    Parameters
-    ----------
-    nb_cores : int
-        Number of threads that the benchmarks can use.
-    stats_iter : int
-        Number of statistical iterations that have to be done.
-    benchmark_arguments_dictionaries : list of dictionaries
-        All the needed arguments for the benchmarks.
-    directory : string
-        Path to the main results directory.
-    metrics : list of lists
-        Metrics that will be used to evaluate the algorithms' performance.
-    dataset_var : HDF5 dataset file
-        The full dataset that will be used by the benchmark.
-    track_tracebacks : bool
-        Whether to save the tracebacks and continue when a classifier fails.
-    rest_of_the_args :
-        Just used for testing purposes
-
-
-    Returns
-    -------
-    results : list of lists
-        The results of the benchmark.
-    """
-    logging.debug("Start:\t Executing all the needed benchmarks")
-    results = []
-    # if nb_cores > 1:
-    #     if stats_iter > 1 or nb_multiclass > 1:
-    #         nb_exps_to_do = len(benchmark_arguments_dictionaries)
-    #         nb_multicore_to_do = range(int(math.ceil(float(nb_exps_to_do) / nb_cores)))
-    #         for step_index in nb_multicore_to_do:
-    #             results += (Parallel(n_jobs=nb_cores)(delayed(exec_one_benchmark)
-    #                                                  (core_index=core_index,
-    #                                                   **
-    #                                                   benchmark_arguments_dictionaries[
-    #                                                       core_index + step_index * nb_cores])
-    #                                                  for core_index in range(
-    #                 min(nb_cores, nb_exps_to_do - step_index * nb_cores))))
-    #     else:
-    #         results += [exec_one_benchmark_multicore(nb_cores=nb_cores, **
-    #         benchmark_arguments_dictionaries[0])]
-    # else:
-    for arguments in benchmark_arguments_dictionaries:
-        benchmark_results = exec_one_benchmark_mono_core(
-            dataset_var=dataset_var,
-            track_tracebacks=track_tracebacks,
-            **arguments)
-        analyze_iterations([benchmark_results],
-                           benchmark_arguments_dictionaries, stats_iter,
-                           metrics, example_ids=dataset_var.example_ids,
-                           labels=dataset_var.get_labels())
-        results += [benchmark_results]
-    logging.debug("Done:\t Executing all the needed benchmarks")
-
-    # Do everything with flagging
-    logging.debug("Start:\t Analyzing predictions")
-    results_mean_stds = analyze(results, stats_iter,
-                                benchmark_arguments_dictionaries,
-                                metrics,
-                                directory,
-                                dataset_var.example_ids,
-                                dataset_var.get_labels())
-    logging.debug("Done:\t Analyzing predictions")
-    delete(benchmark_arguments_dictionaries, nb_cores, dataset_var)
-    return results_mean_stds
-
-
-def exec_classif(arguments):  # pragma: no cover
-    """
-    Runs the benchmark with the given arguments
-
-    Parameters
-    ----------
-    arguments : list
-        The command-line arguments, e.g. ["--config_path", "/path/to/config/files/"]
-
-    Returns
-    -------
-
-    >>> exec_classif(["--config_path", "/path/to/config/files/"])
-    """
-    start = time.time()
-    args = execution.parse_the_args(arguments)
-    args = configuration.get_the_args(args.config_path)
-    os.nice(args["nice"])
-    nb_cores = args["nb_cores"]
-    if nb_cores == 1:
-        os.environ['OPENBLAS_NUM_THREADS'] = '1'
-    stats_iter = args["stats_iter"]
-    hps_method = args["hps_type"]
-    hps_kwargs = args["hps_args"]
-    cl_type = args["type"]
-    monoview_algos = args["algos_monoview"]
-    multiview_algos = args["algos_multiview"]
-    path, dataset_list = execution.find_dataset_names(args["pathf"],
-                                                       args["file_type"],
-                                                       args["name"])
-    args["pathf"] = path
-    for dataset_name in dataset_list:
-        # noise_results = []
-        # for noise_std in args["noise_std"]:
-
-        directory = execution.init_log_file(dataset_name, args["views"],
-                                            args["file_type"],
-                                            args["log"], args["debug"],
-                                            args["label"],
-                                            args["res_dir"],
-                                            args)
-
-        random_state = execution.init_random_state(args["random_state"],
-                                                   directory)
-        stats_iter_random_states = execution.init_stats_iter_random_states(
-            stats_iter,
-            random_state)
-
-        get_database = execution.get_database_function(dataset_name,
-                                                       args["file_type"])
-
-        dataset_var, labels_dictionary, datasetname = get_database(
-            args["views"],
-            args["pathf"], dataset_name,
-            args["nb_class"],
-            args["classes"],
-            random_state,
-            args["full"],
-            )
-        args["name"] = datasetname
-        splits = execution.gen_splits(dataset_var.get_labels(),
-                                      args["split"],
-                                      stats_iter_random_states)
-
-        # multiclass_labels, labels_combinations, indices_multiclass = multiclass.gen_multiclass_labels(
-        #     dataset_var.get_labels(), multiclass_method, splits)
-
-        k_folds = execution.gen_k_folds(stats_iter, args["nb_folds"],
-                                        stats_iter_random_states)
-
-        dataset_files = dataset.init_multiple_datasets(args["pathf"],
-                                                       args["name"],
-                                                       nb_cores)
-
-        views, views_indices, all_views = execution.init_views(dataset_var,
-                                                               args["views"])
-        views_dictionary = dataset_var.get_view_dict()
-        nb_views = len(views)
-        nb_class = dataset_var.get_nb_class()
-
-        metrics = args["metrics"]
-        if metrics == "all":
-            metrics_names = [name for _, name, isPackage
-                             in pkgutil.iter_modules(
-                    [os.path.join(os.path.dirname(
-                        os.path.dirname(os.path.realpath(__file__))),
-                                  'metrics')]) if
-                             not isPackage and name not in ["framework",
-                                                            "log_loss",
-                                                            "matthews_corrcoef",
-                                                            "roc_auc_score"]]
-            metrics = dict((metric_name, {})
-                           for metric_name in metrics_names)
-        metrics = arange_metrics(metrics, args["metric_princ"])
-
-        benchmark = init_benchmark(cl_type, monoview_algos, multiview_algos,)
-        init_kwargs = init_kwargs_func(args, benchmark)
-        data_base_time = time.time() - start
-        argument_dictionaries = init_argument_dictionaries(
-            benchmark, views_dictionary,
-            nb_class, init_kwargs, hps_method, hps_kwargs)
-        # argument_dictionaries = initMonoviewExps(benchmark, viewsDictionary,
-        #                                         NB_CLASS, initKWARGS)
-        directories = execution.gen_direcorties_names(directory, stats_iter)
-        benchmark_argument_dictionaries = execution.gen_argument_dictionaries(
-            labels_dictionary, directories,
-            splits,
-            hps_method, args, k_folds,
-            stats_iter_random_states, metrics,
-            argument_dictionaries, benchmark,
-            views, views_indices)
-        results_mean_stds = exec_benchmark(
-            nb_cores, stats_iter,
-            benchmark_argument_dictionaries, directory, metrics,
-            dataset_var,
-            args["track_tracebacks"])
-        # noise_results.append([noise_std, results_mean_stds])
-        # plot_results_noise(directory, noise_results, metrics[0][0],
-        #                    dataset_name)
diff --git a/multiview_platform/mono_multi_view_classifiers/metrics/__init__.py b/multiview_platform/mono_multi_view_classifiers/metrics/__init__.py
deleted file mode 100644
index 4a7ca0b0f318e8483b6bc7cb464621ea27257f05..0000000000000000000000000000000000000000
--- a/multiview_platform/mono_multi_view_classifiers/metrics/__init__.py
+++ /dev/null
@@ -1,33 +0,0 @@
-__version__ = "0.0.0.0"
-"""
-To be able to add another metric to the benchmark you must :
-
-Create a .py file named after the metric
-Define a score function
-    Input :
-        y_true : np array with the real labels
-        y_pred : np array with the predicted labels
-        kwargs : every argument that is specific to the metric
-    Returns:
-        score : the metric's score (float)
-Define a get_scorer function
-    Input :
-        kwargs : every argument that is specific to the metric
-    Returns :
-        scorer : an object similar to an sk-learn scorer
-Define a get_config function
-    Input :
-        kwargs : every argument that is specific to the metric
-    Output :
-        config_string : A string that gives the name of the metric and explains how it is configured. It must end with
-                        (lower is better) or (higher is better) so the predictions can be analyzed.
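-
-For illustration, a minimal sketch of such a module (a hypothetical
-``my_metric.py``, modeled on the existing modules in this package)::
-
-    from sklearn.metrics import accuracy_score as metric
-    from sklearn.metrics import make_scorer
-
-    def score(y_true, y_pred, **kwargs):
-        return metric(y_true, y_pred, **kwargs)
-
-    def get_scorer(**kwargs):
-        return make_scorer(metric, greater_is_better=True, **kwargs)
-
-    def get_config(**kwargs):
-        return "My metric using {} (higher is better)".format(kwargs)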
-"""
-
-import os
-
-for module in os.listdir(os.path.dirname(os.path.realpath(__file__))):
-    if module in ['__init__.py'] or module[-3:] != '.py':
-        continue
-    __import__(module[:-3], locals(), globals(), [], 1)
-del module
-del os
diff --git a/multiview_platform/mono_multi_view_classifiers/metrics/accuracy_score.py b/multiview_platform/mono_multi_view_classifiers/metrics/accuracy_score.py
deleted file mode 100644
index e9faae69ed7dd7c8a33dabbd43da6f78a80b7ab7..0000000000000000000000000000000000000000
--- a/multiview_platform/mono_multi_view_classifiers/metrics/accuracy_score.py
+++ /dev/null
@@ -1,45 +0,0 @@
-"""Functions :
- score: to get the accuracy score
- get_scorer: returns a sklearn scorer for grid search
-"""
-
-import warnings
-
-from sklearn.metrics import accuracy_score as metric
-from sklearn.metrics import make_scorer
-
-warnings.warn("the accuracy_score module  is deprecated", DeprecationWarning,
-              stacklevel=2)
-
-# Author-Info
-__author__ = "Baptiste Bauvin"
-__status__ = "Prototype"  # Production, Development, Prototype
-
-
-def score(y_true, y_pred, multiclass=False, **kwargs):
-    """Arguments:
-    y_true: real labels
-    y_pred: predicted labels
-
-    Keyword Arguments:
-    "0": weights to compute accuracy
-
-    Returns:
-    Weighted accuracy score for y_true, y_pred"""
-    score = metric(y_true, y_pred, **kwargs)
-    return score
-
-
-def get_scorer(**kwargs):
-    """Keyword Arguments:
-    "0": weights to compute accuracy
-
-    Returns:
-    A weighted sklearn scorer for accuracy"""
-    return make_scorer(metric, greater_is_better=True,
-                       **kwargs)
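-
-# Example usage (illustrative only; `estimator` and `param_grid` are assumed
-# to come from the caller's code):
-#   from sklearn.model_selection import GridSearchCV
-#   GridSearchCV(estimator, param_grid, scoring=get_scorer())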
-
-
-def get_config(**kwargs):
-    config_string = "Accuracy score using {}, (higher is better)".format(kwargs)
-    return config_string
diff --git a/multiview_platform/mono_multi_view_classifiers/metrics/f1_score.py b/multiview_platform/mono_multi_view_classifiers/metrics/f1_score.py
deleted file mode 100644
index 6b9b89df0e5556ea89617f558d309e113fbf47d0..0000000000000000000000000000000000000000
--- a/multiview_platform/mono_multi_view_classifiers/metrics/f1_score.py
+++ /dev/null
@@ -1,31 +0,0 @@
-"""Functions :
- score: to get the f1 score
- get_scorer: returns a sklearn scorer for grid search
-"""
-
-import warnings
-
-from sklearn.metrics import f1_score as metric
-from sklearn.metrics import make_scorer
-
-warnings.warn("the f1_score module  is deprecated", DeprecationWarning,
-              stacklevel=2)
-# Author-Info
-__author__ = "Baptiste Bauvin"
-__status__ = "Prototype"  # Production, Development, Prototype
-
-
-def score(y_true, y_pred, multiclass=True, average='micro', **kwargs):
-    score = metric(y_true, y_pred, average=average, **kwargs)
-    return score
-
-
-def get_scorer(average="micro", **kwargs):
-    return make_scorer(metric, greater_is_better=True, average=average,
-                       **kwargs)
-
-
-def get_config(average="micro", **kwargs, ):
-    config_string = "F1 score using average: {}, {} (higher is better)".format(
-        average, kwargs)
-    return config_string
diff --git a/multiview_platform/mono_multi_view_classifiers/metrics/fbeta_score.py b/multiview_platform/mono_multi_view_classifiers/metrics/fbeta_score.py
deleted file mode 100644
index 60a5141aa538ad4d204a705c18085de876066173..0000000000000000000000000000000000000000
--- a/multiview_platform/mono_multi_view_classifiers/metrics/fbeta_score.py
+++ /dev/null
@@ -1,27 +0,0 @@
-import warnings
-
-from sklearn.metrics import fbeta_score as metric
-from sklearn.metrics import make_scorer
-
-warnings.warn("the fbeta_score module is deprecated", DeprecationWarning,
-              stacklevel=2)
-
-# Author-Info
-__author__ = "Baptiste Bauvin"
-__status__ = "Prototype"  # Production, Development, Prototype
-
-
-def score(y_true, y_pred, beta=2.0, average="micro", **kwargs):
-    score = metric(y_true, y_pred, beta=beta, average=average, **kwargs)
-    return score
-
-
-def get_scorer(beta=2.0, average="micro", **kwargs):
-    return make_scorer(metric, greater_is_better=True, beta=beta,
-                       average=average, **kwargs)
-
-
-def get_config(beta=2.0, average="micro", **kwargs):
-    config_string = "F-beta score using beta: {}, average: {}, {} (higher is better)".format(
-        beta, average, kwargs)
-    return config_string
diff --git a/multiview_platform/mono_multi_view_classifiers/metrics/hamming_loss.py b/multiview_platform/mono_multi_view_classifiers/metrics/hamming_loss.py
deleted file mode 100644
index 665dd243721d3d93e121b7d010f21c44dc3c528c..0000000000000000000000000000000000000000
--- a/multiview_platform/mono_multi_view_classifiers/metrics/hamming_loss.py
+++ /dev/null
@@ -1,24 +0,0 @@
-import warnings
-
-from sklearn.metrics import hamming_loss as metric
-from sklearn.metrics import make_scorer
-
-warnings.warn("the hamming_loss module  is deprecated", DeprecationWarning,
-              stacklevel=2)
-# Author-Info
-__author__ = "Baptiste Bauvin"
-__status__ = "Prototype"  # Production, Development, Prototype
-
-
-def score(y_true, y_pred, multiclass=False, **kwargs):
-    score = metric(y_true, y_pred, **kwargs)
-    return score
-
-
-def get_scorer(**kwargs):
-    return make_scorer(metric, greater_is_better=False, **kwargs)
-
-
-def get_config(**kwargs):
-    config_string = "Hamming loss using {} (lower is better)".format(kwargs)
-    return config_string
diff --git a/multiview_platform/mono_multi_view_classifiers/metrics/jaccard_score.py b/multiview_platform/mono_multi_view_classifiers/metrics/jaccard_score.py
deleted file mode 100644
index 248ec66d70b0d46ae8bc4ffbbe624a22008eebc6..0000000000000000000000000000000000000000
--- a/multiview_platform/mono_multi_view_classifiers/metrics/jaccard_score.py
+++ /dev/null
@@ -1,27 +0,0 @@
-import warnings
-
-from sklearn.metrics import jaccard_score as metric
-from sklearn.metrics import make_scorer
-
-warnings.warn("the jaccard_similarity_score module  is deprecated",
-              DeprecationWarning,
-              stacklevel=2)
-# Author-Info
-__author__ = "Baptiste Bauvin"
-__status__ = "Prototype"  # Production, Development, Prototype
-
-
-def score(y_true, y_pred, multiclass=False, **kwargs):
-    score = metric(y_true, y_pred, **kwargs)
-    return score
-
-
-def get_scorer(**kwargs):
-    return make_scorer(metric, greater_is_better=True,
-                       **kwargs)
-
-
-def get_config(**kwargs):
-    config_string = "Jaccard_similarity score using {} (higher is better)".format(
-        kwargs)
-    return config_string
diff --git a/multiview_platform/mono_multi_view_classifiers/metrics/log_loss.py b/multiview_platform/mono_multi_view_classifiers/metrics/log_loss.py
deleted file mode 100644
index 2b5ab917d973e9a1e62437ea497c0a40d75b81e3..0000000000000000000000000000000000000000
--- a/multiview_platform/mono_multi_view_classifiers/metrics/log_loss.py
+++ /dev/null
@@ -1,25 +0,0 @@
-import warnings
-
-from sklearn.metrics import log_loss as metric
-from sklearn.metrics import make_scorer
-
-warnings.warn("the log_loss module  is deprecated", DeprecationWarning,
-              stacklevel=2)
-# Author-Info
-__author__ = "Baptiste Bauvin"
-__status__ = "Prototype"  # Production, Development, Prototype
-
-
-def score(y_true, y_pred, multiclass=False, **kwargs):
-    score = metric(y_true, y_pred, **kwargs)
-    return score
-
-
-def get_scorer(**kwargs):
-    return make_scorer(metric, greater_is_better=False,
-                       **kwargs)
-
-
-def get_config(**kwargs):
-    config_string = "Log loss using {} (lower is better)".format(kwargs)
-    return config_string
diff --git a/multiview_platform/mono_multi_view_classifiers/metrics/matthews_corrcoef.py b/multiview_platform/mono_multi_view_classifiers/metrics/matthews_corrcoef.py
deleted file mode 100644
index b3b8ec6c125a867cf3a1c4a1f9b41b51ed4129c8..0000000000000000000000000000000000000000
--- a/multiview_platform/mono_multi_view_classifiers/metrics/matthews_corrcoef.py
+++ /dev/null
@@ -1,24 +0,0 @@
-import warnings
-
-from sklearn.metrics import make_scorer
-from sklearn.metrics import matthews_corrcoef as metric
-
-warnings.warn("the matthews_corrcoef module  is deprecated", DeprecationWarning,
-              stacklevel=2)
-# Author-Info
-__author__ = "Baptiste Bauvin"
-__status__ = "Prototype"  # Production, Development, Prototype
-
-
-def score(y_true, y_pred, multiclass=False, **kwargs):
-    score = metric(y_true, y_pred)
-    return score
-
-
-def get_scorer(**kwargs):
-    return make_scorer(metric, greater_is_better=True)
-
-
-def get_config(**kwargs):
-    config_string = "Matthews correlation coefficient (higher is better)"
-    return config_string
diff --git a/multiview_platform/mono_multi_view_classifiers/metrics/precision_score.py b/multiview_platform/mono_multi_view_classifiers/metrics/precision_score.py
deleted file mode 100644
index d1c861f91a39441a961ff2ff2ef3e79aafbe060e..0000000000000000000000000000000000000000
--- a/multiview_platform/mono_multi_view_classifiers/metrics/precision_score.py
+++ /dev/null
@@ -1,26 +0,0 @@
-import warnings
-
-from sklearn.metrics import make_scorer
-from sklearn.metrics import precision_score as metric
-
-warnings.warn("the precision_score module  is deprecated", DeprecationWarning,
-              stacklevel=2)
-# Author-Info
-__author__ = "Baptiste Bauvin"
-__status__ = "Prototype"  # Production, Development, Prototype
-
-
-def score(y_true, y_pred, average='micro', multiclass=False, **kwargs):
-    score = metric(y_true, y_pred, average=average, **kwargs)
-    return score
-
-
-def get_scorer(average='micro', **kwargs):
-    return make_scorer(metric, greater_is_better=True,
-                       average=average, **kwargs)
-
-
-def get_config(average='micro', **kwargs):
-    config_string = "Precision score using average: {}, {} (higher is better)".format(
-        average, kwargs)
-    return config_string
diff --git a/multiview_platform/mono_multi_view_classifiers/metrics/recall_score.py b/multiview_platform/mono_multi_view_classifiers/metrics/recall_score.py
deleted file mode 100644
index 261261990b060b3b759e6013647f3285fd9c9e2c..0000000000000000000000000000000000000000
--- a/multiview_platform/mono_multi_view_classifiers/metrics/recall_score.py
+++ /dev/null
@@ -1,26 +0,0 @@
-import warnings
-
-from sklearn.metrics import make_scorer
-from sklearn.metrics import recall_score as metric
-
-warnings.warn("the recall_score module  is deprecated", DeprecationWarning,
-              stacklevel=2)
-# Author-Info
-__author__ = "Baptiste Bauvin"
-__status__ = "Prototype"  # Production, Development, Prototype
-
-
-def score(y_true, y_pred, average='micro', **kwargs):
-    score = metric(y_true, y_pred, average=average, **kwargs)
-    return score
-
-
-def get_scorer(average='micro', **kwargs):
-    return make_scorer(metric, greater_is_better=True,
-                       average=average, **kwargs)
-
-
-def get_config(average="micro", **kwargs):
-    configString = "Recall score using average: {}, {} (higher is better)".format(
-        average, kwargs)
-    return configString
diff --git a/multiview_platform/mono_multi_view_classifiers/metrics/roc_auc_score.py b/multiview_platform/mono_multi_view_classifiers/metrics/roc_auc_score.py
deleted file mode 100644
index ae21428b347caef47dc3bcc596404ea6d85c5dd5..0000000000000000000000000000000000000000
--- a/multiview_platform/mono_multi_view_classifiers/metrics/roc_auc_score.py
+++ /dev/null
@@ -1,26 +0,0 @@
-import warnings
-
-from sklearn.metrics import make_scorer
-from sklearn.metrics import roc_auc_score as metric
-from sklearn.preprocessing import MultiLabelBinarizer
-
-warnings.warn("the roc_auc_score module  is deprecated", DeprecationWarning,
-              stacklevel=2)
-# Author-Info
-__author__ = "Baptiste Bauvin"
-__status__ = "Prototype"  # Production, Development, Prototype
-
-
-def score(y_true, y_pred, multiclass=False, **kwargs):
-    score = metric(y_true, y_pred, **kwargs)
-    return score
-
-
-def get_scorer(**kwargs):
-    return make_scorer(metric, greater_is_better=True,
-                       **kwargs)
-
-
-def get_config(**kwargs):
-    configString = "ROC_AUC score using {}".format(kwargs)
-    return configString
diff --git a/multiview_platform/mono_multi_view_classifiers/metrics/zero_one_loss.py b/multiview_platform/mono_multi_view_classifiers/metrics/zero_one_loss.py
deleted file mode 100644
index e3a3449247edf934251ddbc4dbb8283bbf632746..0000000000000000000000000000000000000000
--- a/multiview_platform/mono_multi_view_classifiers/metrics/zero_one_loss.py
+++ /dev/null
@@ -1,26 +0,0 @@
-import warnings
-
-from sklearn.metrics import make_scorer
-from sklearn.metrics import zero_one_loss as metric
-
-warnings.warn("the zero_one_loss module  is deprecated", DeprecationWarning,
-              stacklevel=2)
-
-# Author-Info
-__author__ = "Baptiste Bauvin"
-__status__ = "Prototype"  # Production, Development, Prototype
-
-
-def score(y_true, y_pred, multiclass=False, **kwargs):
-    score = metric(y_true, y_pred, **kwargs)
-    return score
-
-
-def get_scorer(**kwargs):
-    return make_scorer(metric, greater_is_better=False,
-                       **kwargs)
-
-
-def get_config(**kwargs):
-    configString = "Zero_one loss using {} (lower is better)".format(kwargs)
-    return configString
diff --git a/multiview_platform/mono_multi_view_classifiers/monoview/__init__.py b/multiview_platform/mono_multi_view_classifiers/monoview/__init__.py
deleted file mode 100644
index e94c149514edbf920daebd101e425a0e22c03d02..0000000000000000000000000000000000000000
--- a/multiview_platform/mono_multi_view_classifiers/monoview/__init__.py
+++ /dev/null
@@ -1 +0,0 @@
-# from . import ExecClassifMonoView, MonoviewUtils, analyzeResult
diff --git a/multiview_platform/mono_multi_view_classifiers/monoview/exec_classif_mono_view.py b/multiview_platform/mono_multi_view_classifiers/monoview/exec_classif_mono_view.py
deleted file mode 100644
index eed469493e0bb863b2940683a022678675ca15bb..0000000000000000000000000000000000000000
--- a/multiview_platform/mono_multi_view_classifiers/monoview/exec_classif_mono_view.py
+++ /dev/null
@@ -1,255 +0,0 @@
-#!/usr/bin/env python
-
-""" Execution: Script to perform a MonoView classification """
-
-# Import built-in modules
-import logging  # to create log files
-import os  # to get the path of the running script
-import time  # for time measurements
-
-# Import 3rd party modules
-import h5py
-import numpy as np  # for array operations
-
-from .monoview_utils import MonoviewResult, MonoviewResultAnalyzer
-# Import own modules
-from .. import monoview_classifiers
-from ..utils import hyper_parameter_search
-from ..utils.dataset import extract_subset, HDF5Dataset
-from ..utils.multiclass import get_mc_estim
-from ..utils.organization import secure_file_path
-
-# Author-Info
-__author__ = "Nikolas Huelsmann, Baptiste BAUVIN"
-__status__ = "Prototype"  # Production, Development, Prototype
-
-
-# __date__ = 2016 - 03 - 25
-
-
-def exec_monoview_multicore(directory, name, labels_names,
-                            classification_indices,
-                            k_folds, dataset_file_index, database_type,
-                            path, random_state, labels,
-                            hyper_param_search="randomized_search",
-                            metrics=[["accuracy_score", None]], n_iter=30,
-                            **args): # pragma: no cover
-    dataset_var = HDF5Dataset(
-        hdf5_file=h5py.File(
-            os.path.join(path, name + str(dataset_file_index) + ".hdf5"),
-            "r"))
-    needed_view_index = args["view_index"]
-    X = dataset_var.get_v(needed_view_index)
-    Y = labels
-    return exec_monoview(directory, X, Y, name, labels_names,
-                         classification_indices, k_folds, 1, database_type,
-                         path,
-                         random_state, hyper_param_search=hyper_param_search,
-                         metrics=metrics, n_iter=n_iter,
-                         view_name=dataset_var.get_view_name(
-                             args["view_index"]),
-                         **args)
-
-
-def exec_monoview(directory, X, Y, database_name, labels_names, classification_indices,
-                  k_folds, nb_cores, databaseType, path,
-                  random_state, hyper_param_search="Random",
-                  metrics={"accuracy_score*":{}}, n_iter=30, view_name="",
-                  hps_kwargs={}, **args):
-    logging.debug("Start:\t Loading data")
-    (kwargs, t_start, view_name, classifier_name, X, learningRate,
-     labelsString, output_file_name, directory, base_file_name) = \
-        init_constants(args, X, classification_indices, labels_names,
-                       database_name, directory, view_name)
-    logging.debug("Done:\t Loading data")
-
-    logging.debug(
-        "Info:\t Classification - Database:" + str(database_name) + " View:" + str(
-            view_name) + " train ratio:"
-        + str(learningRate) + ", CrossValidation k-folds: " + str(
-            k_folds.n_splits) + ", cores:"
-        + str(nb_cores) + ", algorithm : " + classifier_name)
-
-    logging.debug("Start:\t Determine Train/Test split")
-    X_train, y_train, X_test, y_test = init_train_test(X, Y,
-                                                       classification_indices)
-
-    logging.debug("Info:\t Shape X_train:" + str(
-        X_train.shape) + ", Length of y_train:" + str(len(y_train)))
-    logging.debug("Info:\t Shape X_test:" + str(
-        X_test.shape) + ", Length of y_test:" + str(len(y_test)))
-    logging.debug("Done:\t Determine Train/Test split")
-
-    logging.debug("Start:\t Generate classifier args")
-    classifier_module = getattr(monoview_classifiers, classifier_name)
-    classifier_class_name = classifier_module.classifier_class_name
-    hyper_param_beg = time.monotonic()
-    cl_kwargs = get_hyper_params(classifier_module, hyper_param_search,
-                                 classifier_name, classifier_class_name,
-                                 X_train, y_train, random_state,
-                                 output_file_name, k_folds, nb_cores,
-                                 metrics, kwargs, **hps_kwargs)
-    hyper_param_duration = time.monotonic() - hyper_param_beg
-    logging.debug("Done:\t Generate classifier args")
-
-    logging.debug("Start:\t Training")
-
-    classifier = get_mc_estim(getattr(classifier_module,
-                                      classifier_class_name)
-                              (random_state, **cl_kwargs),
-                              random_state,
-                              y=Y)
-    fit_beg = time.monotonic()
-    classifier.fit(X_train, y_train)  # NB_CORES=nbCores,
-    fit_duration = time.monotonic() - fit_beg
-    logging.debug("Done:\t Training")
-
-    logging.debug("Start:\t Predicting")
-    train_pred = classifier.predict(X_train)
-    pred_beg = time.monotonic()
-    test_pred = classifier.predict(X_test)
-    pred_duration = time.monotonic() - pred_beg
-
-    # Filling the full prediction in the right order
-    full_pred = np.zeros(Y.shape, dtype=int) - 100
-    for trainIndex, index in enumerate(classification_indices[0]):
-        full_pred[index] = train_pred[trainIndex]
-    for testIndex, index in enumerate(classification_indices[1]):
-        full_pred[index] = test_pred[testIndex]
-
-    logging.debug("Done:\t Predicting")
-
-    whole_duration = time.monotonic() - t_start
-    logging.debug(
-        "Info:\t Duration for training and predicting: " + str(whole_duration) + "[s]")
-
-    logging.debug("Start:\t Getting results")
-    result_analyzer = MonoviewResultAnalyzer(view_name=view_name,
-                                             classifier_name=classifier_name,
-                                             shape=X.shape,
-                                             classifier=classifier,
-                                             classification_indices=classification_indices,
-                                             k_folds=k_folds,
-                                             hps_method=hyper_param_search,
-                                             metrics_dict=metrics,
-                                             n_iter=n_iter,
-                                             class_label_names=labels_names,
-                                             pred=full_pred,
-                                             directory=directory,
-                                             base_file_name=base_file_name,
-                                             labels=Y,
-                                             database_name=database_name,
-                                             nb_cores=nb_cores,
-                                             duration=whole_duration)
-    string_analysis, images_analysis, metrics_scores, class_metrics_scores, \
-    confusion_matrix = result_analyzer.analyze()
-    logging.debug("Done:\t Getting results")
-
-    logging.debug("Start:\t Saving preds")
-    save_results(string_analysis, output_file_name, full_pred, train_pred,
-                 y_train, images_analysis, y_test, confusion_matrix)
-    logging.info("Done:\t Saving results")
-
-    view_index = args["view_index"]
-    return MonoviewResult(view_index, classifier_name, view_name,
-                          metrics_scores, full_pred, cl_kwargs,
-                          classifier, X_train.shape[1],
-                          hyper_param_duration, fit_duration, pred_duration, class_metrics_scores)
-
-
-def init_constants(args, X, classification_indices, labels_names,
-                   name, directory, view_name):
-    try:
-        kwargs = args["args"]
-    except KeyError:
-        kwargs = args
-    t_start = time.monotonic()
-    cl_type = kwargs["classifier_name"]
-    learning_rate = float(len(classification_indices[0])) / (
-            len(classification_indices[0]) + len(classification_indices[1]))
-    labels_string = "-".join(labels_names)
-    cl_type_string = cl_type
-    directory = os.path.join(directory, cl_type_string, view_name,)
-    base_file_name = cl_type_string + '-' + name + "-" + view_name + "-"
-    output_file_name = os.path.join(directory, base_file_name)
-    secure_file_path(output_file_name)
-    return (kwargs, t_start, view_name, cl_type, X, learning_rate,
-            labels_string, output_file_name, directory, base_file_name)
-
-
-def init_train_test(X, Y, classification_indices):
-    train_indices, test_indices = classification_indices
-    X_train = extract_subset(X, train_indices)
-    X_test = extract_subset(X, test_indices)
-    y_train = Y[train_indices]
-    y_test = Y[test_indices]
-    return X_train, y_train, X_test, y_test
-
-
-def get_hyper_params(classifier_module, search_method, classifier_module_name,
-                     classifier_class_name, X_train, y_train,
-                     random_state,
-                     output_file_name, k_folds, nb_cores, metrics, kwargs,
-                     **hps_kwargs):
-    if search_method != "None":
-        logging.debug(
-            "Start:\t " + search_method + " best settings for " + classifier_module_name)
-        classifier_hp_search = getattr(hyper_parameter_search, search_method)
-        estimator = getattr(classifier_module, classifier_class_name)(
-                    random_state=random_state,
-                    **kwargs[classifier_module_name])
-        estimator = get_mc_estim(estimator, random_state,
-                                 multiview=False, y=y_train)
-        hps = classifier_hp_search(estimator, scoring=metrics, cv=k_folds,
-                                   random_state=random_state,
-                                   framework="monoview", n_jobs=nb_cores,
-                                   **hps_kwargs)
-        hps.fit(X_train, y_train, **kwargs[classifier_module_name])
-        cl_kwargs = hps.get_best_params()
-        hps.gen_report(output_file_name)
-        logging.debug("Done:\t " + search_method + " best settings")
-    else:
-        cl_kwargs = kwargs[classifier_module_name]
-    return cl_kwargs
-
-
-def save_results(string_analysis, output_file_name, full_labels_pred,
-                 y_train_pred,
-                 y_train, images_analysis, y_test, confusion_matrix): # pragma: no cover
-    logging.info(string_analysis)
-    with open(output_file_name + 'summary.txt', 'w',
-              encoding="utf-8") as output_text_file:
-        output_text_file.write(string_analysis)
-    np.savetxt(output_file_name + "confusion_matrix.csv", confusion_matrix,
-               delimiter=', ')
-    np.savetxt(output_file_name + "full_pred.csv",
-               full_labels_pred.astype(np.int16), delimiter=",")
-    np.savetxt(output_file_name + "train_pred.csv",
-               y_train_pred.astype(np.int16),
-               delimiter=",")
-    np.savetxt(output_file_name + "train_labels.csv", y_train.astype(np.int16),
-               delimiter=",")
-    np.savetxt(output_file_name + "test_labels.csv", y_test.astype(np.int16),
-               delimiter=",")
-
-    if images_analysis is not None:
-        for image_name in images_analysis:
-            if os.path.isfile(output_file_name + image_name + ".png"):
-                for i in range(1, 20):
-                    test_file_name = output_file_name + image_name + "-" + str(
-                        i) + ".png"
-                    if not os.path.isfile(test_file_name):
-                        images_analysis[image_name].savefig(test_file_name,
-                                                            transparent=True)
-                        break
-            else:
-                images_analysis[image_name].savefig(
-                    output_file_name + image_name + '.png', transparent=True)
diff --git a/multiview_platform/mono_multi_view_classifiers/monoview/monoview_utils.py b/multiview_platform/mono_multi_view_classifiers/monoview/monoview_utils.py
deleted file mode 100644
index dcecfa6fcbfd9ff82281831f6c17aa55b2acbef2..0000000000000000000000000000000000000000
--- a/multiview_platform/mono_multi_view_classifiers/monoview/monoview_utils.py
+++ /dev/null
@@ -1,231 +0,0 @@
-import pickle
-import os
-import matplotlib.pyplot as plt
-import numpy as np
-from matplotlib.ticker import FuncFormatter
-from scipy.stats import uniform, randint
-
-from ..utils.base import BaseClassifier, ResultAnalyser
-from ..utils.hyper_parameter_search import CustomRandint, CustomUniform
-
-# Author-Info
-__author__ = "Baptiste Bauvin"
-__status__ = "Prototype"  # Production, Development, Prototype
-
-
-# __date__ = 2016 - 03 - 25
-
-def change_label_to_minus(y):
-    """
-    Replace every 0 label by -1.
-
-    Parameters
-    ----------
-    y : numpy array
-        The label array to convert.
-
-    Returns
-    -------
-    A copy of y with -1 instead of 0.
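-
-    Examples
-    --------
-    A minimal illustration of the conversion:
-
-    >>> import numpy as np
-    >>> change_label_to_minus(np.array([0, 1, 0]))
-    array([-1,  1, -1])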
-
-    """
-    minus_y = np.copy(y)
-    minus_y[np.where(y == 0)] = -1
-    return minus_y
-
-
-def change_label_to_zero(y):
-    """
-    Replace every -1 label by 0.
-
-    Parameters
-    ----------
-    y : numpy array
-        The label array to convert.
-
-    Returns
-    -------
-    A copy of y with 0 instead of -1.
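-
-    Examples
-    --------
-    A minimal illustration of the conversion:
-
-    >>> import numpy as np
-    >>> change_label_to_zero(np.array([-1, 1, -1]))
-    array([0, 1, 0])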
-    """
-    zeroed_y = np.copy(y)
-    zeroed_y[np.where(y == -1)] = 0
-    return zeroed_y
-
-
-def compute_possible_combinations(params_dict):
-    n_possibs = np.ones(len(params_dict)) * np.inf
-    for value_index, value in enumerate(params_dict.values()):
-        if type(value) == list:
-            n_possibs[value_index] = len(value)
-        elif isinstance(value, CustomRandint):
-            n_possibs[value_index] = value.get_nb_possibilities()
-    return n_possibs
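-
-# For illustration only (hypothetical call, not part of the original module):
-# compute_possible_combinations({"max_depth": [1, 5, 10],
-#                                "criterion": ["gini", "entropy"]})
-# returns array([3., 2.]): one entry per parameter, and inf for values that
-# are neither lists nor CustomRandint instances.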
-
-
-def gen_test_folds_preds(X_train, y_train, KFolds, estimator):
-    test_folds_preds = []
-    train_index = np.arange(len(y_train))
-    folds = KFolds.split(train_index, y_train)
-    fold_lengths = np.zeros(KFolds.n_splits, dtype=int)
-    for fold_index, (train_indices, test_indices) in enumerate(folds):
-        fold_lengths[fold_index] = len(test_indices)
-        estimator.fit(X_train[train_indices], y_train[train_indices])
-        test_folds_preds.append(estimator.predict(X_train[test_indices]))
-    min_fold_length = fold_lengths.min()
-    test_folds_preds = np.array(
-        [test_fold_preds[:min_fold_length] for test_fold_preds in
-         test_folds_preds])
-    return test_folds_preds
-
-
-# class CustomRandint:
-#     """Used as a distribution returning a integer between low and high-1.
-#     It can be used with a multiplier agrument to be able to perform more complex generation
-#     for example 10 e -(randint)"""
-#
-#     def __init__(self, low=0, high=0, multiplier=""):
-#         self.randint = randint(low, high)
-#         self.multiplier = multiplier
-#
-#     def rvs(self, random_state=None):
-#         randinteger = self.randint.rvs(random_state=random_state)
-#         if self.multiplier == "e-":
-#             return 10 ** -randinteger
-#         else:
-#             return randinteger
-#
-#     def get_nb_possibilities(self):
-#         return self.randint.b - self.randint.a
-#
-#
-# class CustomUniform:
-#     """Used as a distribution returning a float between loc and loc + scale..
-#         It can be used with a multiplier agrument to be able to perform more complex generation
-#         for example 10 e -(float)"""
-#
-#     def __init__(self, loc=0, state=1, multiplier=""):
-#         self.uniform = uniform(loc, state)
-#         self.multiplier = multiplier
-#
-#     def rvs(self, random_state=None):
-#         unif = self.uniform.rvs(random_state=random_state)
-#         if self.multiplier == 'e-':
-#             return 10 ** -unif
-#         else:
-#             return unif
-
-
-class BaseMonoviewClassifier(BaseClassifier):
-
-    def get_feature_importance(self, directory, base_file_name, nb_considered_feats=50):
-        """Used to generate a graph and a pickle dictionary representing
-        feature importances"""
-        feature_importances = self.feature_importances_
-        sorted_args = np.argsort(-feature_importances)
-        feature_importances_sorted = feature_importances[sorted_args][
-                                     :nb_considered_feats]
-        feature_indices_sorted = sorted_args[:nb_considered_feats]
-        fig, ax = plt.subplots()
-        x = np.arange(len(feature_indices_sorted))
-        formatter = FuncFormatter(percent)
-        ax.yaxis.set_major_formatter(formatter)
-        plt.bar(x, feature_importances_sorted)
-        plt.title("Importance depending on feature")
-        fig.savefig(os.path.join(directory,
-                                 base_file_name + "feature_importances.png"),
-                    transparent=True)
-        plt.close()
-        features_importances_dict = dict((featureIndex, featureImportance)
-                                         for featureIndex, featureImportance in
-                                         enumerate(feature_importances)
-                                         if featureImportance != 0)
-        with open(os.path.join(directory, 'feature_importances.pickle'),
-                  'wb') as handle:
-            pickle.dump(features_importances_dict, handle)
-        interpret_string = "Feature importances : \n"
-        for featureIndex, featureImportance in zip(feature_indices_sorted,
-                                                   feature_importances_sorted):
-            if featureImportance > 0:
-                interpret_string += "- Feature index : " + str(featureIndex) + \
-                                    ", feature importance : " + str(
-                    featureImportance) + "\n"
-        return interpret_string
-
-    def get_name_for_fusion(self):
-        return self.__class__.__name__[:4]
-
-
-def percent(x, pos):
-    """Used to print percentage of importance on the y axis"""
-    return '%1.1f %%' % (x * 100)
-
-
-class MonoviewResult(object):
-    def __init__(self, view_index, classifier_name, view_name, metrics_scores,
-                 full_labels_pred, classifier_config,
-                 classifier, n_features, hps_duration, fit_duration,
-                 pred_duration, class_metric_scores):
-        self.view_index = view_index
-        self.classifier_name = classifier_name
-        self.view_name = view_name
-        self.metrics_scores = metrics_scores
-        self.full_labels_pred = full_labels_pred
-        self.classifier_config = classifier_config
-        self.clf = classifier
-        self.n_features = n_features
-        self.hps_duration = hps_duration
-        self.fit_duration = fit_duration
-        self.pred_duration = pred_duration
-        self.class_metric_scores = class_metric_scores
-
-    def get_classifier_name(self):
-        return self.classifier_name + "-" + self.view_name
-
-
-def get_accuracy_graph(plotted_data, classifier_name, file_name,
-                       name="Accuracies", bounds=None, bound_name=None,
-                       boosting_bound=None, set="train", zero_to_one=True): # pragma: no cover
-    if not isinstance(name, str):
-        name = " ".join(name.getConfig().strip().split(" ")[:2])
-    f, ax = plt.subplots(nrows=1, ncols=1)
-    if zero_to_one:
-        ax.set_ylim(bottom=0.0, top=1.0)
-    ax.set_title(name + " during " + set + " for " + classifier_name)
-    x = np.arange(len(plotted_data))
-    scat = ax.scatter(x, np.array(plotted_data), marker=".")
-    if bounds:
-        if boosting_bound:
-            scat2 = ax.scatter(x, boosting_bound, marker=".")
-            scat3 = ax.scatter(x, np.array(bounds), marker=".", )
-            ax.legend((scat, scat2, scat3),
-                      (name, "Boosting bound", bound_name))
-        else:
-            scat2 = ax.scatter(x, np.array(bounds), marker=".", )
-            ax.legend((scat, scat2),
-                      (name, bound_name))
-        # plt.tight_layout()
-    else:
-        ax.legend((scat,), (name,))
-    f.savefig(file_name, transparent=True)
-    plt.close()
-
-
-class MonoviewResultAnalyzer(ResultAnalyser):
-
-    def __init__(self, view_name, classifier_name, shape, classifier,
-                 classification_indices, k_folds, hps_method, metrics_dict,
-                 n_iter, class_label_names, pred,
-                 directory, base_file_name, labels, database_name, nb_cores, duration):
-        ResultAnalyser.__init__(self, classifier, classification_indices,
-                                k_folds, hps_method, metrics_dict, n_iter,
-                                class_label_names, pred,
-                                directory, base_file_name, labels,
-                                database_name, nb_cores, duration)
-        self.view_name = view_name
-        self.classifier_name = classifier_name
-        self.shape = shape
-
-    def get_base_string(self):
-        return "Classification on {} for {} with {}.\n\n".format(
-            self.database_name, self.view_name, self.classifier_name
-        )
-
-    def get_view_specific_info(self):
-        return "\t- View name : {}\t View shape : {}\n".format(self.view_name,
-                                                               self.shape)
\ No newline at end of file
diff --git a/multiview_platform/mono_multi_view_classifiers/monoview_classifiers/__init__.py b/multiview_platform/mono_multi_view_classifiers/monoview_classifiers/__init__.py
deleted file mode 100644
index db257abe4c0afa79fa5166cdd037070aecc6a29e..0000000000000000000000000000000000000000
--- a/multiview_platform/mono_multi_view_classifiers/monoview_classifiers/__init__.py
+++ /dev/null
@@ -1,31 +0,0 @@
-import os
-
-for module in os.listdir(os.path.dirname(os.path.realpath(__file__))):
-    if module == '__init__.py' or module[-3:] != '.py':
-        continue
-    __import__(module[:-3], locals(), globals(), [], 1)
-del module
-del os
-
-"""
-To be able to add a monoview classifier to the benchmark, one has to :
-Create a .py file named after the classifier
-Define a canProbas function returning True or False depending on whether the classifier can predict class probabilities
-Define a fit function
-    Input :
-        DATASET : The data matrix used to fit the classifier
-        CLASS_LABELS : The labels' array of the training set
-        NB_CORES : The number of cores the classifier can use to train
-        kwargs : Any argument specific to the classifier
-    Output :
-        classifier : A classifier object, similar to the sk-learn classifier object
-Define a ***Search function that searches hyper parameters for the algorithm. Check the HP optimization methods to get all the
-different functions to provide (returning the parameters in the order of the kwargs dict for the fit function)
-Define a getKWARGS function
-    Input :
-        KWARGSList : The list of all the arguments as written in the argument parser
-    Output :
-        KWARGSDict : a dictionary of arguments matching the kwargs needed in train
-Define a getConfig function that returns a string explaining the algorithm's config using a config dict or list
-Add the arguments needed to configure the classifier to the parser in exec_classif.py
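-
-A minimal sketch of such a module (a hypothetical ``my_classifier.py``),
-modeled on the class-based pattern of the modules in this package
-(e.g. decision_tree.py) rather than a definitive template::
-
-    from sklearn.tree import DecisionTreeClassifier
-
-    from ..monoview.monoview_utils import BaseMonoviewClassifier
-
-    classifier_class_name = "MyClassifier"
-
-    class MyClassifier(DecisionTreeClassifier, BaseMonoviewClassifier):
-
-        def __init__(self, random_state=None, max_depth=None, **kwargs):
-            DecisionTreeClassifier.__init__(self, max_depth=max_depth,
-                                            random_state=random_state)
-            self.param_names = ["max_depth", "random_state"]
-            self.classed_params = []
-            self.distribs = [[1, 5, 10, None], [random_state]]
-            self.weird_strings = {}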
-"""
diff --git a/multiview_platform/mono_multi_view_classifiers/monoview_classifiers/adaboost.py b/multiview_platform/mono_multi_view_classifiers/monoview_classifiers/adaboost.py
deleted file mode 100644
index 88a042ec6d69a94bb5f64619ad5eee5e55f40339..0000000000000000000000000000000000000000
--- a/multiview_platform/mono_multi_view_classifiers/monoview_classifiers/adaboost.py
+++ /dev/null
@@ -1,152 +0,0 @@
-import time
-import os
-
-import numpy as np
-from sklearn.ensemble import AdaBoostClassifier
-from sklearn.tree import DecisionTreeClassifier
-
-from .. import metrics
-from ..monoview.monoview_utils import CustomRandint, BaseMonoviewClassifier, \
-    get_accuracy_graph
-from ..utils.base import base_boosting_estimators
-
-# Author-Info
-__author__ = "Baptiste Bauvin"
-__status__ = "Prototype"  # Production, Development, Prototype
-
-classifier_class_name = "Adaboost"
-
-
-class Adaboost(AdaBoostClassifier, BaseMonoviewClassifier):
-    """
-    This class implements an AdaBoost classifier, inheriting from sklearn's
-    AdaBoostClassifier.
-
-    Parameters
-    ----------
-
-    random_state : int seed, RandomState instance, or None (default=None)
-        The seed of the pseudo random number generator to use when
-        shuffling the data.
-
-    n_estimators : int
-        The number of base estimators.
-
-    base_estimator :
-        The base estimator from which the boosted ensemble is built.
-
-    kwargs : other arguments
-
-
-    Attributes
-    ----------
-    param_names : names of the hyper parameters to optimize
-
-    classed_params : hyper parameters whose values are classes
-
-    distribs : distributions used to sample each hyper parameter
-
-    weird_strings : formatting hints used when reporting parameters
-
-    plotted_metric : the metric to plot (zero_one_loss)
-
-    plotted_metric_name : name of the metric to plot
-
-    step_predictions : predictions of each boosting step, filled by predict
-
-    """
-
-    def __init__(self, random_state=None, n_estimators=50,
-                 base_estimator=None, base_estimator_config=None, **kwargs):
-
-        base_estimator = BaseMonoviewClassifier.get_base_estimator(
-            self, base_estimator, base_estimator_config)
-        AdaBoostClassifier.__init__(self,
-                                    random_state=random_state,
-                                    n_estimators=n_estimators,
-                                    base_estimator=base_estimator,
-                                    algorithm="SAMME"
-                                    )
-        self.param_names = ["n_estimators", "base_estimator"]
-        self.classed_params = ["base_estimator"]
-        self.distribs = [CustomRandint(low=1, high=500),
-                         base_boosting_estimators]
-        self.weird_strings = {"base_estimator": "class_name"}
-        self.plotted_metric = metrics.zero_one_loss
-        self.plotted_metric_name = "zero_one_loss"
-        self.step_predictions = None
-
-    def fit(self, X, y, sample_weight=None):
-        """
-        Fit adaboost model
-
-        Parameters
-        ----------
-        X : {array-like, sparse matrix}, shape (n_samples, n_features)
-
-        y : array-like, shape (n_samples,)
-            Target values (class labels in classification)
-
-        sample_weight :
-
-        Returns
-        -------
-        self : object
-            Returns self.
-        """
-        begin = time.time()
-        AdaBoostClassifier.fit(self, X, y, sample_weight=sample_weight)
-        end = time.time()
-        self.train_time = end - begin
-        self.train_shape = X.shape
-        self.base_predictions = np.array(
-            [estim.predict(X) for estim in self.estimators_])
-        self.metrics = np.array([self.plotted_metric.score(pred, y) for pred in
-                                 self.staged_predict(X)])
-        return self
-
-    def predict(self, X):
-        """
-
-        Parameters
-        ----------
-        X : {array-like, sparse matrix}, shape (n_samples, n_features)
-            Samples to classify, where n_samples is the number of samples
-            and n_features is the number of features.
-
-        Returns
-        -------
-        predictions : ndarray of shape (n_samples, )
-            The estimated labels.
-        """
-        begin = time.time()
-        pred = AdaBoostClassifier.predict(self, X)
-        end = time.time()
-        self.pred_time = end - begin
-        self.step_predictions = np.array(
-            [step_pred for step_pred in self.staged_predict(X)])
-        return pred
-
-    def get_interpretation(self, directory, base_file_name, y_test, multi_class=False): # pragma: no cover
-        interpretString = ""
-        interpretString += self.get_feature_importance(directory, base_file_name)
-        interpretString += "\n\n Estimator error | Estimator weight\n"
-        interpretString += "\n".join(
-            [str(error) + " | " + str(weight / sum(self.estimator_weights_)) for
-             error, weight in
-             zip(self.estimator_errors_, self.estimator_weights_)])
-        step_test_metrics = np.array(
-            [self.plotted_metric.score(y_test, step_pred) for step_pred in
-             self.step_predictions])
-        get_accuracy_graph(step_test_metrics, "Adaboost",
-                           os.path.join(directory, base_file_name +"test_metrics.png"),
-                           self.plotted_metric_name, set="test")
-        np.savetxt(os.path.join(directory, base_file_name + "test_metrics.csv"),
-                   step_test_metrics,
-                   delimiter=',')
-        np.savetxt(os.path.join(directory, base_file_name + "train_metrics.csv"),
-                   self.metrics, delimiter=',')
-        np.savetxt(os.path.join(directory, base_file_name + "times.csv"),
-                   np.array([self.train_time, self.pred_time]), delimiter=',')
-        return interpretString
diff --git a/multiview_platform/mono_multi_view_classifiers/monoview_classifiers/additions/SVCClassifier.py b/multiview_platform/mono_multi_view_classifiers/monoview_classifiers/additions/SVCClassifier.py
deleted file mode 100644
index 06d6da20b104f2bba4e7efab22429c3b17440f31..0000000000000000000000000000000000000000
--- a/multiview_platform/mono_multi_view_classifiers/monoview_classifiers/additions/SVCClassifier.py
+++ /dev/null
@@ -1,17 +0,0 @@
-from sklearn.svm import SVC
-
-
-class SVCClassifier(SVC):
-
-    def __init__(self, random_state=None, kernel='rbf', C=1.0, degree=3,
-                 **kwargs):
-        super(SVCClassifier, self).__init__(
-            C=C,
-            kernel=kernel,
-            degree=degree,
-            probability=True,
-            max_iter=1000,
-            random_state=random_state
-        )
-        self.classed_params = []
-        self.weird_strings = {}
diff --git a/multiview_platform/mono_multi_view_classifiers/monoview_classifiers/additions/__init__.py b/multiview_platform/mono_multi_view_classifiers/monoview_classifiers/additions/__init__.py
deleted file mode 100644
index e69de29bb2d1d6434b8b29ae775ad8c2e48c5391..0000000000000000000000000000000000000000
diff --git a/multiview_platform/mono_multi_view_classifiers/monoview_classifiers/decision_tree.py b/multiview_platform/mono_multi_view_classifiers/monoview_classifiers/decision_tree.py
deleted file mode 100644
index be9f5e24716345660437eda3eb44476a982af04d..0000000000000000000000000000000000000000
--- a/multiview_platform/mono_multi_view_classifiers/monoview_classifiers/decision_tree.py
+++ /dev/null
@@ -1,36 +0,0 @@
-from sklearn.tree import DecisionTreeClassifier
-
-from ..monoview.monoview_utils import CustomRandint, BaseMonoviewClassifier
-
-# Author-Info
-__author__ = "Baptiste Bauvin"
-__status__ = "Prototype"  # Production, Development, Prototype
-
-classifier_class_name = "DecisionTree"
-
-
-class DecisionTree(DecisionTreeClassifier, BaseMonoviewClassifier):
-
-    def __init__(self, random_state=None, max_depth=None,
-                 criterion='gini', splitter='best', **kwargs):
-        DecisionTreeClassifier.__init__(self,
-                                        max_depth=max_depth,
-                                        criterion=criterion,
-                                        splitter=splitter,
-                                        random_state=random_state
-                                        )
-        self.param_names = ["max_depth", "criterion", "splitter",
-                            'random_state']
-        self.classed_params = []
-        self.distribs = [CustomRandint(low=1, high=300),
-                         ["gini", "entropy"],
-                         ["best", "random"], [random_state]]
-        self.weird_strings = {}
-
-    def get_interpretation(self, directory, base_file_name, y_test,
-                           multiclass=False):
-        interpretString = "First featrue : \n\t{} <= {}\n".format(
-            self.tree_.feature[0],
-            self.tree_.threshold[0])
-        interpretString += self.get_feature_importance(directory, base_file_name)
-        return interpretString
diff --git a/multiview_platform/mono_multi_view_classifiers/monoview_classifiers/gradient_boosting.py b/multiview_platform/mono_multi_view_classifiers/monoview_classifiers/gradient_boosting.py
deleted file mode 100644
index 7136370f1c684ead6010e6a9a944da031fdf4779..0000000000000000000000000000000000000000
--- a/multiview_platform/mono_multi_view_classifiers/monoview_classifiers/gradient_boosting.py
+++ /dev/null
@@ -1,96 +0,0 @@
-import time
-import os
-
-import numpy as np
-from sklearn.ensemble import GradientBoostingClassifier
-from sklearn.tree import DecisionTreeClassifier
-
-from .. import metrics
-from ..monoview.monoview_utils import CustomRandint, BaseMonoviewClassifier, \
-    get_accuracy_graph
-
-# Author-Info
-__author__ = "Baptiste Bauvin"
-__status__ = "Prototype"  # Production, Development, Prototype
-
-classifier_class_name = "GradientBoosting"
-
-
-class CustomDecisionTreeGB(DecisionTreeClassifier):
-    def predict(self, X, check_input=True):
-        y_pred = DecisionTreeClassifier.predict(self, X,
-                                                check_input=check_input)
-        return y_pred.reshape((y_pred.shape[0], 1)).astype(float)
-
-
-class GradientBoosting(GradientBoostingClassifier, BaseMonoviewClassifier):
-
-    def __init__(self, random_state=None, loss="exponential", max_depth=1.0,
-                 n_estimators=100,
-                 init=CustomDecisionTreeGB(max_depth=1),
-                 **kwargs):
-        GradientBoostingClassifier.__init__(self,
-                                            loss=loss,
-                                            max_depth=max_depth,
-                                            n_estimators=n_estimators,
-                                            init=init,
-                                            random_state=random_state
-                                            )
-        self.param_names = ["n_estimators", "max_depth"]
-        self.classed_params = []
-        self.distribs = [CustomRandint(low=50, high=500),
-                         CustomRandint(low=1, high=10),]
-        self.weird_strings = {}
-        self.plotted_metric = metrics.zero_one_loss
-        self.plotted_metric_name = "zero_one_loss"
-        self.step_predictions = None
-
-    def fit(self, X, y, sample_weight=None, monitor=None):
-        begin = time.time()
-        GradientBoostingClassifier.fit(self, X, y, sample_weight=sample_weight)
-        end = time.time()
-        self.train_time = end - begin
-        self.train_shape = X.shape
-        self.base_predictions = np.array(
-            [estim[0].predict(X) for estim in self.estimators_])
-        self.metrics = np.array(
-            [self.plotted_metric.score(pred, y) for pred in
-             self.staged_predict(X)])
-        # self.bounds = np.array([np.prod(
-        #     np.sqrt(1 - 4 * np.square(0.5 - self.estimator_errors_[:i + 1]))) for i
-        #                         in range(self.estimator_errors_.shape[0])])
-        return self
-
-    def predict(self, X):
-        begin = time.time()
-        pred = GradientBoostingClassifier.predict(self, X)
-        end = time.time()
-        self.pred_time = end - begin
-        if X.shape != self.train_shape:
-            self.step_predictions = np.array(
-                [step_pred for step_pred in self.staged_predict(X)])
-        return pred
-
-    def get_interpretation(self, directory, base_file_name, y_test, multi_class=False):
-        interpretString = ""
-        if multi_class:
-            return interpretString
-        else:
-            interpretString += self.get_feature_importance(directory, base_file_name)
-            step_test_metrics = np.array(
-                [self.plotted_metric.score(y_test, step_pred) for step_pred in
-                 self.step_predictions])
-            get_accuracy_graph(step_test_metrics, "AdaboostClassic",
-                               directory + "test_metrics.png",
-                               self.plotted_metric_name, set="test")
-            get_accuracy_graph(self.metrics, "AdaboostClassic",
-                               directory + "metrics.png",
-                               self.plotted_metric_name)
-            np.savetxt(os.path.join(directory, base_file_name + "test_metrics.csv"), step_test_metrics,
-                       delimiter=',')
-            np.savetxt(os.path.join(directory, base_file_name + "train_metrics.csv"), self.metrics,
-                       delimiter=',')
-            np.savetxt(os.path.join(directory, base_file_name + "times.csv"),
-                       np.array([self.train_time, self.pred_time]),
-                       delimiter=',')
-            return interpretString
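-
-# Minimal usage sketch (editor's addition, not part of the original file;
-# X_train, y_train, X_test are assumed in-memory binary-task arrays):
-#
-#     clf = GradientBoosting(random_state=42, n_estimators=100)
-#     clf.fit(X_train, y_train)
-#     y_pred = clf.predict(X_test)
-#     # clf.metrics then holds the per-stage zero_one_loss on the train data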
diff --git a/multiview_platform/mono_multi_view_classifiers/monoview_classifiers/knn.py b/multiview_platform/mono_multi_view_classifiers/monoview_classifiers/knn.py
deleted file mode 100644
index f3631bf6b7f281b65b0318028cbbebab89604a10..0000000000000000000000000000000000000000
--- a/multiview_platform/mono_multi_view_classifiers/monoview_classifiers/knn.py
+++ /dev/null
@@ -1,42 +0,0 @@
-from sklearn.neighbors import KNeighborsClassifier
-
-from ..monoview.monoview_utils import CustomRandint, BaseMonoviewClassifier
-
-# Author-Info
-__author__ = "Baptiste Bauvin"
-__status__ = "Prototype"  # Production, Development, Prototype
-
-classifier_class_name = "KNN"
-
-
-class KNN(KNeighborsClassifier, BaseMonoviewClassifier):
-    """
-    Implements an extension of sklearn's KNeighborsClassifier
-    for use within the platform.
-
-    Parameters
-    ----------
-    random_state
-    n_neighbors
-    weights
-    algorithm
-    p
-    kwargs
-    """
-
-    def __init__(self, random_state=None, n_neighbors=5,
-                 weights='uniform', algorithm='auto', p=2, **kwargs):
-        KNeighborsClassifier.__init__(self,
-                                      n_neighbors=n_neighbors,
-                                      weights=weights,
-                                      algorithm=algorithm,
-                                      p=p
-                                      )
-        self.param_names = ["n_neighbors", "weights", "algorithm", "p",
-                            "random_state", ]
-        self.classed_params = []
-        self.distribs = [CustomRandint(low=1, high=10), ["uniform", "distance"],
-                         ["auto", "ball_tree", "kd_tree", "brute"], [1, 2],
-                         [random_state]]
-        self.weird_strings = {}
-        self.random_state = random_state
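-
-# Note (editor's addition): KNeighborsClassifier is deterministic, so
-# random_state is only stored to satisfy the platform's hyper-parameter
-# search interface; it is not passed to the sklearn constructor.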
diff --git a/multiview_platform/mono_multi_view_classifiers/monoview_classifiers/lasso.py b/multiview_platform/mono_multi_view_classifiers/monoview_classifiers/lasso.py
deleted file mode 100644
index c91d2355759867e18375b38d500b79439e23adc2..0000000000000000000000000000000000000000
--- a/multiview_platform/mono_multi_view_classifiers/monoview_classifiers/lasso.py
+++ /dev/null
@@ -1,74 +0,0 @@
-import numpy as np
-from sklearn.linear_model import Lasso as LassoSK
-
-from ..monoview.monoview_utils import CustomRandint, CustomUniform, \
-    BaseMonoviewClassifier
-
-# Author-Info
-__author__ = "Baptiste Bauvin"
-__status__ = "Prototype"  # Production, Development, Prototype
-
-classifier_class_name = "Lasso"
-
-
-class Lasso(LassoSK, BaseMonoviewClassifier):
-    """
-
-    Parameters
-    ----------
-    random_state :
-
-    alpha : float, optional (default : 1.0)
-        Constant that multiplies the L1 term.
-        ``alpha = 0`` is equivalent to an ordinary least square, solved
-        by the :class:`LinearRegression` object. For numerical
-        reasons, using ``alpha = 0`` with the Lasso object is not
-        advised; you should prefer the LinearRegression object.
-
-    max_iter : int, optional (default : 10)
-        The maximum number of iterations.
-
-    warm_start : bool, optional
-        When set to True, reuse the solution of the previous call to fit as
-        initialization, otherwise, just erase the previous solution.
-
-    kwargs : other arguments
-
-    Attributes
-    ----------
-    param_names :
-
-    classed_params :
-
-    distribs :
-
-    weird_strings :
-
-    """
-
-    def __init__(self, random_state=None, alpha=1.0,
-                 max_iter=10, warm_start=False, **kwargs):
-        LassoSK.__init__(self,
-                         alpha=alpha,
-                         max_iter=max_iter,
-                         warm_start=warm_start,
-                         random_state=random_state
-                         )
-        self.param_names = ["max_iter", "alpha", "random_state"]
-        self.classed_params = []
-        self.distribs = [CustomRandint(low=1, high=300),
-                         CustomUniform(), [random_state]]
-        self.weird_strings = {}
-
-    def fit(self, X, y, check_input=True):
-        neg_y = np.copy(y)
-        neg_y[np.where(neg_y == 0)] = -1
-        LassoSK.fit(self, X, neg_y, check_input=check_input)
-        # self.feature_importances_ = self.coef_/np.sum(self.coef_)
-        return self
-
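-    # The regressor is used as a binary classifier: fit() maps the labels
-    # {0, 1} to {-1, +1}, and predict() thresholds the real-valued output
-    # by its sign before mapping -1 back to 0.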
-    def predict(self, X):
-        prediction = LassoSK.predict(self, X)
-        signed = np.sign(prediction)
-        signed[np.where(signed == -1)] = 0
-        return signed
\ No newline at end of file
diff --git a/multiview_platform/mono_multi_view_classifiers/monoview_classifiers/random_forest.py b/multiview_platform/mono_multi_view_classifiers/monoview_classifiers/random_forest.py
deleted file mode 100644
index ece278a56e54ab88ffc078e25facee452ac54217..0000000000000000000000000000000000000000
--- a/multiview_platform/mono_multi_view_classifiers/monoview_classifiers/random_forest.py
+++ /dev/null
@@ -1,82 +0,0 @@
-from sklearn.ensemble import RandomForestClassifier
-
-from ..monoview.monoview_utils import CustomRandint, BaseMonoviewClassifier
-
-# Author-Info
-__author__ = "Baptiste Bauvin"
-__status__ = "Prototype"  # Production, Development, Prototype
-
-classifier_class_name = "RandomForest"
-
-
-class RandomForest(RandomForestClassifier, BaseMonoviewClassifier):
-    """RandomForest Classifier Class
-
-    Parameters
-    ----------
-    random_state : int seed, RandomState instance, or None (default=None)
-        The seed of the pseudo random number generator to use when
-        shuffling the data.
-
-    n_estimators : int (default : 10)
-        The number of estimators.
-
-    max_depth : int, optional (default : None)
-        The maximum depth of the trees.
-
-    criterion : str (default : 'gini')
-        The split quality criterion.
-
-    kwargs : other arguments
-
-
-    Attributes
-    ----------
-    param_names :
-
-    distribs :
-
-    classed_params :
-
-    weird_strings :
-
-    """
-
-    def __init__(self, random_state=None, n_estimators=10,
-                 max_depth=None, criterion='gini', **kwargs):
-        """
-
-        Parameters
-        ----------
-        random_state
-        n_estimators
-        max_depth
-        criterion
-        kwargs
-        """
-        RandomForestClassifier.__init__(self,
-                                        n_estimators=n_estimators,
-                                        max_depth=max_depth,
-                                        criterion=criterion,
-                                        random_state=random_state
-                                        )
-        self.param_names = ["n_estimators", "max_depth", "criterion",
-                            "random_state"]
-        self.classed_params = []
-        self.distribs = [CustomRandint(low=1, high=300),
-                         CustomRandint(low=1, high=10),
-                         ["gini", "entropy"], [random_state]]
-        self.weird_strings = {}
-
-    def get_interpretation(self, directory, base_file_name, y_test, multiclass=False):
-        """
-
-        Parameters
-        ----------
-        directory
-        y_test
-
-        Returns
-        -------
-        interpret_string : str, the feature importance interpretation
-        """
-        interpret_string = ""
-        interpret_string += self.get_feature_importance(directory, base_file_name)
-        return interpret_string
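-
-# Sketch of how the platform's random search consumes param_names/distribs
-# (editor's addition, not in the original file; rs is a numpy RandomState):
-#
-#     clf = RandomForest(random_state=rs)
-#     sampled = {name: distrib.rvs(random_state=rs) if hasattr(distrib, "rvs")
-#                else distrib[rs.randint(len(distrib))]
-#                for name, distrib in zip(clf.param_names, clf.distribs)}
-#     clf.set_params(**sampled)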
diff --git a/multiview_platform/mono_multi_view_classifiers/monoview_classifiers/sgd.py b/multiview_platform/mono_multi_view_classifiers/monoview_classifiers/sgd.py
deleted file mode 100644
index 09c345bae7993576dcc595f10e5accc56f480a83..0000000000000000000000000000000000000000
--- a/multiview_platform/mono_multi_view_classifiers/monoview_classifiers/sgd.py
+++ /dev/null
@@ -1,56 +0,0 @@
-from sklearn.linear_model import SGDClassifier
-
-from ..monoview.monoview_utils import CustomUniform, BaseMonoviewClassifier
-
-# Author-Info
-__author__ = "Baptiste Bauvin"
-__status__ = "Prototype"  # Production, Development, Prototype
-
-classifier_class_name = "SGD"
-
-
-class SGD(SGDClassifier, BaseMonoviewClassifier):
-    """
-
-    Parameters
-    ----------
-    random_state : int seed, RandomState instance, or None (default=None)
-        The seed of the pseudo random number generator to use when
-        shuffling the data.
-
-    loss : str (default = "hinge")
-
-    penalty : str (default = "l2")
-
-    alpha : float, (default = 0.0001)
-
-    kwargs : other arguments
-
-
-    Attributes
-    ----------
-    param_names :
-
-    distribs :
-
-    classed_params :
-
-    weird_strings :
-
-    """
-
-    def __init__(self, random_state=None, loss='hinge',
-                 penalty='l2', alpha=0.0001, max_iter=5, tol=None, **kwargs):
-        SGDClassifier.__init__(self,
-                               loss=loss,
-                               penalty=penalty,
-                               alpha=alpha,
-                               max_iter=max_iter,
-                               tol=tol,
-                               random_state=random_state
-                               )
-        self.param_names = ["loss", "penalty", "alpha", "random_state"]
-        self.classed_params = []
-        self.distribs = [['log', 'modified_huber'],
-                         ["l1", "l2", "elasticnet"],
-                         CustomUniform(loc=0, state=1), [random_state]]
-        self.weird_strings = {}
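-
-# Note (editor's addition): the loss distribution only samples 'log' and
-# 'modified_huber', presumably because these are the SGDClassifier losses
-# that support predict_proba, which probability-based fusions require.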
diff --git a/multiview_platform/mono_multi_view_classifiers/monoview_classifiers/svm_linear.py b/multiview_platform/mono_multi_view_classifiers/monoview_classifiers/svm_linear.py
deleted file mode 100644
index e5d293a624d2df1b21b8b4702b287c563ecf0c4e..0000000000000000000000000000000000000000
--- a/multiview_platform/mono_multi_view_classifiers/monoview_classifiers/svm_linear.py
+++ /dev/null
@@ -1,36 +0,0 @@
-from multiview_platform.mono_multi_view_classifiers.monoview_classifiers.additions.SVCClassifier import \
-    SVCClassifier
-from ..monoview.monoview_utils import CustomUniform, BaseMonoviewClassifier
-
-# Author-Info
-__author__ = "Baptiste Bauvin"
-__status__ = "Prototype"  # Production, Development, Prototype
-
-classifier_class_name = "SVMLinear"
-
-
-class SVMLinear(SVCClassifier, BaseMonoviewClassifier):
-    """SVMLinear
-
-    Parameters
-    ----------
-    random_state : int seed, RandomState instance, or None (default=None)
-        The seed of the pseudo random number generator to use when
-        shuffling the data.
-
-
-    C : float, optional (default=1.0)
-        Penalty parameter C of the error term.
-
-    kwargs : other arguments
-
-    """
-
-    def __init__(self, random_state=None, C=1.0, **kwargs):
-        SVCClassifier.__init__(self,
-                               C=C,
-                               kernel='linear',
-                               random_state=random_state
-                               )
-        self.param_names = ["C", "random_state"]
-        self.distribs = [CustomUniform(loc=0, state=1), [random_state]]
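-
-# Note (editor's addition): C is drawn from CustomUniform(loc=0, state=1),
-# which presumably wraps a uniform distribution on [0, 1] (with `state`
-# playing the role of scipy's `scale`).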
diff --git a/multiview_platform/mono_multi_view_classifiers/monoview_classifiers/svm_poly.py b/multiview_platform/mono_multi_view_classifiers/monoview_classifiers/svm_poly.py
deleted file mode 100644
index d93bdcc352499390b749d42f95f5d2d799b69317..0000000000000000000000000000000000000000
--- a/multiview_platform/mono_multi_view_classifiers/monoview_classifiers/svm_poly.py
+++ /dev/null
@@ -1,50 +0,0 @@
-from multiview_platform.mono_multi_view_classifiers.monoview_classifiers.additions.SVCClassifier import \
-    SVCClassifier
-from ..monoview.monoview_utils import CustomUniform, CustomRandint, \
-    BaseMonoviewClassifier
-
-# Author-Info
-__author__ = "Baptiste Bauvin"
-__status__ = "Prototype"  # Production, Development, Prototype
-
-classifier_class_name = "SVMPoly"
-
-
-class SVMPoly(SVCClassifier, BaseMonoviewClassifier):
-    """
-    Polynomial-kernel SVM classifier based on SVCClassifier.
-
-    Parameters
-    ----------
-    random_state : int seed, RandomState instance, or None (default=None)
-        The seed of the pseudo random number generator to use when
-        shuffling the data.
-
-
-    C : float, optional (default=1.0)
-        Penalty parameter C of the error term.
-
-
-    degree : int, optional (default=3)
-        Degree of the polynomial kernel function.
-
-    kwargs : other arguments
-
-
-    Attributes
-    ----------
-
-    param_names : list of parameter names
-
-    distribs : list of hyper-parameter distributions
-    """
-
-    def __init__(self, random_state=None, C=1.0, degree=3, **kwargs):
-        SVCClassifier.__init__(self,
-                               C=C,
-                               kernel='poly',
-                               degree=degree,
-                               random_state=random_state
-                               )
-        self.param_names = ["C", "degree", "random_state"]
-        self.distribs = [CustomUniform(loc=0, state=1),
-                         CustomRandint(low=2, high=30), [random_state]]
diff --git a/multiview_platform/mono_multi_view_classifiers/monoview_classifiers/svm_rbf.py b/multiview_platform/mono_multi_view_classifiers/monoview_classifiers/svm_rbf.py
deleted file mode 100644
index 1af02e4d6e7fffbd35c0b2d0d554006a46b55752..0000000000000000000000000000000000000000
--- a/multiview_platform/mono_multi_view_classifiers/monoview_classifiers/svm_rbf.py
+++ /dev/null
@@ -1,41 +0,0 @@
-from multiview_platform.mono_multi_view_classifiers.monoview_classifiers.additions.SVCClassifier import \
-    SVCClassifier
-from ..monoview.monoview_utils import CustomUniform, BaseMonoviewClassifier
-
-# Author-Info
-__author__ = "Baptiste Bauvin"
-__status__ = "Prototype"  # Production, Development, Prototype
-
-classifier_class_name = "SVMRBF"
-
-
-class SVMRBF(SVCClassifier, BaseMonoviewClassifier):
-    """
-    RBF-kernel SVM classifier based on SVCClassifier.
-
-    Parameters
-    ----------
-    random_state : int seed, RandomState instance, or None (default=None)
-        The seed of the pseudo random number generator to use when
-        shuffling the data.
-
-    C : float, optional (default=1.0)
-        Penalty parameter C of the error term.
-
-    kwargs : other arguments
-
-    Attributes
-    ----------
-
-    param_names : list of parameter names
-
-    distribs : list of hyper-parameter distributions
-    """
-
-    def __init__(self, random_state=None, C=1.0, **kwargs):
-        SVCClassifier.__init__(self,
-                               C=C,
-                               kernel='rbf',
-                               random_state=random_state
-                               )
-        self.param_names = ["C", "random_state"]
-        self.distribs = [CustomUniform(loc=0, state=1), [random_state]]
diff --git a/multiview_platform/mono_multi_view_classifiers/multiview/__init__.py b/multiview_platform/mono_multi_view_classifiers/multiview/__init__.py
deleted file mode 100644
index a3ab07f2a12aedb7ffa60628a0103c838e9af55b..0000000000000000000000000000000000000000
--- a/multiview_platform/mono_multi_view_classifiers/multiview/__init__.py
+++ /dev/null
@@ -1,5 +0,0 @@
-# # from Code.mono_multi_view_classifiers.multiview_classifiers import fusion, Mumbo
-# from . import ExecMultiview
-# # from . import Mumbo
-#
-# __all__ = ['fusion', 'Mumbo']
diff --git a/multiview_platform/mono_multi_view_classifiers/multiview/exec_multiview.py b/multiview_platform/mono_multi_view_classifiers/multiview/exec_multiview.py
deleted file mode 100644
index c89034cf494399fc9cfa2561531192f79c93c2bd..0000000000000000000000000000000000000000
--- a/multiview_platform/mono_multi_view_classifiers/multiview/exec_multiview.py
+++ /dev/null
@@ -1,356 +0,0 @@
-import logging
-import os
-import os.path
-import time
-
-import h5py
-import numpy as np
-
-from .multiview_utils import MultiviewResult, MultiviewResultAnalyzer
-from .. import multiview_classifiers
-from ..utils import hyper_parameter_search
-from ..utils.multiclass import get_mc_estim
-from ..utils.organization import secure_file_path
-
-# Author-Info
-__author__ = "Baptiste Bauvin"
-__status__ = "Prototype"  # Production, Development, Prototype
-
-
-def init_constants(kwargs, classification_indices, metrics,
-                   name, nb_cores, k_folds,
-                   dataset_var, directory):
-    """
-    Used to init the constants
-    Parameters
-    ----------
-    kwargs :
-
-    classification_indices :
-
-    metrics :
-
-    name :
-
-    nb_cores : int, number of cores to use
-
-    k_folds :
-
-    dataset_var :  {array-like} shape (n_samples, n_features)
-        dataset variable
-
-    Returns
-    -------
-    tuple of (classifier_name, t_start, views_indices, classifier_config,
-              views, learning_rate, labels, output_file_name, directory,
-              base_file_name, metrics)
-    """
-    views = kwargs["view_names"]
-    views_indices = kwargs["view_indices"]
-    if metrics is None:
-        metrics = {"f1_score*":{}}
-    classifier_name = kwargs["classifier_name"]
-    classifier_config = kwargs[classifier_name]
-    learning_rate = len(classification_indices[0]) / float(
-        (len(classification_indices[0]) + len(classification_indices[1])))
-    t_start = time.time()
-    logging.info("Info\t: Classification - Database : " + str(
-        name) + " ; Views : " + ", ".join(views) +
-                 " ; Algorithm : " + classifier_name + " ; Cores : " + str(
-        nb_cores) + ", Train ratio : " + str(learning_rate) +
-                 ", CV on " + str(k_folds.n_splits) + " folds")
-
-    for view_index, view_name in zip(views_indices, views):
-        logging.info("Info:\t Shape of " + str(view_name) + " :" + str(
-            dataset_var.get_shape()))
-    labels = dataset_var.get_labels()
-    directory = os.path.join(directory, classifier_name)
-    base_file_name = classifier_name+"-"+dataset_var.get_name()+"-"
-    output_file_name = os.path.join(directory, base_file_name)
-    return classifier_name, t_start, views_indices, \
-           classifier_config, views, learning_rate, labels, output_file_name,\
-           directory, base_file_name, metrics
-
-
-def save_results(string_analysis, images_analysis, output_file_name,
-                 confusion_matrix): # pragma: no cover
-    """
-    Save results in derectory
-
-    Parameters
-    ----------
-
-    classifier : classifier class
-
-    labels_dictionary : dict dictionary of labels
-
-    string_analysis : str
-
-    views :
-
-    classifier_module : module of the classifier
-
-    classification_kargs :
-
-    directory : str directory
-
-    learning_rate :
-
-    name :
-
-    images_analysis :
-
-    """
-    logging.info(string_analysis)
-    secure_file_path(output_file_name)
-    with open(output_file_name + 'summary.txt', 'w',
-              encoding="utf-8") as output_text_file:
-        output_text_file.write(string_analysis)
-    np.savetxt(output_file_name+"confusion_matrix.csv", confusion_matrix,
-               delimiter=',')
-
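-    # Figures: if a file with the image's name already exists, a numbered
-    # copy ("-1" to "-19") is written first; the unnumbered file is then
-    # overwritten with the latest version in any case.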
-    if images_analysis is not None:
-        for image_name in images_analysis.keys():
-            if os.path.isfile(output_file_name + image_name + ".png"):
-                for i in range(1, 20):
-                    test_file_name = output_file_name + image_name + "-" + str(
-                        i) + ".png"
-                    if not os.path.isfile(test_file_name):
-                        images_analysis[image_name].savefig(test_file_name,
-                                                            transparent=True)
-                        break
-
-            images_analysis[image_name].savefig(
-                output_file_name + image_name + '.png', transparent=True)
-
-
-def exec_multiview_multicore(directory, core_index, name, learning_rate,
-                             nb_folds,
-                             database_type, path, labels_dictionary,
-                             random_state, labels,
-                             hyper_param_search=False, nb_cores=1, metrics=None,
-                             n_iter=30, **arguments): # pragma: no cover
-    """
-    Load the HDF5 dataset for one parallel job and execute the multiview
-    classification on it.
-
-    Parameters
-    ----------
-
-    directory : indicate the directory
-
-    core_index :
-
-    name : name of the data file to perform
-
-    learning_rate :
-
-    nb_folds :
-
-    database_type :
-
-    path : path to the data name
-
-    labels_dictionary
-
-    random_state : int seed, RandomState instance, or None (default=None)
-        The seed of the pseudo random number generator to use when
-        shuffling the data.
-
-    labels :
-
-    hyper_param_search :
-
-    nb_cores : int, number of cores
-
-    metrics : metric to use
-
-    n_iter : int number of iterations
-
-    arguments : others arguments
-
-    Returns
-    -------
-    The result of ``exec_multiview`` called on the loaded dataset with the
-    given parameters.
-    """
-    dataset_var = h5py.File(path + name + str(core_index) + ".hdf5", "r")
-    return exec_multiview(directory, dataset_var, name, learning_rate, nb_folds,
-                          1,
-                          database_type, path, labels_dictionary,
-                          random_state, labels,
-                          hps_method=hyper_param_search,
-                          metrics=metrics,
-                          n_iter=n_iter, **arguments)
-
-
-def exec_multiview(directory, dataset_var, name, classification_indices,
-                   k_folds,
-                   nb_cores, database_type, path,
-                   labels_dictionary, random_state, labels,
-                   hps_method="None", hps_kwargs={}, metrics=None,
-                   n_iter=30, **kwargs):
-    """Used to execute multiview classification and result analysis
-
-    Parameters
-    ----------
-
-    directory : indicate the directory
-
-
-    dataset_var :
-
-    name
-
-    classification_indices
-
-    k_folds
-
-    nb_cores
-
-    database_type
-
-    path
-
-    labels_dictionary : dict dictionary of labels
-
-    random_state : int seed, RandomState instance, or None (default=None)
-        The seed of the pseudo random number generator to use when
-        shuffling the data.
-
-    labels
-
-    hps_method
-
-    metrics
-
-    n_iter : int number of iterations
-
-    kwargs
-
-    Returns
-    -------
-
-    ``MultiviewResult``
-    """
-
-    logging.debug("Start:\t Initialize constants")
-    cl_type, \
-    t_start, \
-    views_indices, \
-    classifier_config, \
-    views, \
-    learning_rate, \
-    labels, \
-    output_file_name,\
-    directory,\
-    base_file_name, \
-    metrics = init_constants(kwargs, classification_indices, metrics, name,
-                            nb_cores, k_folds, dataset_var, directory)
-    logging.debug("Done:\t Initialize constants")
-
-    extraction_time = time.time() - t_start
-    logging.info("Info:\t Extraction duration " + str(extraction_time) + "s")
-
-    logging.debug("Start:\t Getting train/test split")
-    learning_indices, validation_indices = classification_indices
-    logging.debug("Done:\t Getting train/test split")
-
-    logging.debug("Start:\t Getting classifiers modules")
-    # classifierPackage = getattr(multiview_classifiers,
-    #                             CL_type)  # Allows calling a module from a string
-    classifier_module = getattr(multiview_classifiers, cl_type)
-    classifier_name = classifier_module.classifier_class_name
-    # classifierClass = getattr(classifierModule, CL_type + "Class")
-    logging.debug("Done:\t Getting classifiers modules")
-
-    logging.debug("Start:\t Optimizing hyperparameters")
-    hps_beg = time.monotonic()
-    if hps_method != "None":
-        hps_method_class = getattr(hyper_parameter_search, hps_method)
-        estimator = getattr(classifier_module, classifier_name)(
-                    random_state=random_state,
-                    **classifier_config)
-        estimator = get_mc_estim(estimator, random_state,
-                                         multiview=True,
-                                         y=dataset_var.get_labels()[learning_indices])
-        hps = hps_method_class(estimator, scoring=metrics, cv=k_folds,
-                               random_state=random_state, framework="multiview",
-                               n_jobs=nb_cores,
-                               learning_indices=learning_indices,
-                               view_indices=views_indices, **hps_kwargs)
-        hps.fit(dataset_var, dataset_var.get_labels(), )
-        classifier_config = hps.get_best_params()
-        hps.gen_report(output_file_name)
-        # classifier_config = hyper_parameter_search.search_best_settings(
-        #     dataset_var, dataset_var.get_labels(), classifier_module,
-        #     classifier_name,
-        #     metrics[0], learning_indices, k_folds, random_state,
-        #     output_file_name, nb_cores=nb_cores, views_indices=views_indices,
-        #     searching_tool=hps_method, n_iter=n_iter,
-        #     classifier_config=classifier_config)
-    hps_duration = time.monotonic() - hps_beg
-    classifier = get_mc_estim(
-        getattr(classifier_module, classifier_name)(random_state=random_state,
-                                                    **classifier_config),
-        random_state, multiview=True,
-        y=dataset_var.get_labels())
-    logging.debug("Done:\t Optimizing hyperparameters")
-    logging.debug("Start:\t Fitting classifier")
-    fit_beg = time.monotonic()
-    classifier.fit(dataset_var, dataset_var.get_labels(),
-                   train_indices=learning_indices,
-                   view_indices=views_indices)
-    fit_duration = time.monotonic() - fit_beg
-    logging.debug("Done:\t Fitting classifier")
-
-    logging.debug("Start:\t Predicting")
-    train_pred = classifier.predict(dataset_var,
-                                           example_indices=learning_indices,
-                                           view_indices=views_indices)
-    pred_beg = time.monotonic()
-    test_pred = classifier.predict(dataset_var,
-                                          example_indices=validation_indices,
-                                          view_indices=views_indices)
-    pred_duration = time.monotonic() - pred_beg
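-    # -100 is a sentinel for samples that belong to neither split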
-    full_pred = np.zeros(dataset_var.get_labels().shape, dtype=int) - 100
-    full_pred[learning_indices] = train_pred
-    full_pred[validation_indices] = test_pred
-    logging.info("Done:\t Pertidcting")
-
-    whole_duration = time.time() - t_start
-    logging.info(
-        "Info:\t Classification duration " + str(whole_duration) + "s")
-
-    # TODO: get better cltype
-
-    logging.info("Start:\t Result Analysis for " + cl_type)
-    times = (extraction_time, whole_duration)
-    result_analyzer = MultiviewResultAnalyzer(view_names=views,
-                                              classifier=classifier,
-                                              classification_indices=classification_indices,
-                                              k_folds=k_folds,
-                                              hps_method=hps_method,
-                                              metrics_dict=metrics,
-                                              n_iter=n_iter,
-                                              class_label_names=list(labels_dictionary.values()),
-                                              pred=full_pred,
-                                              directory=directory,
-                                              base_file_name=base_file_name,
-                                              labels=labels,
-                                              database_name=dataset_var.get_name(),
-                                              nb_cores=nb_cores,
-                                              duration=whole_duration)
-    string_analysis, images_analysis, metrics_scores, class_metrics_scores, \
-    confusion_matrix = result_analyzer.analyze()
-    logging.info("Done:\t Result Analysis for " + cl_type)
-
-    logging.debug("Start:\t Saving preds")
-    save_results(string_analysis, images_analysis, output_file_name, confusion_matrix)
-    logging.debug("Start:\t Saving preds")
-
-    return MultiviewResult(cl_type, classifier_config, metrics_scores,
-                           full_pred, hps_duration, fit_duration,
-                           pred_duration, class_metrics_scores)
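-
-# Call sketch (editor's addition; names and values are placeholders, kwargs
-# must provide "view_names", "view_indices", "classifier_name" and a config
-# entry keyed by that classifier name):
-#
-#     result = exec_multiview(directory, dataset_var, name,
-#                             (train_indices, test_indices), k_folds,
-#                             nb_cores=1, database_type="hdf5", path="data/",
-#                             labels_dictionary={0: "neg", 1: "pos"},
-#                             random_state=42, labels=labels,
-#                             hps_method="None", metrics={"f1_score*": {}},
-#                             view_names=["V0"], view_indices=[0],
-#                             classifier_name="my_clf", my_clf={})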
diff --git a/multiview_platform/mono_multi_view_classifiers/multiview/multiview_utils.py b/multiview_platform/mono_multi_view_classifiers/multiview/multiview_utils.py
deleted file mode 100644
index 9ad93b6c55774f57f89028560bd8f82de9e801d3..0000000000000000000000000000000000000000
--- a/multiview_platform/mono_multi_view_classifiers/multiview/multiview_utils.py
+++ /dev/null
@@ -1,197 +0,0 @@
-from abc import abstractmethod
-
-import numpy as np
-
-from .. import monoview_classifiers
-from ..utils.base import BaseClassifier, ResultAnalyser
-from ..utils.dataset import RAMDataset, get_examples_views_indices
-
-
-# class FakeEstimator():
-#
-#     def predict(self, X, example_indices=None, view_indices=None):
-#         return np.zeros(example_indices.shape[0])
-
-
-class BaseMultiviewClassifier(BaseClassifier):
-    """
-    BaseMultiviewClassifier base of Multiview classifiers
-
-    Parameters
-    ----------
-    random_state : int seed, RandomState instance, or None (default=None)
-        The seed of the pseudo random number generator to use when
-        shuffling the data.
-    """
-
-    def __init__(self, random_state):
-
-        self.random_state = random_state
-        self.short_name = self.__module__.split(".")[-1]
-        self.weird_strings = {}
-        self.used_views = None
-
-    @abstractmethod
-    def fit(self, X, y, train_indices=None, view_indices=None): # pragma: no cover
-        pass
-
-    @abstractmethod
-    def predict(self, X, example_indices=None, view_indices=None): # pragma: no cover
-        pass
-
-    def _check_views(self, view_indices): # pragma: no cover
-        if self.used_views is not None and not np.array_equal(np.sort(self.used_views), np.sort(view_indices)):
-            raise ValueError('Used {} views to fit, and trying to predict on {}'.format(self.used_views, view_indices))
-
-    # def to_str(self, param_name):
-    #     if param_name in self.weird_strings:
-    #         string = ""
-    #         if "class_name" in self.weird_strings[param_name]:
-    #             string += self.get_params()[param_name].__class__.__name__
-    #         if "config" in self.weird_strings[param_name]:
-    #             string += "( with " + self.get_params()[
-    #                 param_name].params_to_string() + ")"
-    #         else:
-    #             string += self.weird_strings[param_name](
-    #                 self.get_params()[param_name])
-    #         return string
-    #     else:
-    #         return str(self.get_params()[param_name])
-
-    def accepts_multi_class(self, random_state, n_samples=10, dim=2,
-                            n_classes=3, n_views=2):
-        if int(n_samples / n_classes) < 1:
-            raise ValueError(
-                "n_samples ({}) / n_classes ({}) must be over 1".format(
-                    n_samples,
-                    n_classes))
-        fake_mc_y = [class_index
-                     for _ in range(int(n_samples / n_classes))
-                     for class_index in range(n_classes)]
-        # pad so that the label vector matches the n_samples view rows
-        fake_mc_y += [0 for _ in range(n_samples % n_classes)]
-        fake_mc_y = np.asarray(fake_mc_y)
-        fake_mc_X = RAMDataset(
-            views=[random_state.randint(low=0, high=101,
-                                        size=(n_samples, dim))
-                   for i in range(n_views)],
-            labels=fake_mc_y,
-            are_sparse=False,
-            name="mc_dset",
-            labels_names=[str(class_index) for class_index in range(n_classes)],
-            view_names=["V" + str(view_idx) for view_idx in range(n_views)],
-            )
-        try:
-            self.fit(fake_mc_X, fake_mc_y)
-            return True
-        except ValueError:
-            return False
-
-
-class ConfigGenerator():
-
-    def __init__(self, classifier_names):
-        self.distribs = {}
-        for classifier_name in classifier_names:
-            classifier_class = get_monoview_classifier(classifier_name)
-            self.distribs[classifier_name] = dict((param_name, param_distrib)
-                                                  for param_name, param_distrib
-                                                  in
-                                                  zip(
-                                                      classifier_class().param_names,
-                                                      classifier_class().distribs)
-                                                  if
-                                                  param_name != "random_state")
-
-    def rvs(self, random_state=None):
-        config_sample = {}
-        for classifier_name, classifier_config in self.distribs.items():
-            config_sample[classifier_name] = {}
-            for param_name, param_distrib in classifier_config.items():
-                if hasattr(param_distrib, "rvs"):
-                    config_sample[classifier_name][
-                        param_name] = param_distrib.rvs(
-                        random_state=random_state)
-                else:
-                    config_sample[classifier_name][
-                        param_name] = param_distrib[
-                        random_state.randint(len(param_distrib))]
-        return config_sample
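-
-    # Example (editor's sketch; classifier and parameter names are
-    # illustrative): ConfigGenerator(["decision_tree"]).rvs(rs) returns
-    # {"decision_tree": {"max_depth": 3, ...}}, one sampled value per
-    # non-random_state hyper-parameter.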
-
-
-def get_available_monoview_classifiers(need_probas=False):
-    available_classifiers = [module_name
-                             for module_name in dir(monoview_classifiers)
-                             if not (
-                    module_name.startswith("__") or module_name == "additions")]
-    if need_probas:
-        proba_classifiers = []
-        for module_name in available_classifiers:
-            module = getattr(monoview_classifiers, module_name)
-            classifier_class = getattr(module, module.classifier_class_name)()
-            proba_prediction = getattr(classifier_class, "predict_proba", None)
-            if callable(proba_prediction):
-                proba_classifiers.append(module_name)
-        available_classifiers = proba_classifiers
-    return available_classifiers
-
-
-def get_monoview_classifier(classifier_name, multiclass=False):
-    classifier_module = getattr(monoview_classifiers, classifier_name)
-    classifier_class = getattr(classifier_module,
-                               classifier_module.classifier_class_name)
-    return classifier_class
-
-
-from .. import multiview_classifiers
-
-
-class MultiviewResult(object):
-    def __init__(self, classifier_name, classifier_config,
-                 metrics_scores, full_labels, hps_duration, fit_duration,
-                 pred_duration, class_metric_scores):
-        self.classifier_name = classifier_name
-        self.classifier_config = classifier_config
-        self.metrics_scores = metrics_scores
-        self.full_labels_pred = full_labels
-        self.hps_duration = hps_duration
-        self.fit_duration = fit_duration
-        self.pred_duration = pred_duration
-        self.class_metric_scores = class_metric_scores
-
-    def get_classifier_name(self):
-        try:
-            multiview_classifier_module = getattr(multiview_classifiers,
-                                                  self.classifier_name)
-            multiview_classifier = getattr(multiview_classifier_module,
-                                           multiview_classifier_module.classifier_class_name)(
-                42, **self.classifier_config)
-            return multiview_classifier.short_name
-        except Exception:
-            return self.classifier_name
-
-
-class MultiviewResultAnalyzer(ResultAnalyser):
-
-    def __init__(self, view_names, classifier, classification_indices, k_folds,
-                 hps_method, metrics_dict, n_iter, class_label_names,
-                 pred, directory, base_file_name, labels,
-                 database_name, nb_cores, duration):
-        if hps_method.endswith("equiv"):
-            n_iter = n_iter*len(view_names)
-        ResultAnalyser.__init__(self, classifier, classification_indices, k_folds,
-                                hps_method, metrics_dict, n_iter, class_label_names,
-                                pred, directory,
-                                base_file_name, labels, database_name,
-                                nb_cores, duration)
-        self.classifier_name = classifier.short_name
-        self.view_names = view_names
-
-    def get_base_string(self, ):
-        return "Multiview classification on {}  with {}\n\n".format(self.database_name,
-                                                                self.classifier_name)
-
-    def get_view_specific_info(self):
-        return "\t- Views : " + ', '.join(self.view_names) + "\n"
\ No newline at end of file
diff --git a/multiview_platform/mono_multi_view_classifiers/multiview/profile b/multiview_platform/mono_multi_view_classifiers/multiview/profile
deleted file mode 100644
index 40a016510edec99a8c0e78e9ba4bf248d41b8c62..0000000000000000000000000000000000000000
Binary files a/multiview_platform/mono_multi_view_classifiers/multiview/profile and /dev/null differ
diff --git a/multiview_platform/mono_multi_view_classifiers/multiview_classifiers/__init__.py b/multiview_platform/mono_multi_view_classifiers/multiview_classifiers/__init__.py
deleted file mode 100644
index 6e242133fa45a01a2506f423a543c742390259be..0000000000000000000000000000000000000000
--- a/multiview_platform/mono_multi_view_classifiers/multiview_classifiers/__init__.py
+++ /dev/null
@@ -1,10 +0,0 @@
-import os
-
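-# Import every plain .py module in this package so that each multiview
-# classifier registers itself on package import.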
-for module in os.listdir(os.path.dirname(os.path.realpath(__file__))):
-    if (module == '__init__.py' or module[-4:] == '.pyc'
-            or module == '__pycache__' or module[-3:] != '.py'):
-        continue
-    __import__(module[:-3], locals(), globals(), [], 1)
-del module
-del os
diff --git a/multiview_platform/mono_multi_view_classifiers/multiview_classifiers/additions/__init__.py b/multiview_platform/mono_multi_view_classifiers/multiview_classifiers/additions/__init__.py
deleted file mode 100644
index e69de29bb2d1d6434b8b29ae775ad8c2e48c5391..0000000000000000000000000000000000000000
diff --git a/multiview_platform/mono_multi_view_classifiers/multiview_classifiers/additions/diversity_utils.py b/multiview_platform/mono_multi_view_classifiers/multiview_classifiers/additions/diversity_utils.py
deleted file mode 100644
index a49845191d950fa26026d7d5945ba5853275f199..0000000000000000000000000000000000000000
--- a/multiview_platform/mono_multi_view_classifiers/multiview_classifiers/additions/diversity_utils.py
+++ /dev/null
@@ -1,200 +0,0 @@
-import itertools
-import math
-
-import numpy as np
-
-from .fusion_utils import BaseFusionClassifier
-from ...multiview.multiview_utils import ConfigGenerator, \
-    get_available_monoview_classifiers, \
-    BaseMultiviewClassifier
-from ...utils.dataset import get_examples_views_indices
-
-
-class DiversityFusionClassifier(BaseMultiviewClassifier,
-                                BaseFusionClassifier):
-    """This is the base class for all the diversity fusion based classifiers."""
-
-    def __init__(self, random_state=None, classifier_names=None,
-                 monoview_estimators=None, classifier_configs=None):
-        """Used to init the instances"""
-        BaseMultiviewClassifier.__init__(self, random_state)
-        if classifier_names is None:
-            classifier_names = get_available_monoview_classifiers()
-        self.classifier_names = classifier_names
-        self.param_names = ["classifier_configs"]
-        self.distribs = [ConfigGenerator(get_available_monoview_classifiers())]
-        self.monoview_estimators = monoview_estimators
-        self.classifier_configs = classifier_configs
-
-    def fit(self, X, y, train_indices=None, view_indices=None):
-        train_indices, view_indices = get_examples_views_indices(X,
-                                                                 train_indices,
-                                                                 view_indices)
-        self.used_views = view_indices
-        # TODO : Finer analysis, may support a bit of multiclass
-        if np.unique(y[train_indices]).shape[0] > 2:
-            raise ValueError(
-                "Multiclass not supported, classes used : {}".format(
-                    np.unique(y[train_indices])))
-        if self.monoview_estimators is None:
-            self.monoview_estimators = []
-            for classifier_idx, classifier_name in enumerate(
-                    self.classifier_names):
-                self.monoview_estimators.append([])
-                for idx, view_idx in enumerate(view_indices):
-                    estimator = self.init_monoview_estimator(classifier_name,
-                                                             self.classifier_configs)
-                    estimator.fit(X.get_v(view_idx, train_indices),
-                                  y[train_indices])
-                    self.monoview_estimators[classifier_idx].append(estimator)
-        else:
-            pass  # TODO
-        self.choose_combination(X, y, train_indices, view_indices)
-        return self
-
-    def predict(self, X, example_indices=None, view_indices=None):
-        """Just a weighted majority vote"""
-        example_indices, view_indices = get_examples_views_indices(X,
-                                                                   example_indices,
-                                                                   view_indices)
-        self._check_views(view_indices)
-        nb_class = X.get_nb_class()
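-        # a third vote column collects the -100 "failed prediction" sentinel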
-        if nb_class > 2:
-            nb_class = 3
-        votes = np.zeros((len(example_indices), nb_class), dtype=float)
-        monoview_predictions = [
-            monoview_estimator.predict(X.get_v(view_idx, example_indices))
-            for view_idx, monoview_estimator
-            in zip(view_indices, self.monoview_estimators)]
-        for idx, example_index in enumerate(example_indices):
-            for monoview_estimator_index, monoview_prediciton in enumerate(
-                    monoview_predictions):
-                if int(monoview_prediciton[idx]) == -100:
-                    votes[idx, 2] += 1
-                else:
-                    votes[idx, int(monoview_prediciton[idx])] += 1
-        predicted_labels = np.argmax(votes, axis=1)
-        return predicted_labels
-
-    def get_classifiers_decisions(self, X, view_indices, examples_indices):
-        classifiers_decisions = np.zeros((len(self.monoview_estimators),
-                                          len(view_indices),
-                                          len(examples_indices)))
-        for estimator_idx, estimator in enumerate(self.monoview_estimators):
-            for idx, view_index in enumerate(view_indices):
-                classifiers_decisions[estimator_idx, idx, :] = estimator[
-                    idx].predict(X.get_v(view_index, examples_indices))
-        return classifiers_decisions
-
-    def init_combinations(self, X, example_indices, view_indices):
-        classifiers_decisions = self.get_classifiers_decisions(X, view_indices,
-                                                               example_indices)
-        nb_classifiers, nb_views, n_examples = classifiers_decisions.shape
-        combinations = itertools.combinations_with_replacement(
-            range(nb_classifiers),
-            nb_views)
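-        # number of multisets of size nb_views over nb_classifiers items:
-        # C(nb_classifiers + nb_views - 1, nb_views)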
-        nb_combinations = int(
-            math.factorial(nb_classifiers + nb_views - 1) / math.factorial(
-                nb_views) / math.factorial(
-                nb_classifiers - 1))
-        div_measure = np.zeros(nb_combinations)
-        combis = np.zeros((nb_combinations, nb_views), dtype=int)
-        return combinations, combis, div_measure, classifiers_decisions, nb_views
-
-
-class GlobalDiversityFusionClassifier(DiversityFusionClassifier):
-
-    def choose_combination(self, X, y, examples_indices, view_indices):
-        combinations, combis, div_measure, classifiers_decisions, nb_views = self.init_combinations(
-            X, examples_indices, view_indices)
-        for combinationsIndex, combination in enumerate(combinations):
-            combis[combinationsIndex] = combination
-            div_measure[combinationsIndex] = self.diversity_measure(
-                classifiers_decisions,
-                combination,
-                y[examples_indices])
-        best_combi_index = np.argmax(div_measure)
-        best_combination = combis[best_combi_index]
-        self.monoview_estimators = [
-            self.monoview_estimators[classifier_index][view_index]
-            for view_index, classifier_index
-            in enumerate(best_combination)]
-
-
-class CoupleDiversityFusionClassifier(DiversityFusionClassifier):
-
-    def choose_combination(self, X, y, examples_indices, view_indices):
-        combinations, combis, div_measure, classifiers_decisions, nb_views = self.init_combinations(
-            X, examples_indices, view_indices)
-        for combinations_index, combination in enumerate(combinations):
-            combis[combinations_index] = combination
-            combi_with_view = [(viewIndex, combiIndex) for viewIndex, combiIndex
-                               in
-                               enumerate(combination)]
-            binomes = itertools.combinations(combi_with_view, 2)
-            nb_binomes = int(
-                math.factorial(nb_views) / 2 / math.factorial(nb_views - 2))
-            couple_diversities = np.zeros(nb_binomes)
-            for binome_index, binome in enumerate(binomes):
-                (view_index_1, classifier_index_1), (
-                    view_index_2, classifier_index_2) = binome
-                couple_diversity = np.mean(
-                    self.diversity_measure(
-                        classifiers_decisions[classifier_index_1, view_index_1],
-                        classifiers_decisions[classifier_index_2, view_index_2],
-                        y[examples_indices])
-                )
-                couple_diversities[binome_index] = couple_diversity
-            div_measure[combinations_index] = np.mean(couple_diversities)
-        best_combi_index = np.argmax(div_measure)
-        best_combination = combis[best_combi_index]
-        self.monoview_estimators = [
-            self.monoview_estimators[classifier_index][view_index]
-            for view_index, classifier_index
-            in enumerate(best_combination)]
-
-#
-# def CQ_div_measure(classifiersNames, classifiersDecisions, measurement,
-#                    foldsGroudTruth):
-#     """
-#     This function is used to measure a pseudo-CQ measurement based on the minCq algorithm.
-#     It's a mix between couple_div_measure and global_div_measure that uses multiple measurements.
-#     """
-#     nbViews, nbClassifiers, nbFolds, foldsLen = classifiersDecisions.shape
-#     combinations = itertools.combinations_with_replacement(range(nbClassifiers),
-#                                                            nbViews)
-#     nbCombinations = int(
-#         math.factorial(nbClassifiers + nbViews - 1) / math.factorial(
-#             nbViews) / math.factorial(nbClassifiers - 1))
-#     div_measure = np.zeros(nbCombinations)
-#     combis = np.zeros((nbCombinations, nbViews), dtype=int)
-#
-#     for combinationsIndex, combination in enumerate(combinations):
-#         combis[combinationsIndex] = combination
-#         combiWithView = [(viewIndex, combiIndex) for viewIndex, combiIndex in
-#                          enumerate(combination)]
-#         binomes = itertools.combinations(combiWithView, 2)
-#         nbBinomes = int(
-#             math.factorial(nbViews) / 2 / math.factorial(nbViews - 2))
-#         disagreement = np.zeros(nbBinomes)
-#         div_measure[combinationsIndex] = measurement[1](classifiersDecisions,
-#                                                         combination,
-#                                                         foldsGroudTruth,
-#                                                         foldsLen)
-#         for binomeIndex, binome in enumerate(binomes):
-#             (viewIndex1, classifierIndex1), (
-#             viewIndex2, classifierIndex2) = binome
-#             nbDisagree = np.sum(measurement[0](
-#                 classifiersDecisions[viewIndex1, classifierIndex1],
-#                 classifiersDecisions[viewIndex2, classifierIndex2],
-#                 foldsGroudTruth)
-#                                 , axis=1) / float(foldsLen)
-#             disagreement[binomeIndex] = np.mean(nbDisagree)
-#         div_measure[combinationsIndex] /= float(np.mean(disagreement))
-#     bestCombiIndex = np.argmin(div_measure)
-#     bestCombination = combis[bestCombiIndex]
-#
-#     return [classifiersNames[viewIndex][index] for viewIndex, index in
-#             enumerate(bestCombination)], div_measure[
-#                bestCombiIndex]
-#
diff --git a/multiview_platform/mono_multi_view_classifiers/multiview_classifiers/additions/fusion_utils.py b/multiview_platform/mono_multi_view_classifiers/multiview_classifiers/additions/fusion_utils.py
deleted file mode 100644
index 29447d15b9ce9e45c5f3365b342f1b6fbfe07b92..0000000000000000000000000000000000000000
--- a/multiview_platform/mono_multi_view_classifiers/multiview_classifiers/additions/fusion_utils.py
+++ /dev/null
@@ -1,36 +0,0 @@
-import inspect
-
-from ...multiview.multiview_utils import get_monoview_classifier
-from ...utils.multiclass import get_mc_estim
-
-
-class BaseFusionClassifier():
-
-    def init_monoview_estimator(self, classifier_name, classifier_config,
-                                classifier_index=None, multiclass=False):
-        # every branch of the original if/else reduced to the same
-        # assignment; classifier_index is kept for interface compatibility
-        classifier_configs = classifier_config
-        if classifier_configs is not None and classifier_name in classifier_configs:
-            if 'random_state' in inspect.getfullargspec(
-                    get_monoview_classifier(classifier_name).__init__).args:
-                estimator = get_monoview_classifier(classifier_name)(
-                    random_state=self.random_state,
-                    **classifier_configs[classifier_name])
-            else:
-                estimator = get_monoview_classifier(classifier_name)(
-                    **classifier_configs[classifier_name])
-        else:
-            if 'random_state' in inspect.getfullargspec(
-                    get_monoview_classifier(classifier_name).__init__).args:
-                estimator = get_monoview_classifier(classifier_name)(
-                    random_state=self.random_state)
-            else:
-                estimator = get_monoview_classifier(classifier_name)()
-
-        return get_mc_estim(estimator, random_state=self.random_state,
-                            multiview=False, multiclass=multiclass)
diff --git a/multiview_platform/mono_multi_view_classifiers/multiview_classifiers/additions/jumbo_fusion_utils.py b/multiview_platform/mono_multi_view_classifiers/multiview_classifiers/additions/jumbo_fusion_utils.py
deleted file mode 100644
index e9cbac4c770a826183d713d691f7bcee25225cbe..0000000000000000000000000000000000000000
--- a/multiview_platform/mono_multi_view_classifiers/multiview_classifiers/additions/jumbo_fusion_utils.py
+++ /dev/null
@@ -1,83 +0,0 @@
-import numpy as np
-
-from .late_fusion_utils import LateFusionClassifier
-from ...monoview.monoview_utils import CustomRandint
-from ...utils.dataset import get_examples_views_indices
-
-
-class BaseJumboFusion(LateFusionClassifier):
-
-    def __init__(self, random_state, classifiers_names=None,
-                 classifier_configs=None,
-                 nb_cores=1, weights=None, nb_monoview_per_view=1, rs=None):
-        LateFusionClassifier.__init__(self, random_state,
-                                      classifiers_names=classifiers_names,
-                                      classifier_configs=classifier_configs,
-                                      nb_cores=nb_cores, weights=weights,
-                                      rs=rs)
-        self.param_names += ["nb_monoview_per_view", ]
-        self.distribs += [CustomRandint(1, 10)]
-        self.nb_monoview_per_view = nb_monoview_per_view
-
-    def set_params(self, nb_monoview_per_view=1, **params):
-        self.nb_monoview_per_view = nb_monoview_per_view
-        LateFusionClassifier.set_params(self, **params)
-        return self
-
-    def predict(self, X, example_indices=None, view_indices=None):
-        example_indices, view_indices = get_examples_views_indices(X,
-                                                                   example_indices,
-                                                                   view_indices)
-        self._check_views(view_indices)
-        monoview_decisions = self.predict_monoview(X,
-                                                   example_indices=example_indices,
-                                                   view_indices=view_indices)
-        return self.aggregation_estimator.predict(monoview_decisions)
-
-    def fit(self, X, y, train_indices=None, view_indices=None):
-        train_indices, view_indices = get_examples_views_indices(X,
-                                                                 train_indices,
-                                                                 view_indices)
-        self.used_views = view_indices
-        self.init_classifiers(len(view_indices),
-                              nb_monoview_per_view=self.nb_monoview_per_view)
-        self.fit_monoview_estimators(X, y, train_indices=train_indices,
-                                     view_indices=view_indices)
-        monoview_decisions = self.predict_monoview(X,
-                                                   example_indices=train_indices,
-                                                   view_indices=view_indices)
-        self.aggregation_estimator.fit(monoview_decisions, y[train_indices])
-        return self
-
-    def fit_monoview_estimators(self, X, y, train_indices=None,
-                                view_indices=None):
-        multiclass = np.unique(y).shape[0] > 2
-        self.monoview_estimators = [
-            [self.init_monoview_estimator(classifier_name,
-                                          self.classifier_configs[
-                                              classifier_index],
-                                          multiclass=multiclass)
-             for classifier_index, classifier_name
-             in enumerate(self.classifiers_names)]
-            for _ in view_indices]
-
-        self.monoview_estimators = [[estimator.fit(
-            X.get_v(view_indices[idx], train_indices), y[train_indices])
-                                     for estimator in view_estimators]
-                                    for idx, view_estimators in
-                                    enumerate(self.monoview_estimators)]
-        return self
-
-    def predict_monoview(self, X, example_indices=None, view_indices=None):
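-        # one column per (view, classifier) pair, grouped by view:
-        # [v0-clf0, ..., v0-clfK, v1-clf0, ...]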
-        monoview_decisions = np.zeros((len(example_indices),
-                                       len(view_indices) * len(
-                                           self.classifiers_names)))
-        for idx, view_estimators in enumerate(self.monoview_estimators):
-            for estimator_index, estimator in enumerate(view_estimators):
-                monoview_decisions[:, len(
-                    self.classifiers_names) * idx + estimator_index] = estimator.predict(
-                    X.get_v(view_indices[idx],
-                            example_indices))
-        return monoview_decisions
diff --git a/multiview_platform/mono_multi_view_classifiers/multiview_classifiers/additions/late_fusion_utils.py b/multiview_platform/mono_multi_view_classifiers/multiview_classifiers/additions/late_fusion_utils.py
deleted file mode 100644
index 0916f76fb8fa74c2287e6bde40a38f63cdf9743a..0000000000000000000000000000000000000000
--- a/multiview_platform/mono_multi_view_classifiers/multiview_classifiers/additions/late_fusion_utils.py
+++ /dev/null
@@ -1,178 +0,0 @@
-import numpy as np
-
-from .fusion_utils import BaseFusionClassifier
-from ...multiview.multiview_utils import BaseMultiviewClassifier, \
-    get_available_monoview_classifiers, ConfigGenerator
-from ...utils.dataset import get_examples_views_indices
-
-
-class ClassifierDistribution:
-
-    def __init__(self, seed=42, available_classifiers=None):
-        self.random_state = np.random.RandomState(seed)
-        self.available_classifiers = available_classifiers
-
-    def draw(self, nb_view, rs=None):
-        if rs is not None:
-            self.random_state.seed(rs)
-        return self.random_state.choice(self.available_classifiers,
-                                        size=nb_view, replace=True)
-
-
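-# The *Combinator/*Generator classes below expose a scipy-style ``rvs``
-# method so they can serve as hyper-parameter search distributions (they
-# appear in ``LateFusionClassifier.distribs``); each *Distribution object
-# they return then ``draw``s one value per view. A minimal usage sketch,
-# assuming a seeded numpy RandomState:
-#
-#     combinator = ClassifierCombinator(need_probas=False)
-#     distribution = combinator.rvs(np.random.RandomState(42))
-#     names = distribution.draw(nb_view=3)  # one classifier name per view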
-class ClassifierCombinator:
-
-    def __init__(self, need_probas=False):
-        self.available_classifiers = get_available_monoview_classifiers(
-            need_probas)
-
-    def rvs(self, random_state=None):
-        # Derive a fresh seed from the provided random state.
-        return ClassifierDistribution(
-            seed=random_state.randint(2 ** 31 - 1),
-            available_classifiers=self.available_classifiers)
-
-
-class ConfigDistribution:
-
-    def __init__(self, seed=42, available_classifiers=None):
-        self.random_state = np.random.RandomState(seed)
-        self.config_generator = ConfigGenerator(available_classifiers)
-
-    def draw(self, nb_view, rs=None):
-        if rs is not None:
-            self.random_state.seed(rs)
-        config_samples = [self.config_generator.rvs(self.random_state)
-                          for _ in range(nb_view)]
-        return config_samples
-
-
-class MultipleConfigGenerator:
-
-    def __init__(self):
-        self.available_classifiers = get_available_monoview_classifiers()
-
-    def rvs(self, random_state=None):
-        return ConfigDistribution(
-            seed=random_state.randint(2 ** 31 - 1),
-            available_classifiers=self.available_classifiers)
-
-
-class WeightDistribution:
-
-    def __init__(self, seed=42, distribution_type="uniform"):
-        self.random_state = np.random.RandomState(seed)
-        self.distribution_type = distribution_type
-
-    def draw(self, nb_view):
-        if self.distribution_type == "uniform":
-            return self.random_state.random_sample(nb_view)
-
-
-class WeightsGenerator:
-
-    def __init__(self, distribution_type="uniform"):
-        self.distribution_type = distribution_type
-
-    def rvs(self, random_state=None):
-        return WeightDistribution(
-            seed=random_state.randint(2 ** 31 - 1),
-            distribution_type=self.distribution_type)
-
-
-class LateFusionClassifier(BaseMultiviewClassifier, BaseFusionClassifier):
-
-    def __init__(self, random_state=None, classifiers_names=None,
-                 classifier_configs=None, nb_cores=1, weights=None,
-                 rs=None):
-        BaseMultiviewClassifier.__init__(self, random_state)
-        self.classifiers_names = classifiers_names
-        self.classifier_configs = classifier_configs
-        self.nb_cores = nb_cores
-        self.weights = weights
-        self.rs = rs
-        self.param_names = ["classifiers_names", "classifier_configs",
-                            "weights", "rs"]
-        self.distribs = [ClassifierCombinator(need_probas=self.need_probas),
-                         MultipleConfigGenerator(),
-                         WeightsGenerator(),
-                         np.arange(1000)]
-
-    def fit(self, X, y, train_indices=None, view_indices=None):
-        train_indices, view_indices = get_examples_views_indices(X,
-                                                                 train_indices,
-                                                                 view_indices)
-        self.used_views = view_indices
-        multiclass = np.unique(y).shape[0] > 2
-        self.init_params(len(view_indices), multiclass)
-        if np.unique(y[train_indices]).shape[0] > 2:
-            raise ValueError("Multiclass not supported")
-        self.monoview_estimators = [
-            monoview_estimator.fit(X.get_v(view_index, train_indices),
-                                   y[train_indices])
-            for view_index, monoview_estimator
-            in zip(view_indices,
-                   self.monoview_estimators)]
-        return self
-
-    def init_params(self, nb_view, multiclass=False):
-        if self.weights is None:
-            self.weights = np.ones(nb_view) / nb_view
-        elif isinstance(self.weights, WeightDistribution):
-            self.weights = self.weights.draw(nb_view)
-        else:
-            self.weights = self.weights / np.sum(self.weights)
-
-        self.init_classifiers(nb_view)
-
-        self.monoview_estimators = [
-            self.init_monoview_estimator(classifier_name,
-                                         self.classifier_configs[
-                                             classifier_index],
-                                         classifier_index=classifier_index,
-                                         multiclass=multiclass)
-            for classifier_index, classifier_name
-            in enumerate(self.classifiers_names)]
-
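-    # ``init_classifiers`` normalizes ``classifiers_names`` and
-    # ``classifier_configs`` to one entry per estimator, whether they were
-    # given as search-space distributions, a single name or dict, or left
-    # None (defaulting to decision trees with default configurations).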
-    def init_classifiers(self, nb_view, nb_monoview_per_view=None):
-        if nb_monoview_per_view is not None:
-            nb_clfs = nb_monoview_per_view
-        else:
-            nb_clfs = nb_view
-
-        if isinstance(self.classifiers_names, ClassifierDistribution):
-            self.classifiers_names = self.classifiers_names.draw(nb_clfs,
-                                                                 self.rs)
-        elif self.classifiers_names is None:
-            self.classifiers_names = ["decision_tree" for _ in range(nb_clfs)]
-        elif isinstance(self.classifiers_names, str):
-            self.classifiers_names = [self.classifiers_names
-                                      for _ in range(nb_clfs)]
-
-        if isinstance(self.classifier_configs, ConfigDistribution):
-            drawn_configs = self.classifier_configs.draw(nb_clfs, self.rs)
-            self.classifier_configs = [
-                {classifier_name: config[classifier_name]}
-                for config, classifier_name
-                in zip(drawn_configs, self.classifiers_names)]
-        elif isinstance(self.classifier_configs, dict):
-            self.classifier_configs = [
-                {classifier_name: self.classifier_configs[classifier_name]} for
-                classifier_name in self.classifiers_names]
-        elif self.classifier_configs is None:
-            self.classifier_configs = [None for _ in range(nb_clfs)]
-
-    # def verif_clf_views(self, classifier_names, nb_view):
-    #     if classifier_names is None:
-    #         if nb_view is None:
-    #             raise AttributeError(self.__class__.__name__+" must have either classifier_names or nb_views provided.")
-    #         else:
-    #             self.classifiers_names = self.get_classifiers(get_available_monoview_classifiers(), nb_view)
-    #     else:
-    #         if nb_view is None:
-    #             self.classifiers_names = classifier_names
-    #         else:
-    #             if len(classifier_names)==nb_view:
-    #                 self.classifiers_names = classifier_names
-    #             else:
-    #                 warnings.warn("nb_view and classifier_names not matching, choosing nb_view random classifiers in classifier_names.", UserWarning)
-    #                 self.classifiers_names = self.get_classifiers(classifier_names, nb_view)
-
-    def get_classifiers(self, classifiers_names, nb_choices):
-        return self.random_state.choice(classifiers_names, size=nb_choices,
-                                        replace=True)
diff --git a/multiview_platform/mono_multi_view_classifiers/multiview_classifiers/additions/utils.py b/multiview_platform/mono_multi_view_classifiers/multiview_classifiers/additions/utils.py
deleted file mode 100644
index 5fbd4d56aeb6ae4b5bec4f6c8be8e25f24473c44..0000000000000000000000000000000000000000
--- a/multiview_platform/mono_multi_view_classifiers/multiview_classifiers/additions/utils.py
+++ /dev/null
@@ -1,64 +0,0 @@
-import numpy as np
-from sklearn.base import BaseEstimator, ClassifierMixin
-
-
-def get_names(classed_list):
-    return np.array([object_.__class__.__name__ for object_ in classed_list])
-
-
-# class BaseMultiviewClassifier(BaseEstimator, ClassifierMixin):
-#
-#     def __init__(self, random_state):
-#         self.random_state = random_state
-#
-#     def genBestParams(self, detector):
-#         return dict((param_name, detector.best_params_[param_name])
-#                     for param_name in self.param_names)
-#
-#     def genParamsFromDetector(self, detector):
-#         if self.classed_params:
-#             classed_dict = dict((classed_param, get_names(
-#                 detector.cv_results_["param_" + classed_param]))
-#                                 for classed_param in self.classed_params)
-#         if self.param_names:
-#             return [(param_name,
-#                      np.array(detector.cv_results_["param_" + param_name]))
-#                     if param_name not in self.classed_params else (
-#                 param_name, classed_dict[param_name])
-#                     for param_name in self.param_names]
-#         else:
-#             return [()]
-#
-#     def genDistribs(self):
-#         return dict((param_name, distrib) for param_name, distrib in
-#                     zip(self.param_names, self.distribs))
-#
-#     def getConfig(self):
-#         if self.param_names:
-#             return "\n\t\t- " + self.__class__.__name__ + "with " + ", ".join(
-#                 [param_name + " : " + self.to_str(param_name) for param_name in
-#                  self.param_names])
-#         else:
-#             return "\n\t\t- " + self.__class__.__name__ + "with no config."
-#
-#     def to_str(self, param_name):
-#         if param_name in self.weird_strings:
-#             if self.weird_strings[param_name] == "class_name":
-#                 return self.get_params()[param_name].__class__.__name__
-#             else:
-#                 return self.weird_strings[param_name](
-#                     self.get_params()[param_name])
-#         else:
-#             return str(self.get_params()[param_name])
-#
-#     def get_interpretation(self):
-#         return "No detailed interpretation function"
-
-#
-# def get_train_views_indices(dataset, train_indices, view_indices, ):
-#     """This function  is used to get all the examples indices and view indices if needed"""
-#     if view_indices is None:
-#         view_indices = np.arange(dataset.nb_view)
-#     if train_indices is None:
-#         train_indices = range(dataset.get_nb_examples())
-#     return train_indices, view_indices
diff --git a/multiview_platform/mono_multi_view_classifiers/multiview_classifiers/bayesian_inference_fusion.py b/multiview_platform/mono_multi_view_classifiers/multiview_classifiers/bayesian_inference_fusion.py
deleted file mode 100644
index b1cd5f9e6ea962cbffdbf5fa98bfea6e092ce9c0..0000000000000000000000000000000000000000
--- a/multiview_platform/mono_multi_view_classifiers/multiview_classifiers/bayesian_inference_fusion.py
+++ /dev/null
@@ -1,39 +0,0 @@
-import numpy as np
-
-from ..multiview_classifiers.additions.late_fusion_utils import \
-    LateFusionClassifier
-from ..utils.dataset import get_examples_views_indices
-
-classifier_class_name = "BayesianInferenceClassifier"
-
-
-class BayesianInferenceClassifier(LateFusionClassifier):
-    def __init__(self, random_state, classifiers_names=None,
-                 classifier_configs=None, nb_cores=1, weights=None,
-                 rs=None):
-        self.need_probas = True
-        LateFusionClassifier.__init__(self, random_state=random_state,
-                                      classifiers_names=classifiers_names,
-                                      classifier_configs=classifier_configs,
-                                      nb_cores=nb_cores,
-                                      weights=weights,
-                                      rs=rs)
-
-    def predict(self, X, example_indices=None, view_indices=None):
-        example_indices, view_indices = get_examples_views_indices(X,
-                                                                   example_indices,
-                                                                   view_indices)
-        self._check_views(view_indices)
-        self.weights = np.asarray(self.weights) / np.sum(self.weights)
-
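-        # Weighted product (log-opinion pool) combination: each view's
-        # predicted class probabilities are raised to the view's normalized
-        # weight, the per-class products are taken over views, and the
-        # label maximizing that product is returned.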
-        view_scores = []
-        for index, view_index in enumerate(view_indices):
-            view_scores.append(np.power(
-                self.monoview_estimators[index].predict_proba(
-                    X.get_v(view_index,
-                            example_indices)),
-                self.weights[index]))
-        view_scores = np.array(view_scores)
-        predicted_labels = np.argmax(np.prod(view_scores, axis=0), axis=1)
-        return predicted_labels
diff --git a/multiview_platform/mono_multi_view_classifiers/multiview_classifiers/difficulty_fusion.py b/multiview_platform/mono_multi_view_classifiers/multiview_classifiers/difficulty_fusion.py
deleted file mode 100644
index 0c66e5619ba5091576808f9919583ab165c47f2f..0000000000000000000000000000000000000000
--- a/multiview_platform/mono_multi_view_classifiers/multiview_classifiers/difficulty_fusion.py
+++ /dev/null
@@ -1,28 +0,0 @@
-import numpy as np
-
-from multiview_platform.mono_multi_view_classifiers.multiview_classifiers.additions.diversity_utils import \
-    GlobalDiversityFusionClassifier
-
-classifier_class_name = "DifficultyFusion"
-
-
-class DifficultyFusion(GlobalDiversityFusionClassifier):
-
-    def diversity_measure(self, classifiers_decisions, combination, y):
-        _, nb_view, nb_examples = classifiers_decisions.shape
-        scores = np.zeros((nb_view, nb_examples), dtype=int)
-        for view_index, classifier_index in enumerate(combination):
-            scores[view_index, :] = np.logical_not(
-                np.logical_xor(classifiers_decisions[classifier_index,
-                                                     view_index],
-                               y)
-            )
-        # Table of the number of views that succeeded for each example:
-        difficulty_scores = np.sum(scores, axis=0)
-
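-        # Difficulty-style diversity (akin to Kuncheva's difficulty
-        # measure): build the histogram counting how many examples were
-        # correctly classified by exactly 0, 1, ..., L of the L combined
-        # views, and score the combination by the variance of that
-        # histogram.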
-        difficulty_score = np.var(
-            np.array([
-                np.sum((difficulty_scores == view_index))
-                for view_index in range(len(combination) + 1)])
-        )
-        return difficulty_score
diff --git a/multiview_platform/mono_multi_view_classifiers/multiview_classifiers/disagree_fusion.py b/multiview_platform/mono_multi_view_classifiers/multiview_classifiers/disagree_fusion.py
deleted file mode 100644
index cee032a878b8ba9e062654f685317f193607b014..0000000000000000000000000000000000000000
--- a/multiview_platform/mono_multi_view_classifiers/multiview_classifiers/disagree_fusion.py
+++ /dev/null
@@ -1,14 +0,0 @@
-import numpy as np
-
-from multiview_platform.mono_multi_view_classifiers.multiview_classifiers.additions.diversity_utils import \
-    CoupleDiversityFusionClassifier
-
-classifier_class_name = "DisagreeFusion"
-
-
-class DisagreeFusion(CoupleDiversityFusionClassifier):
-
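-    # Pairwise disagreement measure: 1 where the two views' decisions
-    # differ, 0 where they agree.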
-    def diversity_measure(self, first_classifier_decision,
-                          second_classifier_decision, _):
-        return np.logical_xor(first_classifier_decision,
-                              second_classifier_decision)
diff --git a/multiview_platform/mono_multi_view_classifiers/multiview_classifiers/double_fault_fusion.py b/multiview_platform/mono_multi_view_classifiers/multiview_classifiers/double_fault_fusion.py
deleted file mode 100644
index 12eb6b64c39a1606f950e24e3a2e30e35fee10b9..0000000000000000000000000000000000000000
--- a/multiview_platform/mono_multi_view_classifiers/multiview_classifiers/double_fault_fusion.py
+++ /dev/null
@@ -1,14 +0,0 @@
-import numpy as np
-
-from multiview_platform.mono_multi_view_classifiers.multiview_classifiers.additions.diversity_utils import \
-    CoupleDiversityFusionClassifier
-
-classifier_class_name = "DoubleFaultFusion"
-
-
-class DoubleFaultFusion(CoupleDiversityFusionClassifier):
-
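-    # Double-fault measure: 1 where both views misclassify the example,
-    # i.e. where both decisions differ from the ground truth y.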
-    def diversity_measure(self, first_classifier_decision,
-                          second_classifier_decision, y):
-        return np.logical_and(np.logical_xor(first_classifier_decision, y),
-                              np.logical_xor(second_classifier_decision, y))
diff --git a/multiview_platform/mono_multi_view_classifiers/multiview_classifiers/entropy_fusion.py b/multiview_platform/mono_multi_view_classifiers/multiview_classifiers/entropy_fusion.py
deleted file mode 100644
index 3c3d5aef7c6453540e06083b37bba0f1935ae62b..0000000000000000000000000000000000000000
--- a/multiview_platform/mono_multi_view_classifiers/multiview_classifiers/entropy_fusion.py
+++ /dev/null
@@ -1,26 +0,0 @@
-import numpy as np
-
-from multiview_platform.mono_multi_view_classifiers.multiview_classifiers.additions.diversity_utils import \
-    GlobalDiversityFusionClassifier
-
-classifier_class_name = "EntropyFusion"
-
-
-class EntropyFusion(GlobalDiversityFusionClassifier):
-
-    def diversity_measure(self, classifiers_decisions, combination, y):
-        _, nb_view, nb_examples = classifiers_decisions.shape
-        scores = np.zeros((nb_view, nb_examples), dtype=int)
-        for view_index, classifier_index in enumerate(combination):
-            scores[view_index] = np.logical_not(
-                np.logical_xor(
-                    classifiers_decisions[classifier_index, view_index],
-                    y)
-            )
-        entropy_scores = np.sum(scores, axis=0)
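-        # Entropy-style diversity: for each example, take the minimum of
-        # the numbers of correct and incorrect views and normalize it by
-        # ``nb_view - int(nb_view / 2)``; the score is the mean over
-        # examples, higher meaning more disagreement between views.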
-        nb_incorrect = nb_view - entropy_scores
-        entropy_score = np.mean(
-            np.minimum(entropy_scores, nb_incorrect).astype(float)
-            / (nb_view - int(nb_view / 2)))
-        return entropy_score
diff --git a/multiview_platform/mono_multi_view_classifiers/multiview_classifiers/majority_voting_fusion.py b/multiview_platform/mono_multi_view_classifiers/multiview_classifiers/majority_voting_fusion.py
deleted file mode 100644
index 53a255c764f79c8e68271caba38539dea019c774..0000000000000000000000000000000000000000
--- a/multiview_platform/mono_multi_view_classifiers/multiview_classifiers/majority_voting_fusion.py
+++ /dev/null
@@ -1,55 +0,0 @@
-import numpy as np
-
-from ..multiview_classifiers.additions.late_fusion_utils import \
-    LateFusionClassifier
-from ..utils.dataset import get_examples_views_indices
-
-classifier_class_name = "MajorityVoting"
-
-
-class VotingIndecision(Exception):
-    pass
-
-
-class MajorityVoting(LateFusionClassifier):
-    def __init__(self, random_state, classifiers_names=None,
-                 classifier_configs=None, weights=None, nb_cores=1, rs=None):
-        self.need_probas = False
-        LateFusionClassifier.__init__(self, random_state=random_state,
-                                      classifiers_names=classifiers_names,
-                                      classifier_configs=classifier_configs,
-                                      nb_cores=nb_cores,
-                                      weights=weights,
-                                      rs=rs)
-
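-    # Weighted plurality voting: each view adds its weight to the class it
-    # predicts; if as many classes as there are views are tied at the
-    # maximum, the vote carries no information and VotingIndecision is
-    # raised.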
-    def predict(self, X, example_indices=None, view_indices=None):
-        example_indices, view_indices = get_examples_views_indices(X,
-                                                                   example_indices,
-                                                                   view_indices)
-        self._check_views(view_indices)
-        n_examples = len(example_indices)
-        votes = np.zeros((n_examples, X.get_nb_class(example_indices)),
-                         dtype=float)
-        monoview_decisions = np.zeros((n_examples, X.nb_view),
-                                      dtype=int)
-        for index, view_index in enumerate(view_indices):
-            monoview_decisions[:, index] = self.monoview_estimators[
-                index].predict(X.get_v(view_index, example_indices))
-        for example_index in range(n_examples):
-            for view_index, feature_classification in enumerate(
-                    monoview_decisions[example_index, :]):
-                votes[example_index, feature_classification] += self.weights[
-                    view_index]
-            nb_maximum = len(
-                np.where(votes[example_index] == max(votes[example_index]))[0])
-            if nb_maximum == X.nb_view:
-                raise VotingIndecision(
-                    "Majority voting can't decide, each classifier has voted for a different class")
-
-        predicted_labels = np.argmax(votes, axis=1)
-        # Can be upgraded by restarting a new classification process if
-        # there are multiple maximums ?:
-        # 	while nbMaximum>1:
-        # 		relearn with only the classes that have a maximum number of vote
-        return predicted_labels
diff --git a/multiview_platform/mono_multi_view_classifiers/multiview_classifiers/svm_jumbo_fusion.py b/multiview_platform/mono_multi_view_classifiers/multiview_classifiers/svm_jumbo_fusion.py
deleted file mode 100644
index d9a2e38d21a9be49690ca372616ebde60a438f55..0000000000000000000000000000000000000000
--- a/multiview_platform/mono_multi_view_classifiers/multiview_classifiers/svm_jumbo_fusion.py
+++ /dev/null
@@ -1,36 +0,0 @@
-from sklearn.svm import SVC
-
-from .additions.jumbo_fusion_utils import BaseJumboFusion
-from ..monoview.monoview_utils import CustomUniform, CustomRandint
-
-classifier_class_name = "SVMJumboFusion"
-
-
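-# SVMJumboFusion plugs an SVC in as the aggregation (second-stage)
-# estimator of the jumbo fusion scheme, exposing its C, kernel and degree
-# as additional hyper-parameters of the fused classifier.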
-class SVMJumboFusion(BaseJumboFusion):
-
-    def __init__(self, random_state=None, classifiers_names=None,
-                 classifier_configs=None, nb_cores=1, weights=None,
-                 nb_monoview_per_view=1, C=1.0, kernel="rbf", degree=2,
-                 rs=None):
-        self.need_probas = False
-        BaseJumboFusion.__init__(self, random_state,
-                                 classifiers_names=classifiers_names,
-                                 classifier_configs=classifier_configs,
-                                 nb_cores=nb_cores, weights=weights,
-                                 nb_monoview_per_view=nb_monoview_per_view,
-                                 rs=rs)
-        self.param_names += ["C", "kernel", "degree"]
-        self.distribs += [CustomUniform(), ["rbf", "poly", "linear"],
-                          CustomRandint(2, 5)]
-        self.aggregation_estimator = SVC(C=C, kernel=kernel, degree=degree)
-        self.C = C
-        self.kernel = kernel
-        self.degree = degree
-
-    def set_params(self, C=1.0, kernel="rbf", degree=2, **params):
-        super(SVMJumboFusion, self).set_params(**params)
-        self.C = C
-        self.degree = degree
-        self.kernel = kernel
-        self.aggregation_estimator.set_params(C=C, kernel=kernel, degree=degree)
-        return self
diff --git a/multiview_platform/mono_multi_view_classifiers/multiview_classifiers/weighted_linear_early_fusion.py b/multiview_platform/mono_multi_view_classifiers/multiview_classifiers/weighted_linear_early_fusion.py
deleted file mode 100644
index 6635119f14390c1dddbe4dd14ccf0184615aad77..0000000000000000000000000000000000000000
--- a/multiview_platform/mono_multi_view_classifiers/multiview_classifiers/weighted_linear_early_fusion.py
+++ /dev/null
@@ -1,118 +0,0 @@
-import numpy as np
-
-from multiview_platform.mono_multi_view_classifiers import monoview_classifiers
-from .additions.fusion_utils import BaseFusionClassifier
-from ..multiview.multiview_utils import get_available_monoview_classifiers, \
-    BaseMultiviewClassifier, ConfigGenerator
-from ..utils.dataset import get_examples_views_indices
-from ..utils.multiclass import get_mc_estim, MultiClassWrapper
-
-# from ..utils.dataset import get_v
-
-classifier_class_name = "WeightedLinearEarlyFusion"
-
-
-class WeightedLinearEarlyFusion(BaseMultiviewClassifier, BaseFusionClassifier):
-    """
-    WeightedLinearEarlyFusion
-
-    Parameters
-    ----------
-    random_state
-    view_weights
-    monoview_classifier_name
-    monoview_classifier_config
-
-    Attributes
-    ----------
-    """
-
-    def __init__(self, random_state=None, view_weights=None,
-                 monoview_classifier_name="decision_tree",
-                 monoview_classifier_config={}):
-        BaseMultiviewClassifier.__init__(self, random_state=random_state)
-        self.view_weights = view_weights
-        self.monoview_classifier_name = monoview_classifier_name
-        self.short_name = "early_fusion"
-        if monoview_classifier_name in monoview_classifier_config:
-            self.monoview_classifier_config = monoview_classifier_config[
-                monoview_classifier_name]
-        else:
-            self.monoview_classifier_config = monoview_classifier_config
-        # monoview_classifier_module = getattr(monoview_classifiers,
-        #                                      self.monoview_classifier_name)
-        # monoview_classifier_class = getattr(monoview_classifier_module,
-        #                                     monoview_classifier_module.classifier_class_name)
-        self.monoview_classifier = self.init_monoview_estimator(monoview_classifier_name, monoview_classifier_config)
-        self.param_names = ["monoview_classifier_name",
-                            "monoview_classifier_config"]
-        self.distribs = [get_available_monoview_classifiers(),
-                         ConfigGenerator(get_available_monoview_classifiers())]
-        self.classed_params = []
-        self.weird_strings = {}
-
-    def set_params(self, monoview_classifier_name="decision_tree",
-                   monoview_classifier_config={}, **params):
-        self.monoview_classifier_name = monoview_classifier_name
-        self.monoview_classifier = self.init_monoview_estimator(
-            monoview_classifier_name,
-            monoview_classifier_config)
-        self.monoview_classifier_config = self.monoview_classifier.get_params()
-        self.short_name = "early_fusion"
-        return self
-
-    def get_params(self, deep=True):
-        return {"random_state": self.random_state,
-                "view_weights": self.view_weights,
-                "monoview_classifier_name": self.monoview_classifier_name,
-                "monoview_classifier_config": self.monoview_classifier_config}
-
-    def fit(self, X, y, train_indices=None, view_indices=None):
-        train_indices, X = self.transform_data_to_monoview(X, train_indices,
-                                                           view_indices)
-        self.used_views = view_indices
-        if np.unique(y[train_indices]).shape[0] > 2 and \
-                not (isinstance(self.monoview_classifier, MultiClassWrapper)):
-            self.monoview_classifier = get_mc_estim(self.monoview_classifier,
-                                                    self.random_state,
-                                                    multiview=False,
-                                                    y=y[train_indices])
-        self.monoview_classifier.fit(X, y[train_indices])
-        self.monoview_classifier_config = self.monoview_classifier.get_params()
-        return self
-
-    def predict(self, X, example_indices=None, view_indices=None):
-        _, X = self.transform_data_to_monoview(X, example_indices, view_indices)
-        self._check_views(self.view_indices)
-        predicted_labels = self.monoview_classifier.predict(X)
-        return predicted_labels
-
-    def transform_data_to_monoview(self, dataset, example_indices,
-                                   view_indices):
-        """Here, we extract the data from the HDF5 dataset file and store all
-        the concatenated views in one variable"""
-        example_indices, self.view_indices = get_examples_views_indices(dataset,
-                                                                        example_indices,
-                                                                        view_indices)
-        if self.view_weights is None:
-            self.view_weights = np.ones(len(self.view_indices), dtype=float)
-        else:
-            self.view_weights = np.array(self.view_weights)
-        self.view_weights /= float(np.sum(self.view_weights))
-
-        X = self.hdf5_to_monoview(dataset, example_indices)
-        return example_indices, X
-
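-    # ``transform_data_to_monoview`` resolves the indices and normalizes the
-    # view weights; ``hdf5_to_monoview`` then builds the single weighted,
-    # concatenated feature matrix the monoview classifier is trained on.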
-    def hdf5_to_monoview(self, dataset, examples):
-        """Concatenates the asked examples of each view, each view being
-        scaled by its normalized weight."""
-        monoview_data = np.concatenate(
-            [view_weight * dataset.get_v(view_idx, examples)
-             for view_weight, view_idx
-             in zip(self.view_weights, self.view_indices)],
-            axis=1)
-        return monoview_data
-
-    # def set_monoview_classifier_config(self, monoview_classifier_name, monoview_classifier_config):
-    #     if monoview_classifier_name in monoview_classifier_config:
-    #         self.monoview_classifier.set_params(**monoview_classifier_config[monoview_classifier_name])
-    #     else:
-    #         self.monoview_classifier.set_params(**monoview_classifier_config)
diff --git a/multiview_platform/mono_multi_view_classifiers/multiview_classifiers/weighted_linear_late_fusion.py b/multiview_platform/mono_multi_view_classifiers/multiview_classifiers/weighted_linear_late_fusion.py
deleted file mode 100644
index 403791ceec03ef3c18e9152a996bc5a39d41bd54..0000000000000000000000000000000000000000
--- a/multiview_platform/mono_multi_view_classifiers/multiview_classifiers/weighted_linear_late_fusion.py
+++ /dev/null
@@ -1,31 +0,0 @@
-import numpy as np
-
-from ..multiview_classifiers.additions.late_fusion_utils import \
-    LateFusionClassifier
-from ..utils.dataset import get_examples_views_indices
-
-classifier_class_name = "WeightedLinearLateFusion"
-
-
-class WeightedLinearLateFusion(LateFusionClassifier):
-    def __init__(self, random_state, classifiers_names=None,
-                 classifier_configs=None, weights=None, nb_cores=1, rs=None):
-        self.need_probas = True
-        LateFusionClassifier.__init__(self, random_state=random_state,
-                                      classifiers_names=classifiers_names,
-                                      classifier_configs=classifier_configs,
-                                      nb_cores=nb_cores, weights=weights, rs=rs)
-
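-    # Weighted-sum late fusion: each view's probability vector is scaled by
-    # the view's weight, the scaled vectors are summed over views, and the
-    # argmax over classes is returned.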
-    def predict(self, X, example_indices=None, view_indices=None):
-        example_indices, view_indices = get_examples_views_indices(X,
-                                                                   example_indices,
-                                                                   view_indices)
-        self._check_views(view_indices)
-        view_scores = []
-        for index, view_index in enumerate(view_indices):
-            view_scores.append(
-                np.array(self.monoview_estimators[index].predict_proba(
-                    X.get_v(view_index, example_indices))) * self.weights[index])
-        view_scores = np.array(view_scores)
-        predicted_labels = np.argmax(np.sum(view_scores, axis=0), axis=1)
-        return predicted_labels
diff --git a/multiview_platform/mono_multi_view_classifiers/result_analysis/__init__.py b/multiview_platform/mono_multi_view_classifiers/result_analysis/__init__.py
deleted file mode 100644
index e69de29bb2d1d6434b8b29ae775ad8c2e48c5391..0000000000000000000000000000000000000000
diff --git a/multiview_platform/mono_multi_view_classifiers/result_analysis/duration_analysis.py b/multiview_platform/mono_multi_view_classifiers/result_analysis/duration_analysis.py
deleted file mode 100644
index fb3a539cf99d1c16132d838fb7c3ecf81f3c41e9..0000000000000000000000000000000000000000
--- a/multiview_platform/mono_multi_view_classifiers/result_analysis/duration_analysis.py
+++ /dev/null
@@ -1,47 +0,0 @@
-import os
-import plotly
-import pandas as pd
-
-
-def get_duration(results):
-    df = pd.DataFrame(columns=["hps", "fit", "pred"], )
-    for classifier_result in results:
-        df.at[classifier_result.get_classifier_name(),
-              "hps"] = classifier_result.hps_duration
-        df.at[classifier_result.get_classifier_name(),
-              "fit"] = classifier_result.fit_duration
-        df.at[classifier_result.get_classifier_name(),
-              "pred"] = classifier_result.pred_duration
-    return df
-
-def plot_durations(durations, directory, database_name, durations_stds=None): # pragma: no cover
-    file_name = os.path.join(directory, database_name + "-durations")
-    durations.to_csv(file_name+"_dataframe.csv")
-    fig = plotly.graph_objs.Figure()
-    if durations_stds is None:
-        durations_stds = pd.DataFrame(0, durations.index, durations.columns)
-    else:
-        durations_stds.to_csv(file_name+"_stds_dataframe.csv")
-    fig.add_trace(plotly.graph_objs.Bar(name='Hyper-parameter Optimization',
-                                        x=durations.index,
-                                        y=durations['hps'],
-                                        error_y=dict(type='data',
-                                                     array=durations_stds["hps"]),
-                                        marker_color="grey"))
-    fig.add_trace(plotly.graph_objs.Bar(name='Fit (on train set)',
-                                        x=durations.index,
-                                        y=durations['fit'],
-                                        error_y=dict(type='data',
-                                                     array=durations_stds["fit"]),
-                                        marker_color="black"))
-    fig.add_trace(plotly.graph_objs.Bar(name='Prediction (on test set)',
-                                        x=durations.index,
-                                        y=durations['pred'],
-                                        error_y=dict(type='data',
-                                                     array=durations_stds["pred"]),
-                                        marker_color="lightgrey"))
-    fig.update_layout(title="Durations for each classfier",
-                      yaxis_title="Duration (s)")
-    fig.update_layout(paper_bgcolor='rgba(0,0,0,0)',
-                      plot_bgcolor='rgba(0,0,0,0)')
-    plotly.offline.plot(fig, filename=file_name + ".html", auto_open=False)
\ No newline at end of file
diff --git a/multiview_platform/mono_multi_view_classifiers/result_analysis/error_analysis.py b/multiview_platform/mono_multi_view_classifiers/result_analysis/error_analysis.py
deleted file mode 100644
index 97aa6baa7ad3f1b6902c69eb4287a4005660f78e..0000000000000000000000000000000000000000
--- a/multiview_platform/mono_multi_view_classifiers/result_analysis/error_analysis.py
+++ /dev/null
@@ -1,289 +0,0 @@
-# Import built-in modules
-import logging
-import os
-
-import matplotlib as mpl
-# Import third party modules
-import matplotlib.pyplot as plt
-import numpy as np
-import pandas as pd
-import plotly
-from matplotlib.patches import Patch
-
-# Import own Modules
-
-
-def get_example_errors(ground_truth, results):
-    r"""Used to get for each classifier and each example whether the classifier
-     has misclassified the example or not.
-
-    Parameters
-    ----------
-    ground_truth : numpy array of 0, 1 and -100 (if multiclass)
-        The array with the real labels of the examples
-    results : list of MonoviewResult and MultiviewResults objects
-        A list containing all the results for all the mono- and multi-view
-        experiments.
-
-    Returns
-    -------
-    example_errors : dict of np.array
-        For each classifier, an entry with a `np.array` over the examples:
-        1 if the example was well-classified, 0 if not and, for multiclass
-        classification, -100 if the example was not seen during the
-        one-versus-one classification.
-    """
-    example_errors = {}
-
-    for classifier_result in results:
-        error_on_examples = np.equal(classifier_result.full_labels_pred,
-                                     ground_truth).astype(int)
-        unseen_examples = np.where(ground_truth == -100)[0]
-        error_on_examples[unseen_examples] = -100
-        example_errors[
-            classifier_result.get_classifier_name()] = error_on_examples
-    return example_errors
-
-
-def publish_example_errors(example_errors, directory, database_name,
-                           labels_names, example_ids, labels):  # pragma: no cover
-    logging.debug("Start:\t Label analysis figure generation")
-
-    base_file_name = os.path.join(directory, database_name + "-")
-
-    nb_classifiers, nb_examples, classifiers_names, \
-    data_2d, error_on_examples = gen_error_data(example_errors)
-
-    np.savetxt(base_file_name + "2D_plot_data.csv", data_2d, delimiter=",")
-    np.savetxt(base_file_name + "bar_plot_data.csv", error_on_examples,
-               delimiter=",")
-
-    plot_2d(data_2d, classifiers_names, nb_classifiers, base_file_name,
-            example_ids=example_ids, labels=labels)
-
-    plot_errors_bar(error_on_examples, nb_examples,
-                    base_file_name, example_ids=example_ids)
-
-    logging.debug("Done:\t Label analysis figures generation")
-
-
-def publish_all_example_errors(iter_results, directory,
-                               stats_iter,
-                               example_ids, labels): # pragma: no cover
-    logging.debug(
-        "Start:\t Global label analysis figure generation")
-
-    nb_examples, nb_classifiers, data, \
-    error_on_examples, classifier_names = gen_error_data_glob(iter_results,
-                                                              stats_iter)
-
-    np.savetxt(os.path.join(directory, "clf_errors.csv"), data, delimiter=",")
-    np.savetxt(os.path.join(directory, "example_errors.csv"), error_on_examples,
-               delimiter=",")
-
-    plot_2d(data, classifier_names, nb_classifiers,
-            os.path.join(directory, ""), stats_iter=stats_iter,
-            example_ids=example_ids, labels=labels)
-    plot_errors_bar(error_on_examples, nb_examples, os.path.join(directory, ""),
-                    example_ids=example_ids)
-
-    logging.debug(
-        "Done:\t Global label analysis figures generation")
-
-
-def gen_error_data(example_errors):
-    r"""Used to format the error data in order to plot it efficiently. The
-    data is saves in a `.csv` file.
-
-    Parameters
-    ----------
-    example_errors : dict of dicts of np.arrays
-        A dictionary conatining all the useful data. Organized as :
-        `example_errors[<classifier_name>]["error_on_examples"]` is a np.array
-        of ints with a
-        - 1 if the classifier `<classifier_name>` classifier well the example,
-        - 0 if it fail to classify the example,
-        - -100 if it did not classify the example (multiclass one versus one).
-
-    Returns
-    -------
-    nbClassifiers : int
-        Number of different classifiers.
-    nbExamples : int
-        NUmber of examples.
-    nbCopies : int
-        The number of times the data is copied (classifier wise) in order for
-        the figure to be more readable.
-    classifiers_names : list of strs
-        The names fo the classifiers.
-    data : np.array of shape `(nbClassifiers, nbExamples)`
-        A matrix with zeros where the classifier failed to classifiy the
-        example, ones where it classified it well
-        and -100 if the example was not classified.
-    error_on_examples : np.array of shape `(nbExamples,)`
-        An array counting how many classifiers failed to classifiy each
-        examples.
-    """
-    nb_classifiers = len(example_errors)
-    nb_examples = len(list(example_errors.values())[0])
-    classifiers_names = list(example_errors.keys())
-
-    data_2d = np.zeros((nb_examples, nb_classifiers))
-    for classifierIndex, (classifier_name, error_on_examples) in enumerate(
-            example_errors.items()):
-        data_2d[:, classifierIndex] = error_on_examples
-    error_on_examples = np.sum(data_2d, axis=1) / nb_classifiers
-    return nb_classifiers, nb_examples, classifiers_names, data_2d, error_on_examples
-
-
-def gen_error_data_glob(iter_results, stats_iter):
-    nb_examples = next(iter(iter_results.values())).shape[0]
-    nb_classifiers = len(iter_results)
-    data = np.zeros((nb_examples, nb_classifiers), dtype=int)
-    classifier_names = []
-    for clf_index, (classifier_name, error_data) in enumerate(
-            iter_results.items()):
-        data[:, clf_index] = error_data
-        classifier_names.append(classifier_name)
-    error_on_examples = np.sum(data, axis=1) / (
-                nb_classifiers * stats_iter)
-    return nb_examples, nb_classifiers, data, error_on_examples, \
-           classifier_names
-
-
-def plot_2d(data, classifiers_names, nb_classifiers, file_name, labels=None,
-            stats_iter=1, use_plotly=True, example_ids=None): # pragma: no cover
-    r"""Used to generate a 2D plot of the errors.
-
-    Parameters
-    ----------
-    data : np.array of shape `(nbClassifiers, nbExamples)`
-        A matrix with zeros where the classifier failed to classifiy the example, ones where it classified it well
-        and -100 if the example was not classified.
-    classifiers_names : list of str
-        The names of the classifiers.
-    nb_classifiers : int
-        The number of classifiers.
-    file_name : str
-        The name of the file in which the figure will be saved ("error_analysis_2D.png" will be added at the end)
-    minSize : int, optinal, default: 10
-        The minimum width and height of the figure.
-    width_denominator : float, optional, default: 1.0
-        To obtain the image width, the number of classifiers will be divided by this number.
-    height_denominator : float, optional, default: 1.0
-        To obtain the image width, the number of examples will be divided by this number.
-    stats_iter : int, optional, default: 1
-        The number of statistical iterations realized.
-
-    Returns
-    -------
-    """
-    fig, ax = plt.subplots(nrows=1, ncols=1)
-    label_index_list = np.concatenate([np.where(labels == i)[0] for i in
-                                       np.unique(
-                                           labels)])
-    cmap, norm = iter_cmap(stats_iter)
-    cax = plt.imshow(data[np.flip(label_index_list), :], cmap=cmap, norm=norm,
-                     aspect='auto')
-    plt.title('Errors depending on the classifier')
-    ticks = np.arange(0, nb_classifiers, 1)
-    tick_labels = classifiers_names
-    plt.xticks(ticks, tick_labels, rotation="vertical")
-    plt.yticks([], [])
-    plt.ylabel("Examples")
-    cbar = fig.colorbar(cax, ticks=[-100 * stats_iter / 2, 0, stats_iter])
-    cbar.ax.set_yticklabels(['Unseen', 'Always Wrong', 'Always Right'])
-
-    fig.savefig(file_name + "error_analysis_2D.png", bbox_inches="tight",
-                transparent=True)
-    plt.close()
-    ### The following part is used to generate an interactive graph.
-    if use_plotly:
-         # [np.where(labels==i)[0] for i in np.unique(labels)]
-        hover_text = [
-            [example_ids[example_index] + " failed "
-             + str(stats_iter - data[example_index, classifier_index])
-             + " time(s), labelled " + str(labels[example_index])
-             for classifier_index in range(data.shape[1])]
-            for example_index in range(data.shape[0])]
-        fig = plotly.graph_objs.Figure()
-        fig.add_trace(plotly.graph_objs.Heatmap(
-            x=list(classifiers_names),
-            y=[example_ids[label_ind] for label_ind in label_index_list],
-            z=data[label_index_list, :],
-            text=[hover_text[label_ind] for label_ind in label_index_list],
-            hoverinfo=["y", "x", "text"],
-            colorscale="Greys",
-            colorbar=dict(tickvals=[0, stats_iter],
-                          ticktext=["Always Wrong", "Always Right"]),
-            reversescale=True), )
-        fig.update_yaxes(title_text="Examples", showticklabels=True)
-        fig.update_layout(paper_bgcolor='rgba(0,0,0,0)',
-                          plot_bgcolor='rgba(0,0,0,0)')
-        fig.update_xaxes(showticklabels=True, )
-        plotly.offline.plot(fig, filename=file_name + "error_analysis_2D.html",
-                            auto_open=False)
-        del fig
-
-
-def plot_errors_bar(error_on_examples, nb_examples, file_name,
-                    use_plotly=True, example_ids=None): # pragma: no cover
-    r"""Used to generate a barplot of the muber of classifiers that failed to classify each examples
-
-    Parameters
-    ----------
-    error_on_examples : np.array of shape `(nbExamples,)`
-        An array counting how many classifiers failed to classifiy each examples.
-    classifiers_names : list of str
-        The names of the classifiers.
-    nb_classifiers : int
-        The number of classifiers.
-    nb_examples : int
-        The number of examples.
-    file_name : str
-        The name of the file in which the figure will be saved ("error_analysis_2D.png" will be added at the end)
-
-    Returns
-    -------
-    """
-    fig, ax = plt.subplots()
-    x = np.arange(nb_examples)
-    plt.bar(x, 1 - error_on_examples)
-    plt.title("Proportion of classifiers that failed to classify each example")
-    fig.savefig(file_name + "error_analysis_bar.png", transparent=True)
-    plt.close()
-    if use_plotly:
-        fig = plotly.graph_objs.Figure([plotly.graph_objs.Bar(x=example_ids, y=1-error_on_examples)])
-        fig.update_layout(paper_bgcolor='rgba(0,0,0,0)',
-                          plot_bgcolor='rgba(0,0,0,0)')
-        plotly.offline.plot(fig, filename=file_name + "error_analysis_bar.html",
-                            auto_open=False)
-
-
-
-def iter_cmap(statsIter): # pragma: no cover
-    r"""Used to generate a colormap that will have a tick for each iteration : the whiter the better.
-
-    Parameters
-    ----------
-    statsIter : int
-        The number of statistical iterations.
-
-    Returns
-    -------
-    cmap : matplotlib.colors.ListedColormap object
-        The colormap.
-    norm : matplotlib.colors.BoundaryNorm object
-        The bounds for the colormap.
-    """
-    cmapList = ["red", "0.0"] + [str(float((i + 1)) / statsIter) for i in
-                                 range(statsIter)]
-    cmap = mpl.colors.ListedColormap(cmapList)
-    bounds = [-100 * statsIter - 0.5, -0.5]
-    for i in range(statsIter):
-        bounds.append(i + 0.5)
-    bounds.append(statsIter + 0.5)
-    norm = mpl.colors.BoundaryNorm(bounds, cmap.N)
-    return cmap, norm
diff --git a/multiview_platform/mono_multi_view_classifiers/result_analysis/execution.py b/multiview_platform/mono_multi_view_classifiers/result_analysis/execution.py
deleted file mode 100644
index e620a9340b47b05760706f72cab16ae208eeb053..0000000000000000000000000000000000000000
--- a/multiview_platform/mono_multi_view_classifiers/result_analysis/execution.py
+++ /dev/null
@@ -1,247 +0,0 @@
-import logging
-import pandas as pd
-
-from .tracebacks_analysis import save_failed, publish_tracebacks
-from .duration_analysis import plot_durations, get_duration
-from .metric_analysis import get_metrics_scores, publish_metrics_graphs, publish_all_metrics_scores
-from .error_analysis import get_example_errors, publish_example_errors, publish_all_example_errors
-from .feature_importances import get_feature_importances, publish_feature_importances
-
-def analyze(results, stats_iter, benchmark_argument_dictionaries,
-            metrics, directory, example_ids, labels):  # pragma: no cover
-    """Used to analyze the results of the previous benchmarks"""
-    data_base_name = benchmark_argument_dictionaries[0]["args"]["name"]
-
-    results_means_std, iter_results, flagged_failed, label_names = analyze_iterations(
-        results, benchmark_argument_dictionaries,
-        stats_iter, metrics, example_ids, labels)
-    if flagged_failed:
-        save_failed(flagged_failed, directory)
-
-    if stats_iter > 1:
-        results_means_std = analyze_all(
-            iter_results, stats_iter, directory,
-            data_base_name, example_ids, label_names)
-    return results_means_std
-
-
-def analyze_iterations(results, benchmark_argument_dictionaries, stats_iter,
-                       metrics, example_ids, labels):
-    r"""Used to extract and format the results of the different
-    experimentations performed.
-
-    Parameters
-    ----------
-    results : list
-        The result list returned by the benchmark execution function. For each
-         executed benchmark, contains
-        a flag & a result element.
-        The flag is a way to identify to which benchmark the results belong,
-        formatted this way :
-        `flag = iter_index, [classifierPositive, classifierNegative]` with
-        - `iter_index` the index of the statistical iteration
-        - `[classifierPositive, classifierNegative]` the indices of the labels
-        considered positive and negative
-        by the classifier (mainly useful for one versus one multiclass
-        classification).
-    benchmark_argument_dictionaries : list of dicts
-        The list of all the arguments passed to the benchmark executing
-        functions.
-    statsIter : int
-        The number of statistical iterations.
-    metrics : list of lists
-        THe list containing the metrics and their configuration.
-
-    Returns
-    -------
-    results : list of dicts of dicts
-        The list contains a dictionary for each statistical iteration. This
-        dictionary contains a dictionary for each
-        label combination, regrouping the scores for each metrics and the
-        information useful to plot errors on examples.
-    """
-    logging.debug("Start:\t Analyzing all results")
-    iter_results = {"metrics_scores": [i for i in range(stats_iter)],
-                    "class_metrics_scores": [i for i in range(stats_iter)],
-                    "example_errors": [i for i in range(stats_iter)],
-                    "feature_importances": [i for i in range(stats_iter)],
-                    "durations":[i for i in range(stats_iter)]}
-    flagged_tracebacks_list = []
-    for iter_index, result, tracebacks in results:
-        arguments = get_arguments(benchmark_argument_dictionaries, iter_index)
-        labels_names = list(arguments["labels_dictionary"].values())
-
-        metrics_scores, class_metric_scores = get_metrics_scores(metrics, result, labels_names)
-        example_errors = get_example_errors(labels, result)
-        feature_importances = get_feature_importances(result)
-        durations = get_duration(result)
-        directory = arguments["directory"]
-
-        database_name = arguments["args"]["name"]
-
-        flagged_tracebacks_list += publish_tracebacks(directory, database_name,
-                                                      labels_names, tracebacks,
-                                                      iter_index)
-        res = publish_metrics_graphs(metrics_scores, directory, database_name,
-                                     labels_names, class_metric_scores)
-        publish_example_errors(example_errors, directory, database_name,
-                               labels_names, example_ids, labels)
-        publish_feature_importances(feature_importances, directory,
-                                    database_name)
-        plot_durations(durations, directory, database_name)
-
-        iter_results["metrics_scores"][iter_index] = metrics_scores
-        iter_results["class_metrics_scores"][iter_index] = class_metric_scores
-        iter_results["example_errors"][iter_index] = example_errors
-        iter_results["feature_importances"][iter_index] = feature_importances
-        iter_results["labels"] = labels
-        iter_results["durations"][iter_index] = durations
-
-    logging.debug("Done:\t Analyzing all results")
-
-    return res, iter_results, flagged_tracebacks_list, labels_names
-
-
-def analyze_all(iter_results, stats_iter, directory, data_base_name,
-                example_ids, label_names): # pragma: no cover
-    """Used to format the results in order to plot the mean results on
-    the iterations"""
-    metrics_analysis, class_metrics_analysis, error_analysis, feature_importances, \
-    feature_importances_stds, labels, duration_means, \
-    duration_stds = format_previous_results(iter_results)
-
-    results = publish_all_metrics_scores(metrics_analysis, class_metrics_analysis,
-                                         directory,
-                                         data_base_name, stats_iter, label_names)
-    publish_all_example_errors(error_analysis, directory, stats_iter,
-                               example_ids, labels)
-    publish_feature_importances(feature_importances, directory,
-                                data_base_name, feature_importances_stds)
-    plot_durations(duration_means, directory, data_base_name, duration_stds)
-    return results
-
-def get_arguments(benchmark_argument_dictionaries, iter_index):
-    r"""Used to get the arguments passed to the benchmark executing function
-    corresponding to the flag of an
-    experimentation.
-
-    Parameters
-    ----------
-    benchmark_argument_dictionaries : list of dicts
-        The list of all the arguments passed to the benchmark executing
-        functions.
-    iter_index : int
-        The index of the statistical iteration for which the arguments are
-        needed.
-
-    Returns
-    -------
-    benchmark_argument_dictionary : dict
-        All the arguments passed to the benchmark executing function for the
-        needed experimentation.
-    """
-    for benchmark_argument_dictionary in benchmark_argument_dictionaries:
-        if benchmark_argument_dictionary["flag"] == iter_index:
-            return benchmark_argument_dictionary
-
-
-def format_previous_results(iter_results_lists):
-    """
-    Formats each statistical iteration's result into a mean/std analysis for
-    the metrics and adds the errors of each statistical iteration.
-
-    Parameters
-    ----------
-    iter_results_lists : The raw results, for each statistical iteration i
-     contains
-        - biclass_results[i]["metrics_scores"] is a dictionary with a
-        pd.dataframe for each metrics
-        - biclass_results[i]["example_errors"], a dicaitonary with a np.array
-        for each classifier.
-
-    Returns
-    -------
-    metrics_analysis : The mean and std dataframes for each metrics
-
-    error_analysis : A dictionary containing the added errors
-                     arrays for each classifier
-
-    """
-    metrics_analysis = {}
-    class_metrics_analysis = {}
-    feature_importances_analysis = {}
-    feature_importances_stds = {}
-
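-    # The same concat-then-groupby pattern is used for each result type:
-    # stack the per-iteration DataFrames (rows indexed by classifier name)
-    # and group by index to get the mean and std across iterations.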
-    metric_concat_dict = {}
-    for iter_index, metrics_score in enumerate(
-            iter_results_lists["metrics_scores"]):
-        for metric_name, dataframe in metrics_score.items():
-            if metric_name not in metric_concat_dict:
-                metric_concat_dict[metric_name] = dataframe
-            else:
-                metric_concat_dict[metric_name] = pd.concat(
-                    [metric_concat_dict[metric_name], dataframe])
-
-    for metric_name, dataframe in metric_concat_dict.items():
-        metrics_analysis[metric_name] = {}
-        metrics_analysis[metric_name][
-            "mean"] = dataframe.groupby(dataframe.index).mean()
-        metrics_analysis[metric_name][
-            "std"] = dataframe.groupby(dataframe.index).std(ddof=0)
-
-    class_metric_concat_dict = {}
-    for iter_index, class_metrics_score in enumerate(
-            iter_results_lists["class_metrics_scores"]):
-        for metric_name, dataframe in class_metrics_score.items():
-            if metric_name not in class_metric_concat_dict:
-                class_metric_concat_dict[metric_name] = dataframe
-            else:
-                class_metric_concat_dict[metric_name] = pd.concat(
-                    [class_metric_concat_dict[metric_name], dataframe])
-
-    for metric_name, dataframe in class_metric_concat_dict.items():
-        class_metrics_analysis[metric_name] = {}
-        class_metrics_analysis[metric_name][
-            "mean"] = dataframe.groupby(dataframe.index).mean()
-        class_metrics_analysis[metric_name][
-            "std"] = dataframe.groupby(dataframe.index).std(ddof=0)
-
-    durations_df_concat = pd.DataFrame(dtype=float)
-    for iter_index, durations_df in enumerate(iter_results_lists["durations"]):
-        durations_df_concat = pd.concat((durations_df_concat, durations_df),
-                                        axis=1)
-    durations_df_concat = durations_df_concat.astype(float)
-    grouped_df = durations_df_concat.groupby(durations_df_concat.columns, axis=1)
-    duration_means = grouped_df.mean()
-    duration_stds = grouped_df.std()
-
-    importance_concat_dict = {}
-    for iter_index, view_feature_importances in enumerate(
-            iter_results_lists["feature_importances"]):
-        for view_name, feature_importances in view_feature_importances.items():
-            if view_name not in importance_concat_dict:
-                importance_concat_dict[view_name] = feature_importances
-            else:
-                importance_concat_dict[view_name] = pd.concat(
-                    [importance_concat_dict[view_name], feature_importances])
-
-    for view_name, dataframe in importance_concat_dict.items():
-        feature_importances_analysis[view_name] = dataframe.groupby(
-            dataframe.index).mean()
-
-        feature_importances_stds[view_name] = dataframe.groupby(
-            dataframe.index).std(ddof=0)
-
-    added_example_errors = {}
-    for example_errors in iter_results_lists["example_errors"]:
-        for classifier_name, errors in example_errors.items():
-            if classifier_name not in added_example_errors:
-                added_example_errors[classifier_name] = errors
-            else:
-                added_example_errors[classifier_name] += errors
-    error_analysis = added_example_errors
-    return metrics_analysis, class_metrics_analysis, error_analysis, \
-           feature_importances_analysis, \
-           feature_importances_stds, iter_results_lists["labels"], \
-           duration_means, duration_stds
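For readers skimming this deleted module: the aggregation above repeatedly uses the same concat-then-groupby pattern to turn one DataFrame per statistical iteration into per-cell means and standard deviations. Here is a minimal, self-contained sketch of that pattern; the classifier names and scores are invented.

```python
import pandas as pd

# One scores DataFrame per statistical iteration, all sharing the same index.
iter_scores = [
    pd.DataFrame({"clf_a": [0.9, 0.8], "clf_b": [0.7, 0.6]},
                 index=["train", "test"]),
    pd.DataFrame({"clf_a": [0.8, 0.7], "clf_b": [0.9, 0.8]},
                 index=["train", "test"]),
]
# Stack the iterations, then group the duplicated index labels to recover
# per-cell statistics across iterations.
concat = pd.concat(iter_scores)
means = concat.groupby(concat.index).mean()
stds = concat.groupby(concat.index).std(ddof=0)  # population std, as above
print(means.loc["test", "clf_a"])  # 0.75
```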
diff --git a/multiview_platform/mono_multi_view_classifiers/result_analysis/feature_importances.py b/multiview_platform/mono_multi_view_classifiers/result_analysis/feature_importances.py
deleted file mode 100644
index 459f664fb6231161e0e75a10ed3009e0dd27950c..0000000000000000000000000000000000000000
--- a/multiview_platform/mono_multi_view_classifiers/result_analysis/feature_importances.py
+++ /dev/null
@@ -1,84 +0,0 @@
-import os
-import plotly
-import pandas as pd
-import numpy as np
-
-from ..monoview.monoview_utils import MonoviewResult
-
-
-def get_feature_importances(result, feature_names=None):
-    r"""Extracts the feature importance from the monoview results and stores
-    them in a dictionnary :
-    feature_importance[view_name] is a pandas.DataFrame of size n_feature*n_clf
-    containing a score of importance for each feature.
-
-    Parameters
-    ----------
-    result : list of results
-    feature_names : list of str, optional
-        The names of the features, used as the index of the DataFrames.
-
-    Returns
-    -------
-    feature_importances : dict of pd.DataFrame
-        The dictionary containing all the feature importance for each view as
-        pandas DataFrames
-    """
-    feature_importances = {}
-    for classifier_result in result:
-        if isinstance(classifier_result, MonoviewResult):
-            if classifier_result.view_name not in feature_importances:
-                feature_importances[classifier_result.view_name] = pd.DataFrame(
-                    index=feature_names)
-            if hasattr(classifier_result.clf, 'feature_importances_'):
-                feature_importances[classifier_result.view_name][
-                    classifier_result.classifier_name] = classifier_result.clf.feature_importances_
-            else:
-                feature_importances[classifier_result.view_name][
-                    classifier_result.classifier_name] = np.zeros(
-                    classifier_result.n_features)
-    return feature_importances
-
-def publish_feature_importances(feature_importances, directory, database_name,
-                                feature_stds=None):  # pragma: no cover
-    for view_name, feature_importance in feature_importances.items():
-        if not os.path.exists(os.path.join(directory, "feature_importances")):
-            os.mkdir(os.path.join(directory, "feature_importances"))
-        file_name = os.path.join(directory, "feature_importances",
-                                 database_name + "-" + view_name
-                                 + "-feature_importances")
-        if feature_stds is not None:
-            feature_std = feature_stds[view_name]
-            feature_std.to_csv(file_name + "_dataframe_stds.csv")
-        else:
-            feature_std = pd.DataFrame(data=np.zeros(feature_importance.shape),
-                                       index=feature_importance.index,
-                                       columns=feature_importance.columns)
-        plot_feature_importances(file_name, feature_importance, feature_std)
-
-
-def plot_feature_importances(file_name, feature_importance, feature_std): # pragma: no cover
-    feature_importance.to_csv(file_name + "_dataframe.csv")
-    hover_text = [["-Feature :" + str(feature_name) +
-                   "<br>-Classifier : " + classifier_name +
-                   "<br>-Importance : " + str(
-        feature_importance.loc[feature_name][classifier_name]) +
-                   "<br>-STD : " + str(
-        feature_std.loc[feature_name][classifier_name])
-                   for classifier_name in list(feature_importance.columns)]
-                  for feature_name in list(feature_importance.index)]
-    fig = plotly.graph_objs.Figure(data=plotly.graph_objs.Heatmap(
-        x=list(feature_importance.columns),
-        y=list(feature_importance.index),
-        z=feature_importance.values,
-        text=hover_text,
-        hoverinfo=["text"],
-        colorscale="Greys",
-        reversescale=False))
-    fig.update_layout(
-        xaxis={"showgrid": False, "showticklabels": False, "ticks": ''},
-        yaxis={"showgrid": False, "showticklabels": False, "ticks": ''})
-    fig.update_layout(paper_bgcolor='rgba(0,0,0,0)',
-                      plot_bgcolor='rgba(0,0,0,0)')
-    plotly.offline.plot(fig, filename=file_name + ".html", auto_open=False)
-
-    del fig
-
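As a quick illustration of the structure get_feature_importances builds, here is a hedged sketch with a toy classifier; the feature and classifier names are invented, and a classifier without a feature_importances_ attribute gets a zero column, as in the deleted code.

```python
import numpy as np
import pandas as pd
from sklearn.tree import DecisionTreeClassifier

rng = np.random.RandomState(42)
X = rng.rand(50, 3)
y = (X[:, 0] > 0.5).astype(int)
clf = DecisionTreeClassifier().fit(X, y)

# One DataFrame per view: rows are features, columns are classifiers.
importances = pd.DataFrame(index=["feat_0", "feat_1", "feat_2"])
importances["decision_tree"] = clf.feature_importances_
importances["no_importances_clf"] = np.zeros(3)  # fallback zero column
print(importances)
```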
diff --git a/multiview_platform/mono_multi_view_classifiers/result_analysis/metric_analysis.py b/multiview_platform/mono_multi_view_classifiers/result_analysis/metric_analysis.py
deleted file mode 100644
index fff1e36511fd9ac1952ce6af6b5d7e801ff0728b..0000000000000000000000000000000000000000
--- a/multiview_platform/mono_multi_view_classifiers/result_analysis/metric_analysis.py
+++ /dev/null
@@ -1,393 +0,0 @@
-import matplotlib.pyplot as plt
-import numpy as np
-import os
-import pandas as pd
-import plotly
-import logging
-
-from ..utils.organization import secure_file_path
-
-
-def get_metrics_scores(metrics, results, label_names):
-    r"""Used to extract metrics scores in case of classification
-
-    Parameters
-    ----------
-    metrics : dict
-        The metrics names with configuration metrics[i][0] = name of metric i
-    results : list of MonoviewResult and MultiviewResults objects
-        A list containing all the results for all the monoview experimentations.
-
-    Returns
-    -------
-    metrics_scores : dict of pd.DataFrame
-        For each metric, a DataFrame with a "train" and a "test" row and one
-        column per classifier, containing the scores.
-    class_metric_scores : dict of pd.DataFrame
-        For each metric, a DataFrame indexed by (set, label) pairs with one
-        column per classifier, containing the per-class scores.
-    """
-    classifier_names = []
-    for classifier_result in results:
-        classifier_name = classifier_result.get_classifier_name()
-        if classifier_name not in classifier_names:
-            classifier_names.append(classifier_name)
-    metrics_scores = dict(
-        (metric, pd.DataFrame(data=np.zeros((2, len(classifier_names))),
-                              index=["train", "test"],
-                              columns=classifier_names))
-        for metric in metrics.keys())
-    class_metric_scores = dict((metric, pd.DataFrame(
-        index=pd.MultiIndex.from_product([["train", "test"], label_names]),
-        columns=classifier_names, dtype=float))
-                               for metric in metrics)
-
-    for metric in metrics.keys():
-        for classifier_result in results:
-            classifier_name = classifier_result.get_classifier_name()
-            metrics_scores[metric].loc["train", classifier_name] = \
-                classifier_result.metrics_scores[metric][0]
-            metrics_scores[metric].loc["test", classifier_name] = \
-                classifier_result.metrics_scores[metric][1]
-            for label_index, label_name in enumerate(label_names):
-                class_metric_scores[metric].loc[
-                    ("train", label_name), classifier_name] = \
-                    classifier_result.class_metric_scores[metric][0][label_index]
-                class_metric_scores[metric].loc[
-                    ("test", label_name), classifier_name] = \
-                    classifier_result.class_metric_scores[metric][1][label_index]
-
-    return metrics_scores, class_metric_scores
-
-
-def publish_metrics_graphs(metrics_scores, directory, database_name,
-                           labels_names, class_metric_scores):  # pragma: no cover
-    r"""Used to sort the results (names and both scores) in descending test
-    score order.
-
-    Parameters
-    ----------
-    metrics_scores : dict of dicts of lists or np.arrays
-        Keys : The names of the metrics.
-        Values : The scores and names of each classifier.
-    directory : str
-        The path to the directory where the figures will be saved.
-    database_name : str
-        The name of the database on which the experiments were conducted.
-    labels_names : list of strs
-        The name corresponding to each numerical label.
-
-    Returns
-    -------
-    results : list
-        The results list, extended with one entry per classifier and metric.
-    """
-    results = []
-    for metric_name in metrics_scores.keys():
-        logging.debug(
-            "Start:\t Score graph generation for " + metric_name)
-        train_scores, test_scores, classifier_names, \
-        file_name, nb_results, results,\
-        class_test_scores = init_plot(results, metric_name,
-                                      metrics_scores[metric_name],
-                                      directory,
-                                      database_name,
-                                      class_metric_scores[metric_name])
-
-        plot_metric_scores(train_scores, test_scores, classifier_names,
-                           nb_results, metric_name, file_name,
-                           tag=" " + " vs ".join(labels_names))
-
-        class_file_name = os.path.join(directory, database_name + "-"
-                                       + metric_name + "-class")
-        plot_class_metric_scores(class_test_scores, class_file_name,
-                                 labels_names, classifier_names, metric_name)
-        logging.debug(
-            "Done:\t Score graph generation for " + metric_name)
-    return results
-
-
-def publish_all_metrics_scores(iter_results, class_iter_results, directory,
-                               data_base_name, stats_iter, label_names,
-                               min_size=10): # pragma: no cover
-    results = []
-    secure_file_path(os.path.join(directory, "a"))
-
-    for metric_name, scores in iter_results.items():
-        train = np.array(scores["mean"].loc["train"])
-        test = np.array(scores["mean"].loc["test"])
-        classifier_names = np.array(scores["mean"].columns)
-        train_std = np.array(scores["std"].loc["train"])
-        test_std = np.array(scores["std"].loc["test"])
-
-        file_name = os.path.join(directory, data_base_name + "-mean_on_" + str(
-            stats_iter) + "_iter-" + metric_name)
-        nb_results = classifier_names.shape[0]
-
-        plot_metric_scores(train, test, classifier_names, nb_results,
-                           metric_name, file_name, tag=" averaged",
-                           train_STDs=train_std, test_STDs=test_std)
-        results += [[classifier_name, metric_name, test_mean, test_std]
-                    for classifier_name, test_mean, test_std
-                    in zip(classifier_names, test, test_std)]
-
-    for metric_name, scores in class_iter_results.items():
-        test = np.array([np.array(scores["mean"].iloc[i, :]) for i in range(scores["mean"].shape[0]) if scores["mean"].iloc[i, :].name[0]=='test'])
-        classifier_names = np.array(scores["mean"].columns)
-        test_std = np.array([np.array(scores["std"].iloc[i, :])
-                             for i in range(scores["std"].shape[0])
-                             if scores["std"].iloc[i, :].name[0] == 'test'])
-
-        file_name = os.path.join(directory, data_base_name + "-mean_on_" + str(
-            stats_iter) + "_iter-" + metric_name+"-class")
-
-        plot_class_metric_scores(test, file_name, label_names,
-                                 classifier_names, metric_name,
-                                 stds=test_std, tag="averaged")
-    return results
-
-def init_plot(results, metric_name, metric_dataframe,
-              directory, database_name, class_metric_scores):
-    train = np.array(metric_dataframe.loc["train"])
-    test = np.array(metric_dataframe.loc["test"])
-    class_test = np.array(class_metric_scores.loc["test"])
-    classifier_names = np.array(metric_dataframe.columns)
-
-    nb_results = metric_dataframe.shape[1]
-
-    file_name = os.path.join(directory, database_name + "-" + metric_name)
-
-    results += [[classifiers_name, metric_name, test_mean, test_std, class_mean]
-                for classifiers_name, test_mean, class_mean, test_std in
-                zip(classifier_names, test, np.transpose(class_test),
-                    np.zeros(len(test)))]
-    return train, test, classifier_names, file_name, nb_results, results, \
-           class_test
-
-
-def plot_metric_scores(train_scores, test_scores, names, nb_results,
-                       metric_name,
-                       file_name,
-                       tag="", train_STDs=None, test_STDs=None,
-                       use_plotly=True): # pragma: no cover
-    r"""Used to plot and save the score barplot for a specific metric.
-
-    Parameters
-    ----------
-    train_scores : list or np.array of floats
-        The scores of each classifier on the training set.
-    test_scores : list or np.array of floats
-        The scores of each classifier on the testing set.
-    names : list or np.array of strs
-        The names of all the classifiers.
-    nb_results: int
-        The number of classifiers to plot.
-    metric_name : str
-        The plotted metric's name
-    file_name : str
-        The name of the file where the figure will be saved.
-    tag : str
-        Some text to personalize the title, must start with a whitespace.
-    train_STDs : np.array of floats or None
-        The array containing the standard deviations for the averaged scores on the training set.
-    test_STDs : np.array of floats or None
-        The array containing the standard deviations for the averaged scores on the testing set.
-
-    Returns
-    -------
-    """
-
-    figKW, barWidth = get_fig_size(nb_results)
-
-    names, train_scores, test_scores, train_STDs, test_STDs = sort_by_test_score(
-        train_scores, test_scores, names,
-        train_STDs, test_STDs)
-
-    f, ax = plt.subplots(nrows=1, ncols=1, **figKW)
-    ax.set_title(metric_name + "\n" + tag + " scores for each classifier")
-
-    rects = ax.bar(range(nb_results), test_scores, barWidth, color="0.1",
-                   yerr=test_STDs)
-    rect2 = ax.bar(np.arange(nb_results) + barWidth, train_scores, barWidth,
-                   color="0.8", yerr=train_STDs)
-    autolabel(rects, ax, set=1, std=test_STDs)
-    autolabel(rect2, ax, set=2, std=train_STDs)
-    ax.legend((rects[0], rect2[0]), ('Test', 'Train'))
-    ax.set_ylim(-0.1, 1.1)
-    ax.set_xticks(np.arange(nb_results) + barWidth / 2)
-    ax.set_xticklabels(names, rotation="vertical")
-
-    try:
-        plt.tight_layout()
-    except Exception:  # tight_layout can occasionally fail; keep the figure
-        pass
-    f.savefig(file_name + '.png', transparent=True)
-    plt.close()
-    if train_STDs is None:
-        dataframe = pd.DataFrame(np.transpose(np.concatenate((
-            train_scores.reshape((train_scores.shape[0], 1)),
-            test_scores.reshape((train_scores.shape[0], 1))), axis=1)),
-            columns=names, index=["Train", "Test"])
-    else:
-        dataframe = pd.DataFrame(np.transpose(np.concatenate((
-            train_scores.reshape((train_scores.shape[0], 1)),
-            train_STDs.reshape((train_scores.shape[0], 1)),
-            test_scores.reshape((train_scores.shape[0], 1)),
-            test_STDs.reshape((train_scores.shape[0], 1))), axis=1)),
-            columns=names, index=["Train", "Train STD", "Test", "Test STD"])
-    dataframe.to_csv(file_name + ".csv")
-    if use_plotly:
-        fig = plotly.graph_objs.Figure()
-        fig.add_trace(plotly.graph_objs.Bar(
-            name='Train',
-            x=names, y=train_scores,
-            error_y=dict(type='data', array=train_STDs),
-            marker_color="lightgrey",
-        ))
-        fig.add_trace(plotly.graph_objs.Bar(
-            name='Test',
-            x=names, y=test_scores,
-            error_y=dict(type='data', array=test_STDs),
-            marker_color="black",
-        ))
-
-        fig.update_layout(
-            title=metric_name + "<br>" + tag + " scores for each classifier")
-        fig.update_layout(paper_bgcolor='rgba(0,0,0,0)',
-                          plot_bgcolor='rgba(0,0,0,0)')
-        plotly.offline.plot(fig, filename=file_name + ".html", auto_open=False)
-        del fig
-
-
-def plot_class_metric_scores(class_test_scores, class_file_name,
-                             labels_names, classifier_names, metric_name,
-                             stds=None, tag=""): # pragma: no cover
-    fig = plotly.graph_objs.Figure()
-    for lab_index, scores in enumerate(class_test_scores):
-        if stds is None:
-            std = None
-        else:
-            std = stds[lab_index]
-        fig.add_trace(plotly.graph_objs.Bar(
-            name=labels_names[lab_index],
-            x=classifier_names, y=scores,
-            error_y=dict(type='data', array=std),
-            ))
-    fig.update_layout(
-        title=metric_name + "<br>" + tag + " scores for each classifier")
-    fig.update_layout(paper_bgcolor='rgba(0,0,0,0)',
-                      plot_bgcolor='rgba(0,0,0,0)')
-    plotly.offline.plot(fig, filename=class_file_name + ".html", auto_open=False)
-    del fig
-
-
-def get_fig_size(nb_results, min_size=15, multiplier=1.0, bar_width=0.35):
-    r"""Used to get the image size to save the figure and the bar width, depending on the number of scores to plot.
-
-    Parameters
-    ----------
-    nb_results : int
-        The number of pairs of bars to plot.
-    min_size : int
-        The minimum size of the image, if there are few classifiers to plot.
-    multiplier : float
-        The ratio between the image size and the number of classifiers.
-    bar_width : float
-        The width of the bars in the figure. Mainly here to centralize bar_width.
-
-    Returns
-    -------
-    fig_kwargs : dict of arguments
-        The argument constraining the size of the figure, usable directly in the `subplots` function of
-        `matplotlib.pyplot`.
-    bar_width : float
-        The width of the bars in the figure. Mainly here to centralize bar_width.
-    """
-    size = nb_results * multiplier
-    if size < min_size:
-        size = min_size
-    fig_kwargs = {"figsize": (size, size / 3)}
-    return fig_kwargs, bar_width
-
-
-def autolabel(rects, ax, set=1, std=None): # pragma: no cover
-    r"""Used to print the score below the bars.
-
-    Parameters
-    ----------
-    rects : pyplot bar object
-        The bars.
-    ax : pyplot ax object
-        The ax.
-    set : integer
-        1 means the test scores, anything else means the train score
-    std: None or array
-        The standard deviations in the case of statsIter results.
-
-    Returns
-    -------
-    """
-    if set == 1:
-        text_height = -0.05
-        weight = "bold"
-    else:
-        text_height = -0.07
-        weight = "normal"
-    for rectIndex, rect in enumerate(rects):
-        height = rect.get_height()
-        if std is not None:
-            ax.text(rect.get_x() + rect.get_width() / 2., text_height,
-                    "%.2f" % height + u'\u00B1' + "%.2f" % std[rectIndex],
-                    weight=weight,
-                    ha='center', va='bottom', size="x-small")
-        else:
-            ax.text(rect.get_x() + rect.get_width() / 2., text_height,
-                    "%.2f" % height, weight=weight,
-                    ha='center', va='bottom', size="small")
-
-
-def sort_by_test_score(train_scores, test_scores, names, train_STDs=None,
-                       test_STDs=None):
-    r"""Used to sort the results (names and both scores) in descending test score order.
-
-    Parameters
-    ----------
-    train_scores : np.array of floats
-        The scores of each classifier on the training set.
-    test_scores : np.array of floats
-        The scores of each classifier on the testing set.
-    names : np.array of strs
-        The names of all the classifiers.
-    train_STDs : np.array of floats or None
-        The array containing the standard deviations for the averaged scores on the training set.
-    test_STDs : np.array of floats or None
-        The array containing the standard deviations for the averaged scores on the testing set.
-
-    Returns
-    -------
-    sorted_names : np.array of strs
-        The names of all the classifiers, sorted in descending test score order.
-    sorted_train_scores : np.array of floats
-        The scores of each classifier on the training set, sorted in descending test score order.
-    sorted_test_scores : np.array of floats
-        The scores of each classifier on the testing set, sorted in descending test score order.
-    sorted_train_STDs : np.array of floats or None
-        The array containing the standard deviations for the averaged scores on the training set,
-        sorted in descending test score order.
-    sorted_test_STDs : np.array of floats or None
-        The array containing the standard deviations for the averaged scores on the testing set,
-        sorted in descending test score order.
-    """
-    sorted_indices = np.argsort(-test_scores)  # descending test score order, as documented
-    sorted_test_scores = test_scores[sorted_indices]
-    sorted_train_scores = train_scores[sorted_indices]
-    sorted_names = names[sorted_indices]
-    if train_STDs is not None and test_STDs is not None:
-        sorted_train_STDs = train_STDs[sorted_indices]
-        sorted_test_STDs = test_STDs[sorted_indices]
-    else:
-        sorted_train_STDs = None
-        sorted_test_STDs = None
-    return sorted_names, sorted_train_scores, sorted_test_scores, sorted_train_STDs, sorted_test_STDs
\ No newline at end of file
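A small sketch of the argsort idiom behind sort_by_test_score, with invented names and scores; negating the scores yields the descending order the docstring promises.

```python
import numpy as np

names = np.array(["dt", "rf", "svm"])
train_scores = np.array([0.99, 0.95, 0.90])
test_scores = np.array([0.80, 0.85, 0.75])

order = np.argsort(-test_scores)   # descending test score
print(names[order])                # ['rf' 'dt' 'svm']
print(train_scores[order], test_scores[order])
```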
diff --git a/multiview_platform/mono_multi_view_classifiers/result_analysis/noise_analysis.py b/multiview_platform/mono_multi_view_classifiers/result_analysis/noise_analysis.py
deleted file mode 100644
index b4fc81215d5b50564d98108262a332adf617932c..0000000000000000000000000000000000000000
--- a/multiview_platform/mono_multi_view_classifiers/result_analysis/noise_analysis.py
+++ /dev/null
@@ -1,56 +0,0 @@
-#
-# import numpy as np
-# import pandas as pd
-# import matplotlib.pyplot as plt
-# import os
-# from matplotlib.patches import Patch
-#
-#
-# def plot_results_noise(directory, noise_results, metric_to_plot, name,
-#                        width=0.1):
-#     avail_colors = ["tab:blue", "tab:orange", "tab:brown", "tab:gray",
-#                     "tab:olive", "tab:red", ]
-#     colors = {}
-#     legend_patches = []
-#     noise_levels = np.array([noise_level for noise_level, _ in noise_results])
-#     df = pd.DataFrame(
-#         columns=['noise_level', 'classifier_name', 'mean_score', 'score_std'], )
-#     if len(noise_results) > 1:
-#         width = np.min(np.diff(noise_levels))
-#     for noise_level, noise_result in noise_results:
-#         classifiers_names, meaned_metrics, metric_stds = [], [], []
-#         for classifier_result in noise_result:
-#             classifier_name = classifier_result[0].split("-")[0]
-#             if classifier_result[1] == metric_to_plot:
-#                 classifiers_names.append(classifier_name)
-#                 meaned_metrics.append(classifier_result[2])
-#                 metric_stds.append(classifier_result[3])
-#                 if classifier_name not in colors:
-#                     try:
-#                         colors[classifier_name] = avail_colors.pop(0)
-#                     except IndexError:
-#                         colors[classifier_name] = "k"
-#         classifiers_names, meaned_metrics, metric_stds = np.array(
-#             classifiers_names), np.array(meaned_metrics), np.array(metric_stds)
-#         sorted_indices = np.argsort(-meaned_metrics)
-#         for index in sorted_indices:
-#             row = pd.DataFrame(
-#                 {'noise_level': noise_level,
-#                  'classifier_name': classifiers_names[index],
-#                  'mean_score': meaned_metrics[index],
-#                  'score_std': metric_stds[index]}, index=[0])
-#             df = pd.concat([df, row])
-#             plt.bar(noise_level, meaned_metrics[index], yerr=metric_stds[index],
-#                     width=0.5 * width, label=classifiers_names[index],
-#                     color=colors[classifiers_names[index]])
-#     for classifier_name, color in colors.items():
-#         lengend_patches.append(Patch(facecolor=color, label=classifier_name))
-#     plt.legend(handles=lengend_patches, loc='lower center',
-#                bbox_to_anchor=(0.5, 1.05), ncol=2)
-#     plt.ylabel(metric_to_plot)
-#     plt.title(name)
-#     plt.xticks(noise_levels)
-#     plt.xlabel("Noise level")
-#     plt.savefig(os.path.join(directory, name + "_noise_analysis.png"))
-#     plt.close()
-#     df.to_csv(os.path.join(directory, name + "_noise_analysis.csv"))
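The commented-out module above grouped bar plots by noise level. For reference, a minimal sketch of that idea with invented classifiers and scores (not the platform's implementation):

```python
import matplotlib.pyplot as plt
import numpy as np

noise_levels = np.array([0.0, 0.1, 0.2])
scores = {"clf_a": [0.90, 0.80, 0.60], "clf_b": [0.85, 0.82, 0.70]}

# Offset each classifier's bars so they sit side by side per noise level.
width = np.min(np.diff(noise_levels)) * 0.4
for offset, (name, means) in enumerate(scores.items()):
    plt.bar(noise_levels + offset * width, means, width=width, label=name)
plt.xlabel("Noise level")
plt.legend()
plt.savefig("noise_analysis_sketch.png")
plt.close()
```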
diff --git a/multiview_platform/mono_multi_view_classifiers/result_analysis/tracebacks_analysis.py b/multiview_platform/mono_multi_view_classifiers/result_analysis/tracebacks_analysis.py
deleted file mode 100644
index 329a27f6fe98c23b94b1053847c7482165d970d4..0000000000000000000000000000000000000000
--- a/multiview_platform/mono_multi_view_classifiers/result_analysis/tracebacks_analysis.py
+++ /dev/null
@@ -1,36 +0,0 @@
-import os
-
-
-def publish_tracebacks(directory, database_name, labels_names, tracebacks,
-                       iter_index):
-    if tracebacks:
-        with open(os.path.join(directory, database_name +
-                                          "-iter" + str(iter_index) +
-                                          "-tracebacks.txt"),
-                  "w") as traceback_file:
-            failed_list = save_dict_to_text(tracebacks, traceback_file)
-        flagged_list = [_ + "-iter" + str(iter_index) for _ in failed_list]
-    else:
-        flagged_list = []
-    return flagged_list
-
-
-def save_dict_to_text(dictionary, output_file):
-    # TODO : smarter way must exist
-    output_file.write("Failed algorithms : \n\t" + ",\n\t".join(
-        dictionary.keys()) + ".\n\n\n")
-    for key, value in dictionary.items():
-        output_file.write(key)
-        output_file.write("\n\n")
-        output_file.write(value)
-        output_file.write("\n\n\n")
-    return dictionary.keys()
-
-
-def save_failed(failed_list, directory):
-    with open(os.path.join(directory, "failed_algorithms.txt"),
-              "w") as failed_file:
-        failed_file.write(
-            "The following algorithms sent an error, the tracebacks are stored "
-            "in the coressponding directory :\n")
-        failed_file.write(", \n".join(failed_list) + ".")
diff --git a/multiview_platform/mono_multi_view_classifiers/utils/__init__.py b/multiview_platform/mono_multi_view_classifiers/utils/__init__.py
deleted file mode 100644
index e0473b520b385389e967e567261bdb95a360aa37..0000000000000000000000000000000000000000
--- a/multiview_platform/mono_multi_view_classifiers/utils/__init__.py
+++ /dev/null
@@ -1 +0,0 @@
-from . import dataset, execution, hyper_parameter_search, transformations
diff --git a/multiview_platform/mono_multi_view_classifiers/utils/base.py b/multiview_platform/mono_multi_view_classifiers/utils/base.py
deleted file mode 100644
index 34894b5a6892d47b1a3843c55f7dfc30b84e97b2..0000000000000000000000000000000000000000
--- a/multiview_platform/mono_multi_view_classifiers/utils/base.py
+++ /dev/null
@@ -1,377 +0,0 @@
-import numpy as np
-from sklearn.base import BaseEstimator
-from abc import abstractmethod
-from datetime import timedelta as hms
-from tabulate import tabulate
-from sklearn.metrics import confusion_matrix as confusion
-from sklearn.tree import DecisionTreeClassifier
-from sklearn.ensemble import AdaBoostClassifier, RandomForestClassifier
-
-from multiview_platform.mono_multi_view_classifiers import metrics
-
-
-class BaseClassifier(BaseEstimator):
-
-    def gen_best_params(self, detector):
-        """
-        return best parameters of detector
-        Parameters
-        ----------
-        detector :
-
-        Returns
-        -------
-        best param : dictionary with param name as key and best parameters
-            value
-        """
-        return dict(
-            (param_name, detector.best_params_[param_name]) for param_name in
-            self.param_names)
-
-    def gen_params_from_detector(self, detector):
-        if self.classed_params:
-            classed_dict = dict((classed_param, get_names(
-                detector.cv_results_["param_" + classed_param]))
-                                for classed_param in self.classed_params)
-        if self.param_names:
-            return [(param_name,
-                     np.array(detector.cv_results_["param_" + param_name]))
-                    if param_name not in self.classed_params else (
-                param_name, classed_dict[param_name])
-                    for param_name in self.param_names]
-        else:
-            return [()]
-
-    def gen_distribs(self):
-        return dict((param_name, distrib) for param_name, distrib in
-                    zip(self.param_names, self.distribs))
-
-    def params_to_string(self):
-        """
-        Formats the parameters of the classifier as a string
-        """
-        return ", ".join(
-            [param_name + " : " + self.to_str(param_name) for param_name in
-             self.param_names])
-
-    def get_config(self):
-        """
-        Generates a string containing all the information about the
-        classifier's configuration.
-        """
-        if self.param_names:
-            return self.__class__.__name__ + " with " + self.params_to_string()
-        else:
-            return self.__class__.__name__ + " with no config."
-
-    def get_base_estimator(self, base_estimator, estimator_config):
-        if estimator_config is None:
-            estimator_config = {}
-        if base_estimator is None:
-            return DecisionTreeClassifier(**estimator_config)
-        if isinstance(base_estimator, str):  # pragma: no cover
-            if base_estimator == "DecisionTreeClassifier":
-                return DecisionTreeClassifier(**estimator_config)
-            elif base_estimator == "AdaboostClassifier":
-                return AdaBoostClassifier(**estimator_config)
-            elif base_estimator == "RandomForestClassifier":
-                return RandomForestClassifier(**estimator_config)
-            else:
-                raise ValueError('Base estimator string {} does not match an available classifier.'.format(base_estimator))
-        elif isinstance(base_estimator, BaseEstimator):
-            return base_estimator.set_params(**estimator_config)
-        else:
-            raise ValueError('base_estimator must be either a string or a BaseEstimator child class, it is {}'.format(type(base_estimator)))
-
-
-    def to_str(self, param_name):
-        """
-        Formats a parameter into a string
-        """
-        if param_name in self.weird_strings:
-            string = ""
-            if "class_name" in self.weird_strings[param_name]:
-                string += self.get_params()[param_name].__class__.__name__
-            if "config" in self.weird_strings[param_name]:
-                string += "( with " + self.get_params()[
-                    param_name].params_to_string() + ")"
-            return string
-        else:
-            return str(self.get_params()[param_name])
-
-    def get_interpretation(self, directory, base_file_name, y_test,
-                           multi_class=False):
-        """
-        Base method that returns an empty string if there is no interpretation
-        method in the classifier's module
-        """
-        return ""
-
-    def accepts_multi_class(self, random_state, n_samples=10, dim=2,
-                            n_classes=3):
-        """
-        Base function to test if the classifier accepts a multiclass task.
-        It is highly recommended to override it with a simple method that
-        returns True or False in the classifier's module, as it will speed up
-        the benchmark.
-        """
-        if int(n_samples / n_classes) < 1:
-            raise ValueError(
-                "n_samples ({}) / n_classes ({}) must be at least 1".format(
-                    n_samples,
-                    n_classes))
-        # if hasattr(self, "accepts_mutli_class"):
-        #     return self.accepts_multi_class
-        fake_mc_X = random_state.randint(low=0, high=101,
-                                         size=(n_samples, dim))
-        fake_mc_y = [class_index
-                     for _ in range(int(n_samples / n_classes))
-                     for class_index in range(n_classes)]
-        fake_mc_y += [0 for _ in range(n_samples % n_classes)]
-        fake_mc_y = np.asarray(fake_mc_y)
-        try:
-            self.fit(fake_mc_X, fake_mc_y)
-            # self.predict(fake_mc_X)
-            return True
-        except ValueError:
-            return False
-
-
-def get_names(classed_list):
-    return np.array([object_.__class__.__name__ for object_ in classed_list])
-
-
-def get_metric(metrics_dict):
-    """
-    Fetches the metric module in the metrics package
-    """
-    for metric_name, metric_kwargs in metrics_dict.items():
-        if metric_name.endswith("*"):
-            princ_metric_name = metric_name[:-1]
-            princ_metric_kwargs = metric_kwargs
-    metric_module = getattr(metrics, princ_metric_name)
-    return metric_module, princ_metric_kwargs
-
-
-class ResultAnalyser():
-    """
-    A shared result analysis tool for mono and multiview classifiers.
-    The main utility of this class is to generate a txt file summarizing
-    the results and possible interpretation for the classifier.
-    """
-
-    def __init__(self, classifier, classification_indices, k_folds,
-                 hps_method, metrics_dict, n_iter, class_label_names,
-                 pred, directory, base_file_name, labels,
-                 database_name, nb_cores, duration):
-        """
-
-        Parameters
-        ----------
-        classifier: estimator used for classification
-
-        classification_indices: list of indices for train test sets
-
-        k_folds: the sklearn StratifiedKFold object
-
-        hps_method: string naming the hyper-parameter search method
-
-        metrics_dict: list of the metrics to compute on the results
-
-        n_iter: number of HPS iterations
-
-        class_label_names: list of the names of the labels
-
-        pred: the classifier's predictions on the whole example set
-
-        directory: directory where to save the result analysis
-
-        base_file_name: base name of the files in which the analysis will be
-        saved
-
-        labels: the full labels array (Y in sklearn)
-
-        database_name: the name of the database
-
-        nb_cores: number of cores/threads used for the classification
-
-        duration: duration of the classification
-        """
-        self.classifier = classifier
-        self.train_indices, self.test_indices = classification_indices
-        self.k_folds = k_folds
-        self.hps_method = hps_method
-        self.metrics_dict = metrics_dict
-        self.n_iter = n_iter
-        self.class_label_names = class_label_names
-        self.pred = pred
-        self.directory = directory
-        self.base_file_name = base_file_name
-        self.labels = labels
-        self.string_analysis = ""
-        self.database_name = database_name
-        self.nb_cores = nb_cores
-        self.duration = duration
-        self.metric_scores = {}
-        self.class_metric_scores = {}
-
-    def get_all_metrics_scores(self, ):
-        """
-        Get the scores for all the metrics in the list
-        Returns
-        -------
-        """
-        for metric, metric_args in self.metrics_dict.items():
-            class_train_scores, class_test_scores, train_score, test_score\
-                = self.get_metric_score(metric, metric_args)
-            self.class_metric_scores[metric] = (class_train_scores,
-                                                class_test_scores)
-            self.metric_scores[metric] = (train_score, test_score)
-
-    def get_metric_score(self, metric, metric_kwargs):
-        """
-        Get the train and test scores for a specific metric and its arguments
-
-        Parameters
-        ----------
-
-        metric : name of the metric, must be implemented in metrics
-
-        metric_kwargs : the dictionary containing the arguments for the metric.
-
-        Returns
-        -------
-        class_train_scores, class_test_scores, train_score, test_score
-        """
-        if not metric.endswith("*"):
-            metric_module = getattr(metrics, metric)
-        else:
-            metric_module = getattr(metrics, metric[:-1])
-        class_train_scores = []
-        class_test_scores = []
-        for label_value in np.unique(self.labels):
-            train_example_indices = self.train_indices[
-                np.where(self.labels[self.train_indices] == label_value)[0]]
-            test_example_indices = self.test_indices[
-                np.where(self.labels[self.test_indices] == label_value)[0]]
-            class_train_scores.append(metric_module.score(
-                y_true=self.labels[train_example_indices],
-                y_pred=self.pred[train_example_indices],
-                **metric_kwargs))
-            class_test_scores.append(metric_module.score(
-                y_true=self.labels[test_example_indices],
-                y_pred=self.pred[test_example_indices],
-                **metric_kwargs))
-        train_score = metric_module.score(
-            y_true=self.labels[self.train_indices],
-            y_pred=self.pred[self.train_indices],
-            **metric_kwargs)
-        test_score = metric_module.score(
-            y_true=self.labels[self.test_indices],
-            y_pred=self.pred[self.test_indices],
-            **metric_kwargs)
-        return class_train_scores, class_test_scores, train_score, test_score
-
-    def print_metric_score(self,):
-        """
-        Generates a string formatting the metrics' configurations and scores.
-
-        Returns
-        -------
-        metric_score_string : a string formatting all the metric results
-        """
-        metric_score_string = "\n\n"
-        for metric, metric_kwargs in self.metrics_dict.items():
-            if metric.endswith("*"):
-                metric_module = getattr(metrics, metric[:-1])
-            else:
-                metric_module = getattr(metrics, metric)
-            metric_score_string += "\tFor {} : ".format(metric_module.get_config(
-                **metric_kwargs))
-            metric_score_string += "\n\t\t- Score on train : {}".format(self.metric_scores[metric][0])
-            metric_score_string += "\n\t\t- Score on test : {}".format(self.metric_scores[metric][1])
-            metric_score_string += "\n\n"
-        metric_score_string += "Test set confusion matrix : \n\n"
-        self.confusion_matrix = confusion(
-            y_true=self.labels[self.test_indices],
-            y_pred=self.pred[self.test_indices])
-        formatted_conf = [[label_name] + list(row)
-                          for label_name, row
-                          in zip(self.class_label_names,
-                                 self.confusion_matrix)]
-        metric_score_string += tabulate(formatted_conf,
-                                        headers=[''] + self.class_label_names,
-                                        tablefmt='fancy_grid')
-        metric_score_string += "\n\n"
-        return metric_score_string
-
-    @abstractmethod
-    def get_view_specific_info(self): # pragma: no cover
-        pass
-
-    @abstractmethod
-    def get_base_string(self): # pragma: no cover
-        pass
-
-    def get_db_config_string(self,):
-        """
-        Generates a string formatting all the information on the database.
-
-        Returns
-        -------
-        db_config_string : a string formatting all the information on the
-        database
-        """
-        learning_ratio = len(self.train_indices) / (
-                len(self.train_indices) + len(self.test_indices))
-        db_config_string = "Database configuration : \n"
-        db_config_string += "\t- Database name : {}\n".format(self.database_name)
-        db_config_string += self.get_view_specific_info()
-        db_config_string += "\t- Learning Rate : {}\n".format(learning_ratio)
-        db_config_string += "\t- Labels used : " + ", ".join(
-            self.class_label_names) + "\n"
-        db_config_string += "\t- Number of cross validation folds : {}\n\n".format(self.k_folds.n_splits)
-        return db_config_string
-
-    def get_classifier_config_string(self, ):
-        """
-        Formats the information about the classifier and its configuration
-
-        Returns
-        -------
-        A string explaining the classifier's configuration
-        """
-        classifier_config_string = "Classifier configuration : \n"
-        classifier_config_string += "\t- " + self.classifier.get_config()+ "\n"
-        classifier_config_string += "\t- Executed on {} core(s) \n".format(
-            self.nb_cores)
-
-        if self.hps_method.startswith('randomized_search'):
-            classifier_config_string += "\t- Got configuration using randomized search with {}  iterations \n" .format(self.n_iter)
-        return classifier_config_string
-
-    def analyze(self, ):
-        """
-        Main function used in the monoview and multiview classification scripts
-
-        Returns
-        -------
-        string_analysis : a string that will be stored in the log and in a txt
-        file
-        image_analysis : a list of images to save
-        metric_scores : a dictionary of {metric: (train_score, test_score)}
-        used in later analysis.
-        """
-        string_analysis = self.get_base_string()
-        string_analysis += self.get_db_config_string()
-        string_analysis += self.get_classifier_config_string()
-        self.get_all_metrics_scores()
-        string_analysis += self.print_metric_score()
-        string_analysis += "\n\n Classification took {}".format(hms(seconds=int(self.duration)))
-        string_analysis += "\n\n Classifier Interpretation : \n"
-        string_analysis += self.classifier.get_interpretation(
-            self.directory, self.base_file_name,
-            self.labels[self.test_indices])
-        image_analysis = {}
-        return string_analysis, image_analysis, self.metric_scores, \
-               self.class_metric_scores, self.confusion_matrix
-
-
-base_boosting_estimators = [DecisionTreeClassifier(max_depth=1),
-                            DecisionTreeClassifier(max_depth=2),
-                            DecisionTreeClassifier(max_depth=3),
-                            DecisionTreeClassifier(max_depth=4),
-                            DecisionTreeClassifier(max_depth=5), ]
\ No newline at end of file
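The get_metric helper above relies on a naming convention: the principal metric is the one whose key ends with "*". A self-contained sketch of just that convention, with invented metric names:

```python
metrics_dict = {"accuracy_score*": {}, "f1_score": {"average": "micro"}}

# The trailing "*" marks the principal metric; strip it to get the name.
for metric_name, metric_kwargs in metrics_dict.items():
    if metric_name.endswith("*"):
        princ_metric_name = metric_name[:-1]
        princ_metric_kwargs = metric_kwargs
print(princ_metric_name)  # accuracy_score
```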
diff --git a/multiview_platform/mono_multi_view_classifiers/utils/configuration.py b/multiview_platform/mono_multi_view_classifiers/utils/configuration.py
deleted file mode 100644
index fcd62c6d94ef3f24dec3dc80aa7a992400b7fa67..0000000000000000000000000000000000000000
--- a/multiview_platform/mono_multi_view_classifiers/utils/configuration.py
+++ /dev/null
@@ -1,96 +0,0 @@
-import os
-
-import yaml
-
-
-def get_the_args(path_to_config_file="../config_files/config.yml"):
-    """
-    The function for extracting the arguments from a '.yml' config file.
-
-    Parameters
-    ----------
-    path_to_config_file : str, path to the yml file containing the configuration
-
-    Returns
-    -------
-    yaml_config : dict, the dictionary containing the configuration for the
-    benchmark
-
-    """
-    with open(path_to_config_file, 'r') as stream:
-        yaml_config = yaml.safe_load(stream)
-    return pass_default_config(**yaml_config)
-
-
-def pass_default_config(log=True,
-                        name=["plausible", ],
-                        label="_",
-                        file_type=".hdf5",
-                        views=None,
-                        pathf="../data/",
-                        nice=0,
-                        random_state=42,
-                        nb_cores=1,
-                        full=True,
-                        debug=False,
-                        add_noise=False,
-                        noise_std=0.0,
-                        res_dir="../results/",
-                        track_tracebacks=True,
-                        split=0.49,
-                        nb_folds=5,
-                        nb_class=None,
-                        classes=None,
-                        type=["multiview", ],
-                        algos_monoview=["all"],
-                        algos_multiview=["svm_jumbo_fusion", ],
-                        stats_iter=2,
-                        metrics={"accuracy_score":{}, "f1_score":{}},
-                        metric_princ="accuracy_score",
-                        hps_type="Random",
-                        hps_iter=1,
-                        hps_kwargs={'n_iter':10, "equivalent_draws":True},
-                        **kwargs):
-    """
-
-    :param log:
-    :param name:
-    :param label:
-    :param file_type:
-    :param views:
-    :param pathf:
-    :param nice:
-    :param random_state:
-    :param nb_cores:
-    :param full:
-    :param debug:
-    :param add_noise:
-    :param noise_std:
-    :param res_dir:
-    :param track_tracebacks:
-    :param split:
-    :param nb_folds:
-    :param nb_class:
-    :param classes:
-    :param type:
-    :param algos_monoview:
-    :param algos_multiview:
-    :param stats_iter:
-    :param metrics:
-    :param metric_princ:
-    :param hps_type:
-    :param hps_iter:
-    :return:
-    """
-    args = dict(
-        (key, value) for key, value in locals().items() if key != "kwargs")
-    args = dict(args, **kwargs)
-    return args
-
-
-def save_config(directory, arguments):
-    """
-    Saves the config file in the result directory.
-    """
-    with open(os.path.join(directory, "config_file.yml"), "w") as stream:
-        yaml.dump(arguments, stream)
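A hypothetical round trip through the deleted configuration helpers, assuming get_the_args is importable: values from the YAML file override the defaults that pass_default_config supplies.

```python
import yaml

# Hypothetical minimal config file overriding two defaults.
with open("tiny_config.yml", "w") as stream:
    yaml.dump({"name": ["my_dataset"], "stats_iter": 5}, stream)

config = get_the_args("tiny_config.yml")
print(config["stats_iter"])  # 5, from the file
print(config["nb_folds"])    # 5, from the defaults
```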
diff --git a/multiview_platform/mono_multi_view_classifiers/utils/dataset.py b/multiview_platform/mono_multi_view_classifiers/utils/dataset.py
deleted file mode 100644
index 00ea3aadd0a4237d32e85464042fd0cacb09abbc..0000000000000000000000000000000000000000
--- a/multiview_platform/mono_multi_view_classifiers/utils/dataset.py
+++ /dev/null
@@ -1,769 +0,0 @@
-import logging
-import os
-import select
-import sys
-from abc import abstractmethod
-
-import h5py
-import numpy as np
-from scipy import sparse
-
-from .organization import secure_file_path
-
-class Dataset():
-
-    @abstractmethod
-    def get_nb_examples(self): # pragma: no cover
-        pass
-
-    @abstractmethod
-    def get_v(self, view_index, example_indices=None): # pragma: no cover
-        pass
-
-    @abstractmethod
-    def get_label_names(self, example_indices=None): # pragma: no cover
-        pass
-
-    @abstractmethod
-    def get_labels(self, example_indices=None): # pragma: no cover
-        pass
-
-    @abstractmethod
-    def filter(self, labels, label_names, example_indices, view_names,
-               path=None): # pragma: no cover
-        pass
-
-    def init_example_indices(self, example_indices=None):
-        """If no example indices are provided, selects all the examples."""
-        if example_indices is None:
-            return range(self.get_nb_examples())
-        else:
-            return example_indices
-
-    def get_shape(self, view_index=0, example_indices=None):
-        """
-        Gets the shape of the needed view for the asked examples.
-
-        Parameters
-        ----------
-        view_index : int
-            The index of the view to extract
-        example_indices : numpy.ndarray
-            The array containing the indices of the examples to extract.
-
-        Returns
-        -------
-        Tuple containing the shape
-
-        """
-        return self.get_v(view_index, example_indices=example_indices).shape
-
-    def to_numpy_array(self, example_indices=None, view_indices=None):
-        """
-        Concatenates the needed views into one big numpy array, while saving
-        the limits of each view in a list so they can be retrieved later.
-
-        Parameters
-        ----------
-        example_indices : array like,
-        The indices of the examples to extract from the dataset
-
-        view_indices : array like,
-        The indices of the view to concatenate in the numpy array
-
-        Returns
-        -------
-        concat_views : numpy array,
-        The numpy array containing all the needed views.
-
-        view_limits : list of int
-        The limits of each slice used to extract the views.
-
-        """
-        view_limits = [0]
-        for view_index in view_indices:
-            view_data = self.get_v(view_index, example_indices=example_indices)
-            nb_features = view_data.shape[1]
-            view_limits.append(view_limits[-1] + nb_features)
-        concat_views = np.concatenate([self.get_v(view_index,
-                                                  example_indices=example_indices)
-                                       for view_index in view_indices], axis=1)
-        return concat_views, view_limits
-
-    def select_labels(self, selected_label_names):
-        selected_labels = [self.get_label_names().index(label_name.decode())
-                           if isinstance(label_name, bytes)
-                           else self.get_label_names().index(label_name)
-                           for label_name in selected_label_names]
-        selected_indices = np.array([index
-                                     for index, label in
-                                     enumerate(self.get_labels())
-                                     if label in selected_labels])
-        labels = np.array([selected_labels.index(self.get_labels()[idx])
-                           for idx in selected_indices])
-        return labels, selected_label_names, selected_indices
-
-    def select_views_and_labels(self, nb_labels=None,
-                                selected_label_names=None, random_state=None,
-                                view_names=None, path_for_new="../data/"):
-        if view_names is None and selected_label_names is None and nb_labels is None: # pragma: no cover
-            pass
-        else:
-            selected_label_names = self.check_selected_label_names(nb_labels,
-                                                                   selected_label_names,
-                                                                   random_state)
-            labels, label_names, example_indices = self.select_labels(
-                selected_label_names)
-            self.filter(labels, label_names, example_indices, view_names,
-                        path_for_new)
-        labels_dictionary = dict(
-            (labelIndex, labelName) for labelIndex, labelName in
-            enumerate(self.get_label_names()))
-        return labels_dictionary
-
-    def check_selected_label_names(self, nb_labels=None,
-                                   selected_label_names=None,
-                                   random_state=np.random.RandomState(42)):
-        if selected_label_names is None or nb_labels is None or len(
-                selected_label_names) < nb_labels:
-            if selected_label_names is None:
-                nb_labels_to_add = nb_labels
-                selected_label_names = []
-            elif nb_labels is not None:
-                nb_labels_to_add = nb_labels - len(selected_label_names)
-            else:
-                nb_labels_to_add = 0
-            labels_names_to_choose = [available_label_name
-                                      for available_label_name
-                                      in self.get_label_names()
-                                      if available_label_name
-                                      not in selected_label_names]
-            added_labels_names = random_state.choice(labels_names_to_choose,
-                                                     nb_labels_to_add,
-                                                     replace=False)
-            selected_label_names = list(selected_label_names) + list(
-                added_labels_names)
-        elif len(selected_label_names) > nb_labels:
-            selected_label_names = list(
-                random_state.choice(selected_label_names, nb_labels,
-                                    replace=False))
-
-        return selected_label_names
-
-
-class RAMDataset(Dataset):
-
-    def __init__(self, views=None, labels=None, are_sparse=False,
-                 view_names=None, labels_names=None, example_ids=None,
-                 name=None):
-        self.saved_on_disk = False
-        self.views = views
-        self.labels = np.asarray(labels)
-        if isinstance(are_sparse, bool): # pragma: no cover
-            self.are_sparse = [are_sparse for _ in range(len(views))]
-        else:
-            self.are_sparse = are_sparse
-        self.view_names = view_names
-        self.labels_names = labels_names
-        self.example_ids = example_ids
-        self.view_dict = dict((view_name, view_ind)
-                              for view_name, view_ind
-                              in zip(view_names, range(len(views))))
-        self.name = name
-        self.nb_view = len(self.views)
-        self.is_temp = False
-
-    def get_view_name(self, view_idx):
-        return self.view_names[view_idx]
-
-    def init_attrs(self):
-        """
-        Used to init the two attributes that are modified when self.dataset
-        changes
-
-        Returns
-        -------
-
-        """
-
-        self.nb_view = len(self.views)
-        self.view_dict = dict((self.view_names[view_ind], view_ind)
-                              for view_ind in range(self.nb_view))
-
-    def get_nb_examples(self):
-        return self.views[0].shape[0]
-
-    def get_label_names(self, example_indices=None, decode=True):
-        selected_labels = self.get_labels(example_indices)
-        if decode:
-            # return plain str names when decode is True, bytes otherwise
-            return [label_name
-                    for label, label_name in enumerate(self.labels_names)
-                    if label in selected_labels]
-        else:
-            return [label_name.encode("utf-8")
-                    for label, label_name in enumerate(self.labels_names)
-                    if label in selected_labels]
-
-    def get_labels(self, example_indices=None):
-        example_indices = self.init_example_indices(example_indices)
-        return self.labels[example_indices]
-
-    def get_v(self, view_index, example_indices=None):
-        example_indices = self.init_example_indices(example_indices)
-        if type(example_indices) is int:
-            return self.views[view_index][example_indices, :]
-        else:
-            example_indices = np.asarray(example_indices)
-            # sorted_indices = np.argsort(example_indices)
-            # example_indices = example_indices[sorted_indices]
-            if not self.are_sparse[view_index]:
-                return self.views[view_index][
-                       example_indices, :]
-            else: # pragma: no cover
-                # TODO Sparse support
-                pass
-
-    def get_nb_class(self, example_indices=None):
-        """Gets the number of class of the dataset"""
-        example_indices = self.init_example_indices(example_indices)
-        return len(np.unique(self.labels[example_indices]))
-
-    def filter(self, labels, label_names, example_indices, view_names,
-               path=None):
-        if self.example_ids is not None:
-            self.example_ids = self.example_ids[example_indices]
-        self.labels = self.labels[example_indices]
-        self.labels_names = [name for lab_index, name
-                             in enumerate(self.labels_names)
-                             if lab_index in np.unique(self.labels)]
-        self.labels = np.array(
-            [np.where(label == np.unique(self.labels))[0] for label in
-             self.labels])
-        self.view_names = view_names
-        new_views = []
-        for new_view_ind, view_name in enumerate(self.view_names):
-            new_views.append(
-                self.views[self.view_dict[view_name]][example_indices, :])
-        self.views = new_views
-        self.view_dict = dict((view_name, view_ind)
-                              for view_ind, view_name
-                              in enumerate(self.view_names))
-        self.nb_view = len(self.views)
-
-    def get_view_dict(self):
-        return self.view_dict
-
-    def get_name(self):
-        return self.name
-
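-# A minimal construction sketch for RAMDataset (assumed toy values, for
-# illustration only):
-#
-#     views = [np.random.rand(10, 3), np.random.rand(10, 5)]
-#     labels = np.random.randint(0, 2, 10)
-#     ds = RAMDataset(views=views, labels=labels, are_sparse=False,
-#                     view_names=["view0", "view1"],
-#                     labels_names=["no", "yes"], name="demo")
-#     ds.get_v(0)  # expected shape: (10, 3)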
-
-class HDF5Dataset(Dataset):
-    """
-    HDF5-backed dataset class
-
-    This class is used to encapsulate the multiview dataset while keeping it stored on the disk instead of in RAM.
-
-
-    Parameters
-    ----------
-    views : list of numpy arrays or None
-        The list containing each view of the dataset as a numpy array of shape
-        (nb examples, nb features).
-
-    labels : numpy array or None
-        The labels for the multiview dataset, of shape (nb examples, ).
-
-    are_sparse : list of bool, or None
-        The list of boolean telling if each view is sparse or not.
-
-    file_name : str, or None
-        The name of the hdf5 file that will be created to store the multiview
-        dataset.
-
-    view_names : list of str, or None
-        The name of each view.
-
-    path : str, or None
-        The path where the hdf5 dataset file will be stored
-
-    hdf5_file : h5py.File object, or None
-        If not None, the dataset will be imported directly from this file.
-
-    labels_names : list of str, or None
-        The name for each unique value of the labels given in labels.
-
-    is_temp : bool
-        Used if a temporary dataset has to be used by the benchmark.
-
-    Attributes
-    ----------
-    dataset : h5py.File object
-        The h5py file object that points to the hdf5 dataset on the disk.
-
-    nb_view : int
-        The number of views in the dataset.
-
-    view_dict : dict
-        The dictionary with the name of each view as the keys and their
-        indices as values.
-    """
-
-    # The following methods use hdf5
-
-    def __init__(self, views=None, labels=None, are_sparse=False,
-                 file_name="dataset.hdf5", view_names=None, path="",
-                 hdf5_file=None, labels_names=None, is_temp=False,
-                 example_ids=None, ):
-        self.is_temp = is_temp
-        if hdf5_file is not None:
-            self.dataset = hdf5_file
-            self.init_attrs()
-        else:
-            secure_file_path(os.path.join(path, file_name))
-            dataset_file = h5py.File(os.path.join(path, file_name), "w")
-            if view_names is None:
-                view_names = ["View" + str(index) for index in
-                              range(len(views))]
-            if isinstance(are_sparse, bool): # pragma: no cover
-                are_sparse = [are_sparse for _ in views]
-            for view_index, (view_name, view, is_sparse) in enumerate(
-                    zip(view_names, views, are_sparse)):
-                view_dataset = dataset_file.create_dataset(
-                    "View" + str(view_index),
-                    view.shape,
-                    data=view)
-                view_dataset.attrs["name"] = view_name
-                view_dataset.attrs["sparse"] = is_sparse
-            labels_dataset = dataset_file.create_dataset("Labels",
-                                                         shape=labels.shape,
-                                                         data=labels)
-            if labels_names is None:
-                labels_names = [str(index) for index in np.unique(labels)]
-            labels_dataset.attrs["names"] = [label_name.encode()
-                                             if not isinstance(label_name,
-                                                               bytes)
-                                             else label_name
-                                             for label_name in labels_names]
-            meta_data_grp = dataset_file.create_group("Metadata")
-            meta_data_grp.attrs["nbView"] = len(views)
-            meta_data_grp.attrs["nbClass"] = len(np.unique(labels))
-            meta_data_grp.attrs["datasetLength"] = len(labels)
-            dataset_file.close()
-            self.update_hdf5_dataset(os.path.join(path, file_name))
-            if example_ids is not None:
-                example_ids = [example_id if not is_just_number(example_id)
-                               else "ID_" + example_id for example_id in
-                               example_ids]
-                self.example_ids = example_ids
-            else:
-                self.example_ids = ["ID_" + str(i)
-                                    for i in range(labels.shape[0])]
-
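-    # A hedged construction sketch (illustrative only, names assumed): build
-    # the file from in-memory views, or wrap an already-open hdf5 file.
-    #
-    #     ds = HDF5Dataset(views=[np.random.rand(10, 3)],
-    #                      labels=np.random.randint(0, 2, 10),
-    #                      view_names=["view0"], labels_names=["no", "yes"],
-    #                      file_name="demo.hdf5", path="/tmp/")
-    #     reloaded = HDF5Dataset(hdf5_file=h5py.File("/tmp/demo.hdf5", "r"))
-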
-    def get_v(self, view_index, example_indices=None):
-        r""" Extract the view and returns a numpy.ndarray containing the description
-        of the examples specified in example_indices
-
-        Parameters
-        ----------
-        view_index : int
-            The index of the view to extract
-        example_indices : numpy.ndarray
-            The array containing the indices of the examples to extract.
-
-        Returns
-        -------
-        A numpy.ndarray containing the view data for the needed examples
-        """
-        example_indices = self.init_example_indices(example_indices)
-        if type(example_indices) is int:
-            return self.dataset["View" + str(view_index)][example_indices, :]
-        else:
-            example_indices = np.array(example_indices)
-            # sorted_indices = np.argsort(example_indices)
-            # example_indices = example_indices[sorted_indices]
-
-            if not self.dataset["View" + str(view_index)].attrs["sparse"]:
-                return self.dataset["View" + str(view_index)][()][
-                       example_indices, :]  # [np.argsort(sorted_indices), :]
-            else: # pragma: no cover
-                # Work in progress
-                pass
-
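-    # Illustrative call (assumed data): extract the first two examples of
-    # view 0 as a dense array.
-    #
-    #     ds.get_v(0, example_indices=np.array([0, 1]))  # shape: (2, n_features)
-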
-    def get_view_name(self, view_idx):
-        """
-        Method to get a view's name from its index.
-
-        Parameters
-        ----------
-        view_idx : int
-            The index of the view in the dataset
-
-        Returns
-        -------
-            The view's name.
-
-        """
-        return self.dataset["View" + str(view_idx)].attrs["name"]
-
-    def init_attrs(self):
-        """
-        Used to init the attributes that are modified when self.dataset
-        changes
-
-        Returns
-        -------
-
-        """
-        self.nb_view = self.dataset["Metadata"].attrs["nbView"]
-        self.view_dict = self.get_view_dict()
-        if "example_ids" in self.dataset["Metadata"].keys():
-            self.example_ids = [example_id.decode()
-                                if not is_just_number(example_id.decode())
-                                else "ID_" + example_id.decode()
-                                for example_id in
-                                self.dataset["Metadata"]["example_ids"]]
-        else:
-            self.example_ids = ["ID_"+str(i) for i in
-                                range(self.dataset["Labels"].shape[0])]
-
-    def get_nb_examples(self):
-        """
-        Used to get the number of examples available in the dataset
-
-        Returns
-        -------
-
-        """
-        return self.dataset["Metadata"].attrs["datasetLength"]
-
-    def get_view_dict(self):
-        """
-        Returns the dictionary with view names as keys and their corresponding
-        indices as values
-        """
-        view_dict = {}
-        for view_index in range(self.nb_view):
-            view_dict[self.dataset["View" + str(view_index)].attrs[
-                "name"]] = view_index
-        return view_dict
-
-    def get_label_names(self, decode=True, example_indices=None):
-        """
-        Used to get the list of the label names for the given set of examples
-
-        Parameters
-        ----------
-        decode : bool
-            If True, will decode the label names before listing them
-
-        example_indices : numpy.ndarray
-            The array containing the indices of the needed examples
-
-        Returns
-        -------
-
-        """
-        selected_labels = self.get_labels(example_indices)
-        if decode:
-            return [label_name.decode("utf-8")
-                    for label, label_name in
-                    enumerate(self.dataset["Labels"].attrs["names"])
-                    if label in selected_labels]
-        else:
-            return [label_name
-                    for label, label_name in
-                    enumerate(self.dataset["Labels"].attrs["names"])
-                    if label in selected_labels]
-
-    def get_nb_class(self, example_indices=None):
-        """
-        Gets the number of classes of the dataset for the asked examples
-
-        Parameters
-        ----------
-        example_indices : numpy.ndarray
-            The array containing the indices of the examples to extract.
-
-        Returns
-        -------
-        int : The number of classes
-
-        """
-        example_indices = self.init_example_indices(example_indices)
-        return len(np.unique(self.dataset["Labels"][()][example_indices]))
-
-    def get_labels(self, example_indices=None):
-        """Gets the label array for the asked examples
-
-        Parameters
-        ----------
-        example_indices : numpy.ndarray
-            The array containing the indices of the examples to extract.
-
-        Returns
-        -------
-        numpy.ndarray containing the labels of the asked examples"""
-        example_indices = self.init_example_indices(example_indices)
-        return self.dataset["Labels"][()][example_indices]
-
-    def rm(self): # pragma: no cover
-        """
-        Method used to delete the dataset file on the disk if the dataset is
-        temporary.
-
-        Returns
-        -------
-
-        """
-        filename = self.dataset.filename
-        self.dataset.close()
-        if self.is_temp:
-            os.remove(filename)
-
-
-    def copy_view(self, target_dataset=None, source_view_name=None,
-                  target_view_index=None, example_indices=None):
-        example_indices = self.init_example_indices(example_indices)
-        new_d_set = target_dataset.create_dataset(
-            "View" + str(target_view_index),
-            data=self.get_v(self.view_dict[source_view_name],
-                            example_indices=example_indices))
-        for key, value in self.dataset[
-            "View" + str(self.view_dict[source_view_name])].attrs.items():
-            new_d_set.attrs[key] = value
-
-    def init_view_names(self, view_names=None):
-        if view_names is None:
-            return list(self.get_view_dict().keys())
-        else:
-            return view_names
-
-    def update_hdf5_dataset(self, path):
-        if hasattr(self, 'dataset'):
-            self.dataset.close()
-        self.dataset = h5py.File(path, 'r')
-        self.is_temp = True
-        self.init_attrs()
-
-    def filter(self, labels, label_names, example_indices, view_names,
-               path=None):
-        dataset_file_path = os.path.join(path,
-                                         self.get_name() + "_temp_filter.hdf5")
-        new_dataset_file = h5py.File(dataset_file_path, "w")
-        self.dataset.copy("Metadata", new_dataset_file)
-        if "example_ids" in self.dataset["Metadata"].keys():
-            del new_dataset_file["Metadata"]["example_ids"]
-            ex_ids = new_dataset_file["Metadata"].create_dataset("example_ids",
-                                                                 data=np.array(
-                                                                     self.example_ids)[
-                                                                     example_indices].astype(
-                                                                     np.dtype(
-                                                                         "S100")))
-        else:
-            new_dataset_file["Metadata"].create_dataset("example_ids",
-                                                        (
-                                                        len(self.example_ids),),
-                                                        data=np.array(
-                                                            self.example_ids).astype(
-                                                            np.dtype("S100")),
-                                                        dtype=np.dtype("S100"))
-        new_dataset_file["Metadata"].attrs["datasetLength"] = len(
-            example_indices)
-        new_dataset_file["Metadata"].attrs["nbClass"] = np.unique(labels)
-        new_dataset_file.create_dataset("Labels", data=labels)
-        new_dataset_file["Labels"].attrs["names"] = [label_name.encode()
-                                                     if not isinstance(
-            label_name, bytes)
-                                                     else label_name
-                                                     for label_name in
-                                                     label_names]
-        view_names = self.init_view_names(view_names)
-        new_dataset_file["Metadata"].attrs["nbView"] = len(view_names)
-        for new_index, view_name in enumerate(view_names):
-            self.copy_view(target_dataset=new_dataset_file,
-                           source_view_name=view_name,
-                           target_view_index=new_index,
-                           example_indices=example_indices)
-        new_dataset_file.close()
-        self.update_hdf5_dataset(dataset_file_path)
-
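-    # Hedged sketch of a filtering call (all names assumed): keep two classes
-    # and a single view; the result is written to a temporary hdf5 file and
-    # reloaded in place of the current one.
-    #
-    #     ds.filter(labels=np.array([0, 1, 0]), label_names=["no", "yes"],
-    #               example_indices=np.array([0, 2, 5]),
-    #               view_names=["view0"], path="/tmp/")
-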
-    def add_gaussian_noise(self, random_state, path,
-                           noise_std=0.15):
-        """In this function, we add a guaussian noise centered in 0 with specified
-        std to each view, according to it's range (the noise will be
-        mutliplied by this range) and we crop the noisy signal according to the
-        view's attributes limits.
-        This is done by creating a new dataset, to keep clean data."""
-        noisy_dataset = h5py.File(
-            os.path.join(path, self.get_name() + "_noised.hdf5"), "w")
-        self.dataset.copy("Metadata", noisy_dataset)
-        self.dataset.copy("Labels", noisy_dataset)
-        for view_index in range(self.nb_view):
-            self.copy_view(target_dataset=noisy_dataset,
-                           source_view_name=self.get_view_name(view_index),
-                           target_view_index=view_index)
-        for view_index in range(noisy_dataset["Metadata"].attrs["nbView"]):
-            view_key = "View" + str(view_index)
-            view_dset = noisy_dataset[view_key]
-            view_limits = self.dataset[
-                    "Metadata/View" + str(view_index) + "_limits"][()]
-            view_ranges = view_limits[:, 1] - view_limits[:, 0]
-            normal_dist = random_state.normal(0, noise_std, view_dset[()].shape)
-            noise = normal_dist * view_ranges
-            noised_data = view_dset[()] + noise
-            noised_data = np.where(noised_data < view_limits[:, 0],
-                                   view_limits[:, 0], noised_data)
-            noised_data = np.where(noised_data > view_limits[:, 1],
-                                   view_limits[:, 1], noised_data)
-            noisy_dataset[view_key][...] = noised_data
-        noisy_dataset_path = noisy_dataset.filename
-        noisy_dataset.close()
-        self.update_hdf5_dataset(noisy_dataset_path)
-
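-    # The noise model above, in short (pseudo-notation, per view):
-    #
-    #     noise = N(0, noise_std) * (limits[:, 1] - limits[:, 0])
-    #     noised_view = clip(view + noise, limits[:, 0], limits[:, 1])
-    #
-    # so views with wider attribute ranges receive proportionally more noise.
-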
-    # The following methods are hdf5 free
-
-    def get_name(self):
-        """Ony works if there are not multiple dots in the files name"""
-        return self.dataset.filename.split('/')[-1].split('.')[0]
-
-
-def is_just_number(string):
-    try:
-        float(string)
-        return True
-    except ValueError:
-        return False
-
-
-def datasets_already_exist(pathF, name, nbCores):
-    """Used to check if it's necessary to copy datasets"""
-    all_datasets_exist = True
-    for core_index in range(nbCores):
-        all_datasets_exist = all_datasets_exist and os.path.isfile(
-            os.path.join(pathF, name + str(core_index) + ".hdf5"))
-    return all_datasets_exist
-
-
-def extract_subset(matrix, used_indices):
-    """Used to extract a subset of a matrix even if it's sparse WIP"""
-    # if sparse.issparse(matrix):
-    #     new_indptr = np.zeros(len(used_indices) + 1, dtype=int)
-    #     oldindptr = matrix.indptr
-    #     for exampleIndexIndex, exampleIndex in enumerate(used_indices):
-    #         new_indptr[exampleIndexIndex + 1] = new_indptr[
-    #                                                 exampleIndexIndex] + (
-    #                                                     oldindptr[
-    #                                                         exampleIndex + 1] -
-    #                                                     oldindptr[exampleIndex])
-    #     new_data = np.ones(new_indptr[-1], dtype=bool)
-    #     new_indices = np.zeros(new_indptr[-1], dtype=int)
-    #     old_indices = matrix.indices
-    #     for exampleIndexIndex, exampleIndex in enumerate(used_indices):
-    #         new_indices[new_indptr[exampleIndexIndex]:new_indptr[
-    #             exampleIndexIndex + 1]] = old_indices[
-    #                                       oldindptr[exampleIndex]:
-    #                                       oldindptr[exampleIndex + 1]]
-    #     return sparse.csr_matrix((new_data, new_indices, new_indptr),
-    #                              shape=(len(used_indices), matrix.shape[1]))
-    # else:
-    return matrix[used_indices]
-
-
-def init_multiple_datasets(path_f, name, nb_cores): # pragma: no cover
-    r"""Used to create copies of the dataset if multicore computation is used.
-
-    This is a temporary solution to fix the sharing memory issue with HDF5 datasets.
-
-    Parameters
-    ----------
-    path_f : string
-        Path to the original dataset directory
-    name : string
-        Name of the dataset
-    nb_cores : int
-        The number of threads that the benchmark can use
-
-    Returns
-    -------
-    dataset_files : list of strings or None
-        The paths to the created dataset copies, or None if no copy was needed.
-    """
-    if nb_cores > 1:
-        if datasets_already_exist(path_f, name, nb_cores):
-            logging.debug(
-                "Info:\t Enough copies of the dataset are already available")
-        else:
-            dataset_size = os.path.getsize(
-                os.path.join(path_f, name + ".hdf5")) * nb_cores / float(
-                1024) / 1000 / 1000
-            if dataset_size > 0.1:
-                logging.debug("Start:\t Creating " + str(
-                    nb_cores) + " temporary datasets for multiprocessing")
-                logging.warning(
-                    " WARNING : /!\\ This may use a lot of HDD storage space : "
-                    + str(dataset_size) + " Gbytes /!\\ ")
-                confirmation = confirm()
-                if not confirmation:
-                    sys.exit(0)
-            dataset_files = copy_hdf5(path_f, name, nb_cores)
-            logging.debug("Start:\t Creating datasets for multiprocessing")
-            return dataset_files
-
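-# Hedged usage sketch (path assumed): with nb_cores > 1 this creates, or
-# reuses, one copy of "<name>.hdf5" per core in path_f, possibly asking for
-# confirmation first if the copies would use a lot of disk space.
-#
-#     dataset_files = init_multiple_datasets("/data/", "demo", nb_cores=4)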
-
-def copy_hdf5(pathF, name, nbCores):
-    """Used to copy a HDF5 database in case of multicore computing"""
-    datasetFile = h5py.File(pathF + name + ".hdf5", "r")
-    dataset_files = []
-    for coreIndex in range(nbCores):
-        new_file_name = pathF + name + str(coreIndex) + ".hdf5"
-        newDataSet = h5py.File(new_file_name, "w")
-        for dataset in datasetFile:
-            datasetFile.copy("/" + dataset, newDataSet["/"])
-        newDataSet.close()
-        dataset_files.append(new_file_name)
-    datasetFile.close()
-    return dataset_files
-
-
-def delete_HDF5(benchmarkArgumentsDictionaries, nbCores, dataset):
-    """Used to delete temporary copies at the end of the benchmark"""
-    if nbCores > 1:
-        logging.debug("Start:\t Deleting " + str(
-            nbCores) + " temporary datasets for multiprocessing")
-        args = benchmarkArgumentsDictionaries[0]["args"]
-        logging.debug("Start:\t Deleting datasets for multiprocessing")
-
-        for coreIndex in range(nbCores):
-            os.remove(args["pathf"] + args["name"] + str(coreIndex) + ".hdf5")
-    if dataset.is_temp:
-        dataset.rm()
-
-
-def confirm(resp=True, timeout=15): # pragma: no cover
-    """Used to process the user's answer"""
-    ans = input_(timeout)
-    if not ans:
-        return resp
-    if ans not in ['y', 'Y', 'n', 'N']:
-        print('please enter y or n.')
-        return confirm(resp, timeout)
-    return ans in ['y', 'Y']
-
-
-def input_(timeout=15): # pragma: no cover
-    """used as a UI to stop if too much HDD space will be used"""
-    logging.warning("You have " + str(
-        timeout) + " seconds to stop the dataset copy by typing n")
-    i, o, e = select.select([sys.stdin], [], [], timeout)
-    if i:
-        return sys.stdin.readline().strip()
-    else:
-        return "y"
-
-
-def get_examples_views_indices(dataset, examples_indices, view_indices):
-    """This function is used to get all the examples indices and view indices if needed"""
-    if view_indices is None:
-        view_indices = np.arange(dataset.nb_view)
-    if examples_indices is None:
-        examples_indices = np.arange(dataset.get_nb_examples())
-    return examples_indices, view_indices
diff --git a/multiview_platform/mono_multi_view_classifiers/utils/execution.py b/multiview_platform/mono_multi_view_classifiers/utils/execution.py
deleted file mode 100644
index 3570bb2b685a9fe0e2cdded10f367177ad046a85..0000000000000000000000000000000000000000
--- a/multiview_platform/mono_multi_view_classifiers/utils/execution.py
+++ /dev/null
@@ -1,426 +0,0 @@
-import argparse
-import logging
-import os
-import pickle
-import time
-
-import numpy as np
-import sklearn
-
-from . import get_multiview_db as DB
-from ..utils.configuration import save_config
-
-
-def parse_the_args(arguments):
-    """Used to parse the args entered by the user"""
-
-    parser = argparse.ArgumentParser(
-        description='This file is used to benchmark the scores of multiple '
-                    'classification algorithm on multiview data.',
-        formatter_class=argparse.ArgumentDefaultsHelpFormatter,
-        fromfile_prefix_chars='@')
-
-    groupStandard = parser.add_argument_group('Standard arguments')
-    groupStandard.add_argument('--config_path', metavar='STRING',
-                               action='store',
-                               help='Path to the configuration file '
-                                    '(default: %(default)s)',
-                               default='../config_files/config.yml')
-    args = parser.parse_args(arguments)
-    return args
-
-
-def init_random_state(random_state_arg, directory):
-    r"""
-    Used to init a random state.
-    If no random state is specified, it will generate a 'random' seed.
-    If the `random_state_arg` is a string containing only numbers, it will be
-    converted into an int to generate a seed.
-    If the `random_state_arg` is a string with letters, it must be a path to a
-    pickled random state file that will be loaded.
-    The function will also pickle the new random state in a file, to be able to
-    retrieve it later.
-
-
-    Parameters
-    ----------
-    random_state_arg : None or string
-        See function description.
-    directory : string
-        Path to the results directory.
-
-    Returns
-    -------
-    random_state : numpy.random.RandomState object
-        This random state will be used all along the benchmark .
-    """
-
-    if random_state_arg is None:
-        random_state = np.random.RandomState(random_state_arg)
-    else:
-        try:
-            seed = int(random_state_arg)
-            random_state = np.random.RandomState(seed)
-        except ValueError:
-            file_name = random_state_arg
-            with open(file_name, 'rb') as handle:
-                random_state = pickle.load(handle)
-    with open(os.path.join(directory, "random_state.pickle"), "wb") as handle:
-        pickle.dump(random_state, handle)
-    return random_state
-
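-# The three accepted forms, as a hedged sketch (directory assumed to exist):
-#
-#     init_random_state(None, "/tmp/results")             # fresh, unseeded state
-#     init_random_state("42", "/tmp/results")             # seeded with int("42")
-#     init_random_state("/tmp/rs.pickle", "/tmp/results")  # unpickled state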
-
-def init_stats_iter_random_states(stats_iter, random_state):
-    r"""
-    Used to initialize multiple random states if needed because of multiple statistical iterations of the same benchmark
-
-    Parameters
-    ----------
-    stats_iter : int
-        Number of statistical iterations of the same benchmark done (with a different random state).
-    random_state : numpy.random.RandomState object
-        The random state of the whole experimentation, that will be used to generate the ones for each
-        statistical iteration.
-
-    Returns
-    -------
-    stats_iter_random_states : list of numpy.random.RandomState objects
-        Multiple random states, one for each statistical iteration of the same benchmark.
-    """
-    if stats_iter > 1:
-        stats_iter_random_states = [
-            np.random.RandomState(random_state.randint(5000)) for _ in
-            range(stats_iter)]
-    else:
-        stats_iter_random_states = [random_state]
-    return stats_iter_random_states
-
-
-def get_database_function(name, type_var):
-    r"""Used to get the right database extraction function according to the type of database and it's name
-
-    Parameters
-    ----------
-    name : string
-        Name of the database.
-    type_var : string
-        The type of dataset: "hdf5" or "csv".
-
-    Returns
-    -------
-    getDatabase : function
-        The function that will be used to extract the database
-    """
-    if name not in ["fake", "plausible"]:
-        get_database = getattr(DB, "get_classic_db_" + type_var[1:])
-    else:
-        get_database = getattr(DB, "get_" + name + "_db_" + type_var[1:])
-    return get_database
-
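-# Dispatch sketch (names assumed): with name="plausible" and type_var=".hdf5"
-# the getattr call resolves to DB.get_plausible_db_hdf5, while any other name
-# maps to DB.get_classic_db_hdf5.
-#
-#     get_database = get_database_function("plausible", ".hdf5")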
-
-def init_log_file(name, views, cl_type, log, debug, label,
-                  result_directory, args):
-    r"""Used to init the directory where the preds will be stored and the log file.
-
-    First this function will check if the result directory already exists (only one per minute is allowed).
-
-    If the the result directory name is available, it is created, and the logfile is initiated.
-
-    Parameters
-    ----------
-    name : string
-        Name of the database.
-    views : list of strings
-        List of the view names that will be used in the benchmark.
-    cl_type : list of strings
-        Type of benchmark that will be made .
-    log : bool
-        Whether to show the log file in console or hide it.
-    debug : bool
-        Whether to use the debug naming scheme for the result directory.
-    label : str
-        Label suffix appended to the result directory name.
-    result_directory : str
-        Path to the main result directory.
-    args : parsed args object
-        The arguments of the benchmark, saved as a config file in the result
-        directory.
-
-    Returns
-    -------
-    results_directory : string
-        Reference to the main results directory for the benchmark.
-    """
-    if views is None:
-        views = []
-    # result_directory = os.path.join(os.path.dirname(
-    #     os.path.dirname(os.path.dirname(os.path.realpath(__file__)))),
-    #                                 result_directory)
-    if debug:
-        result_directory = os.path.join(result_directory, name,
-                                        "debug_started_" + time.strftime(
-                                            "%Y_%m_%d-%H_%M_%S") + "_" + label)
-    else:
-        result_directory = os.path.join(result_directory, name,
-                                        "started_" + time.strftime(
-                                            "%Y_%m_%d-%H_%M") + "_" + label)
-    log_file_name = time.strftime("%Y_%m_%d-%H_%M") + "-" + ''.join(
-        cl_type) + "-" + "_".join(views) + "-" + name + "-LOG.log"
-    if os.path.exists(result_directory): # pragma: no cover
-        raise NameError("The result dir already exists, wait 1 min and retry")
-    log_file_path = os.path.join(result_directory, log_file_name)
-    os.makedirs(os.path.dirname(log_file_path))
-    logging.basicConfig(format='%(asctime)s %(levelname)s: %(message)s',
-                        filename=log_file_path, level=logging.DEBUG,
-                        filemode='w')
-    if log:
-        logging.getLogger().addHandler(logging.StreamHandler())
-    save_config(result_directory, args)
-    return result_directory
-
-
-def gen_splits(labels, split_ratio, stats_iter_random_states):
-    r"""Used to _gen the train/test splits using one or multiple random states.
-
-    Parameters
-    ----------
-    labels : numpy.ndarray
-        The labels of the dataset.
-    split_ratio : float
-        The ratio of examples to put in the test set.
-    stats_iter_random_states : list of numpy.random.RandomState
-        The random states for each statistical iteration.
-
-    Returns
-    -------
-    splits : list of lists of numpy.ndarray
-        For each statistical iteration a couple of numpy.ndarrays is stored with the indices for the training set and
-        the ones of the testing set.
-    """
-    indices = np.arange(len(labels))
-    splits = []
-    for random_state in stats_iter_random_states:
-        folds_obj = sklearn.model_selection.StratifiedShuffleSplit(n_splits=1,
-                                                                   random_state=random_state,
-                                                                   test_size=split_ratio)
-        train_fold, test_fold = next(folds_obj.split(indices, labels))
-        train_indices = indices[train_fold]
-        test_indices = indices[test_fold]
-        splits.append([train_indices, test_indices])
-
-    return splits
-
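-# Hedged usage sketch (toy labels): one stratified split per random state,
-# each returned as [train_indices, test_indices].
-#
-#     labels = np.array([0, 0, 0, 1, 1, 1])
-#     splits = gen_splits(labels, 0.5, [np.random.RandomState(42)])
-#     train_indices, test_indices = splits[0]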
-
-def gen_k_folds(stats_iter, nb_folds, stats_iter_random_states):
-    r"""Used to generate folds indices for cross validation for each statistical iteration.
-
-    Parameters
-    ----------
-    stats_iter : integer
-        Number of statistical iterations of the benchmark.
-    nb_folds : integer
-        The number of cross-validation folds for the benchmark.
-    stats_iter_random_states : list of numpy.random.RandomState
-        The random states for each statistical iteration.
-
-    Returns
-    -------
-    folds_list : list of sklearn.model_selection.StratifiedKFold
-        For each statistical iteration, a stratified k-fold splitter (keeping the ratio between classes in each fold).
-    """
-    if stats_iter > 1:
-        folds_list = []
-        for random_state in stats_iter_random_states:
-            folds_list.append(
-                sklearn.model_selection.StratifiedKFold(n_splits=nb_folds,
-                                                        random_state=random_state,
-                                                        shuffle=True))
-    else:
-        if isinstance(stats_iter_random_states, list):
-            stats_iter_random_states = stats_iter_random_states[0]
-        folds_list = [sklearn.model_selection.StratifiedKFold(n_splits=nb_folds,
-                                                              random_state=stats_iter_random_states,
-                                                              shuffle=True)]
-    return folds_list
-
-
-def init_views(dataset_var, arg_views):
-    r"""Used to return the views names that will be used by the
-    benchmark, their indices and all the views names.
-
-    Parameters
-    ----------
-    dataset_var : HDF5 dataset file
-        The full dataset that will be used by the benchmark.
-    arg_views : list of strings
-        The views that will be used by the benchmark (arg).
-
-    Returns
-    -------
-    views : list of strings
-        Names of the views that will be used by the benchmark.
-    view_indices : list of ints
-        The list of the indices of the views that will be used in the benchmark (according to the dataset).
-    all_views : list of strings
-        Names of all the available views in the dataset.
-    """
-    nb_view = dataset_var.nb_view
-    if arg_views is not None:
-        allowed_views = arg_views
-        all_views = [str(dataset_var.get_view_name(view_index))
-                     if type(dataset_var.get_view_name(view_index)) != bytes
-                     else dataset_var.get_view_name(view_index).decode("utf-8")
-                     for view_index in range(nb_view)]
-        views = []
-        views_indices = []
-        for view_index in range(nb_view):
-            view_name = dataset_var.get_view_name(view_index)
-            if type(view_name) == bytes:
-                view_name = view_name.decode("utf-8")
-            if view_name in allowed_views:
-                views.append(view_name)
-                views_indices.append(view_index)
-    else:
-        views = [str(dataset_var.get_view_name(view_index))
-                 if type(dataset_var.get_view_name(view_index)) != bytes
-                 else dataset_var.get_view_name(view_index).decode("utf-8")
-                 for view_index in range(nb_view)]
-        views_indices = range(nb_view)
-        all_views = views
-    return views, views_indices, all_views
-
-
-def gen_direcorties_names(directory, stats_iter):
-    r"""Used to generate the different directories of each iteration if needed.
-
-    Parameters
-    ----------
-    directory : string
-        Path to the results directory.
-    stats_iter : int
-        The number of statistical iterations.
-
-    Returns
-    -------
-    directories : list of strings
-        Paths to each statistical iterations result directory.
-    """
-    if stats_iter > 1:
-        directories = []
-        for i in range(stats_iter):
-            directories.append(os.path.join(directory, "iter_" + str(i + 1)))
-    else:
-        directories = [directory]
-    return directories
-
-
-def find_dataset_names(path, type, names):
-    """This function goal is to browse the dataset directory and extrats all
-     the needed dataset names."""
-    package_path = os.path.dirname(
-        os.path.dirname(os.path.dirname(os.path.realpath(__file__))))
-    if os.path.isdir(path):
-        pass
-    elif os.path.isdir(os.path.join(package_path, path)):
-        path = os.path.join(package_path, path)
-    else:
-        raise ValueError("The provided pathf does not exist ({}) SuMMIT checks "
-                         "the prefix from where you are running your script ({}) "
-                         "and the multiview_platform package prefix ({}). "
-                         "You may want to try with an absolute path in the "
-                         "config file".format(path, os.getcwd(), package_path))
-    available_file_names = [file_name.strip().split(".")[0]
-                            for file_name in
-                            os.listdir(path)
-                            if file_name.endswith(type)]
-    if names == ["all"]:
-        return path, available_file_names
-    elif isinstance(names, str):
-        return path, [used_name for used_name in available_file_names if names == used_name]
-    elif len(names) > 1:
-        selected_names = [used_name for used_name in available_file_names if
-                          used_name in names]
-        if not selected_names:
-            raise ValueError(
-                "None of the provided dataset names are available. Available datasets are {}".format(
-                    available_file_names))
-        return path, selected_names
-    elif names[0] in available_file_names:
-        return path, names
-    else:
-        raise ValueError("The asked dataset ({}) is not available in {}. \n The available ones are {}".format(names[0], path, available_file_names))
-
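-# Hedged usage sketch (directory assumed): list every .hdf5 dataset available
-# in a directory, or check that a named one is present.
-#
-#     path, names = find_dataset_names("/data/", ".hdf5", ["all"])
-#     path, names = find_dataset_names("/data/", ".hdf5", ["demo"])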
-
-def gen_argument_dictionaries(labels_dictionary, directories,
-                              splits,
-                              hyper_param_search, args, k_folds,
-                              stats_iter_random_states, metrics,
-                              argument_dictionaries,
-                              benchmark, views, views_indices,): # pragma: no cover
-    r"""Used to generate a dictionary for each benchmark.
-
-    One for each label combination (if multiclass), for each statistical iteration, generates a dictionary with
-    all necessary information to perform the benchmark
-
-    Parameters
-    ----------
-    labels_dictionary : dictionary
-        Dictionary mapping labels indices to labels names.
-    directories : list of strings
-        List of the paths to the result directories for each statistical iteration.
-    splits : list of lists of numpy.ndarray
-        For each statistical iteration, the indices of the training set and the
-        ones of the testing set.
-    hyper_param_search : string
-        Type of hyper parameter optimization method
-    args : parsed args objects
-        All the args passed by the user.
-    k_folds : list of sklearn.model_selection.StratifiedKFold
-        For each statistical iteration, a stratified k-fold splitter (keeping the ratio between classes in each fold).
-    stats_iter_random_states : list of numpy.random.RandomState objects
-        Multiple random states, one for each statistical iteration of the same benchmark.
-    metrics : list of lists
-        Metrics that will be used to evaluate the algorithms' performance.
-    argument_dictionaries : dictionary
-        Dictionary summarizing all the specific arguments for the benchmark,
-        one dictionary for each classifier.
-    benchmark : dictionary
-        Dictionary summarizing which mono- and multiview algorithms will be
-        used in the benchmark.
-    views : list of strings
-        List of the names of the used views.
-    views_indices : list of ints
-        List of indices (according to the dataset) of the used views.
-
-    Returns
-    -------
-    benchmarkArgumentDictionaries : list of dicts
-        All the needed arguments for the benchmarks.
-
-    """
-    benchmark_argument_dictionaries = []
-    for iter_index, iterRandomState in enumerate(stats_iter_random_states):
-        benchmark_argument_dictionary = {
-            "labels_dictionary": labels_dictionary,
-            "directory": directories[iter_index],
-            "classification_indices": splits[iter_index],
-            "args": args,
-            "k_folds": k_folds[iter_index],
-            "random_state": iterRandomState,
-            "hyper_param_search": hyper_param_search,
-            "metrics": metrics,
-            "argument_dictionaries": argument_dictionaries,
-            "benchmark": benchmark,
-            "views": views,
-            "views_indices": views_indices,
-            "flag": iter_index}
-        benchmark_argument_dictionaries.append(benchmark_argument_dictionary)
-    return benchmark_argument_dictionaries
diff --git a/multiview_platform/mono_multi_view_classifiers/utils/get_multiview_db.py b/multiview_platform/mono_multi_view_classifiers/utils/get_multiview_db.py
deleted file mode 100644
index b3d2a24c7acb043eb43360b63e098a49319cd275..0000000000000000000000000000000000000000
--- a/multiview_platform/mono_multi_view_classifiers/utils/get_multiview_db.py
+++ /dev/null
@@ -1,1311 +0,0 @@
-import os
-
-import h5py
-import numpy as np
-
-from .dataset import RAMDataset, HDF5Dataset
-from .organization import secure_file_path
-
-# Author-Info
-__author__ = "Baptiste Bauvin"
-__status__ = "Prototype"  # Production, Development, Prototype
-
-
-def make_me_noisy(view_data, random_state, percentage=5):
-    """used to introduce some noise in the generated data"""
-    view_data = view_data.astype(bool)
-    nb_noisy_coord = int(
-        percentage / 100.0 * view_data.shape[0] * view_data.shape[1])
-    rows = range(view_data.shape[0])
-    cols = range(view_data.shape[1])
-    for _ in range(nb_noisy_coord):
-        row_idx = random_state.choice(rows)
-        col_idx = random_state.choice(cols)
-        view_data[row_idx, col_idx] = 0
-    noisy_view_data = view_data.astype(np.uint8)
-    return noisy_view_data
-
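-# With the default percentage=5, the number of redrawn coordinates is
-# int(0.05 * n_rows * n_cols): e.g. a 100 x 10 boolean view gets 50 of its
-# entries reset to 0 (possibly with repeats, since the coordinates are drawn
-# independently).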
-
-def get_plausible_db_hdf5(features, path, file_name, nb_class=3,
-                          label_names=["No".encode(), "Yes".encode(),
-                                       "Maybe".encode()],
-                          random_state=None, full=True, add_noise=False,
-                          noise_std=0.15, nb_view=3, nb_examples=100,
-                          nb_features=10):
-    """Used to generate a plausible dataset to test the algorithms"""
-    secure_file_path(os.path.join(path, "plausible.hdf5"))
-    example_ids = ["exmaple_id_" + str(i) for i in range(nb_examples)]
-    views = []
-    view_names = []
-    are_sparse = []
-    if nb_class == 2:
-        labels = np.array(
-            [0 for _ in range(int(nb_examples / 2))] + [1 for _ in range(
-                nb_examples - int(nb_examples / 2))])
-        label_names = ["No".encode(), "Yes".encode()]
-        for view_index in range(nb_view):
-            view_data = np.array(
-                [np.zeros(nb_features) for _ in range(int(nb_examples / 2))] +
-                [np.ones(nb_features) for _ in
-                 range(nb_examples - int(nb_examples / 2))])
-            fake_one_indices = random_state.randint(0, int(nb_examples / 2),
-                                                    int(nb_examples / 12))
-            fake_zero_indices = random_state.randint(int(nb_examples / 2),
-                                                     nb_examples,
-                                                     int(nb_examples / 12))
-            for index in np.concatenate((fake_one_indices, fake_zero_indices)):
-                example_ids[index] += "noised"
-
-            view_data[fake_one_indices] = np.ones(
-                (len(fake_one_indices), nb_features))
-            view_data[fake_zero_indices] = np.zeros(
-                (len(fake_zero_indices), nb_features))
-            view_data = make_me_noisy(view_data, random_state)
-            views.append(view_data)
-            view_names.append("ViewNumber" + str(view_index))
-            are_sparse.append(False)
-
-        dataset = RAMDataset(views=views, labels=labels,
-                             labels_names=label_names, view_names=view_names,
-                             are_sparse=are_sparse, example_ids=example_ids,
-                             name='plausible')
-        labels_dictionary = {0: "No", 1: "Yes"}
-        return dataset, labels_dictionary, "plausible"
-    elif nb_class >= 3:
-        firstBound = int(nb_examples / 3)
-        rest = nb_examples - 2 * int(nb_examples / 3)
-        scndBound = 2 * int(nb_examples / 3)
-        thrdBound = nb_examples
-        labels = np.array(
-            [0 for _ in range(firstBound)] +
-            [1 for _ in range(firstBound)] +
-            [2 for _ in range(rest)]
-        )
-        for view_index in range(nb_view):
-            view_data = np.array(
-                [np.zeros(nb_features) for _ in range(firstBound)] +
-                [np.ones(nb_features) for _ in range(firstBound)] +
-                [np.ones(nb_features) + 1 for _ in range(rest)])
-            fake_one_indices = random_state.randint(0, firstBound,
-                                                    int(nb_examples / 12))
-            fakeTwoIndices = random_state.randint(firstBound, scndBound,
-                                                  int(nb_examples / 12))
-            fake_zero_indices = random_state.randint(scndBound, thrdBound,
-                                                     int(nb_examples / 12))
-
-            view_data[fake_one_indices] = np.ones(
-                (len(fake_one_indices), nb_features))
-            view_data[fake_zero_indices] = np.zeros(
-                (len(fake_zero_indices), nb_features))
-            view_data[fakeTwoIndices] = np.ones(
-                (len(fakeTwoIndices), nb_features)) + 1
-            view_data = make_me_noisy(view_data, random_state)
-            views.append(view_data)
-            view_names.append("ViewNumber" + str(view_index))
-            are_sparse.append(False)
-        dataset = RAMDataset(views=views, labels=labels,
-                             labels_names=label_names, view_names=view_names,
-                             are_sparse=are_sparse,
-                             name="plausible",
-                             example_ids=example_ids)
-        labels_dictionary = {0: "No", 1: "Yes", 2: "Maybe"}
-        return dataset, labels_dictionary, "plausible"
-
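-# Hedged usage sketch (assumed arguments): generate a small three-class,
-# three-view RAMDataset for testing; note that the `features` and `file_name`
-# arguments are not used by the code shown above.
-#
-#     dataset, labels_dictionary, name = get_plausible_db_hdf5(
-#         None, "/tmp/", "plausible", nb_class=3,
-#         random_state=np.random.RandomState(42))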
-
-class DatasetError(Exception):
-    def __init__(self, *args, **kwargs):
-        Exception.__init__(self, *args, **kwargs)
-
-
-def get_classic_db_hdf5(views, path_f, name_DB, nb_class, asked_labels_names,
-                        random_state, full=False, add_noise=False,
-                        noise_std=0.15,
-                        path_for_new="../data/"):
-    """Used to load a hdf5 database"""
-    if full:
-        dataset_file = h5py.File(os.path.join(path_f, name_DB + ".hdf5"), "r")
-        dataset = HDF5Dataset(hdf5_file=dataset_file)
-        dataset_name = name_DB
-        labels_dictionary = dict((label_index, label_name)
-                                 for label_index, label_name
-                                 in enumerate(dataset.get_label_names()))
-    else:
-        dataset_file = h5py.File(os.path.join(path_f, name_DB + ".hdf5"), "r")
-        dataset = HDF5Dataset(hdf5_file=dataset_file)
-        labels_dictionary = dataset.select_views_and_labels(nb_labels=nb_class,
-                                                            selected_label_names=asked_labels_names,
-                                                            view_names=views,
-                                                            random_state=random_state,
-                                                            path_for_new=path_for_new)
-        dataset_name = dataset.get_name()
-
-    if add_noise:
-        dataset.add_gaussian_noise(random_state, path_for_new, noise_std)
-        dataset_name = dataset.get_name()
-    else:
-        pass
-    return dataset, labels_dictionary, dataset_name
-
-
-def get_classic_db_csv(views, pathF, nameDB, NB_CLASS, askedLabelsNames,
-                       random_state, full=False, add_noise=False,
-                       noise_std=0.15,
-                       delimiter=",", path_for_new="../data/"):
-    # TODO : Update this one
-    labels_names = np.genfromtxt(pathF + nameDB + "-labels-names.csv",
-                                 dtype='str', delimiter=delimiter)
-    datasetFile = h5py.File(pathF + nameDB + ".hdf5", "w")
-    labels = np.genfromtxt(pathF + nameDB + "-labels.csv", delimiter=delimiter)
-    labelsDset = datasetFile.create_dataset("Labels", labels.shape, data=labels)
-    labelsDset.attrs["names"] = [labelName.encode() for labelName in
-                                 labels_names]
-    viewFileNames = [viewFileName for viewFileName in
-                     os.listdir(pathF + "Views/")]
-    for viewIndex, viewFileName in enumerate(os.listdir(pathF + "Views/")):
-        viewFile = pathF + "Views/" + viewFileName
-        if viewFileName[-6:] != "-s.csv":
-            viewMatrix = np.genfromtxt(viewFile, delimiter=delimiter)
-            viewDset = datasetFile.create_dataset("View" + str(viewIndex),
-                                                  viewMatrix.shape,
-                                                  data=viewMatrix)
-            del viewMatrix
-            viewDset.attrs["name"] = viewFileName[:-4]
-            viewDset.attrs["sparse"] = False
-        else:
-            pass
-    metaDataGrp = datasetFile.create_group("Metadata")
-    metaDataGrp.attrs["nbView"] = len(viewFileNames)
-    metaDataGrp.attrs["nbClass"] = len(labels_names)
-    metaDataGrp.attrs["datasetLength"] = len(labels)
-    datasetFile.close()
-    datasetFile, labelsDictionary, dataset_name = get_classic_db_hdf5(views,
-                                                                      pathF,
-                                                                      nameDB,
-                                                                      NB_CLASS,
-                                                                      askedLabelsNames,
-                                                                      random_state,
-                                                                      full,
-                                                                      path_for_new=path_for_new)
-
-    return datasetFile, labelsDictionary, dataset_name
-
-#
-# def get_classes(labels):
-#     labels_set = set(list(labels))
-#     nb_labels = len(labels_set)
-#     if nb_labels >= 2:
-#         return labels_set
-#     else:
-#         raise DatasetError("Dataset must have at least two different labels")
-#
-#
-# def all_asked_labels_are_available(asked_labels_names_set,
-#                                    available_labels_names):
-#     for askedLabelName in asked_labels_names_set:
-#         if askedLabelName in available_labels_names:
-#             pass
-#         else:
-#             return False
-#     return True
-#
-#
-# def fill_label_names(nb_labels, selected_label_names, random_state,
-#                      available_labels_names):
-#     if len(selected_label_names) < nb_labels:
-#         nb_labels_to_add = nb_labels - len(selected_label_names)
-#         labels_names_to_choose = [available_label_name
-#                                   for available_label_name
-#                                   in available_labels_names
-#                                   if available_label_name
-#                                   not in selected_label_names]
-#         added_labels_names = random_state.choice(labels_names_to_choose,
-#                                               nb_labels_to_add, replace=False)
-#         selected_label_names = list(selected_label_names) + list(added_labels_names)
-#         asked_labels_names_set = set(selected_label_names)
-#
-#     elif len(selected_label_names) > nb_labels:
-#         selected_label_names = list(
-#             random_state.choice(selected_label_names, nb_labels, replace=False))
-#         asked_labels_names_set = set(selected_label_names)
-#
-#     else:
-#         asked_labels_names_set = set(selected_label_names)
-#
-#     return selected_label_names, asked_labels_names_set
-#
-#
-# def get_all_labels(full_labels, available_labels_names):
-#     new_labels = full_labels
-#     new_labels_names = available_labels_names
-#     used_indices = np.arange(len(full_labels))
-#     return new_labels, new_labels_names, used_indices
-#
-#
-# def select_asked_labels(asked_labels_names_set, available_labels_names,
-#                         asked_labels_names, full_labels):
-#     if all_asked_labels_are_available(asked_labels_names_set, available_labels_names):
-#         used_labels = [available_labels_names.index(asked_label_name) for
-#                       asked_label_name in asked_labels_names]
-#         used_indices = np.array(
-#             [labelIndex for labelIndex, label in enumerate(full_labels) if
-#              label in used_labels])
-#         new_labels = np.array([used_labels.index(label) for label in full_labels if
-#                               label in used_labels])
-#         new_labels_names = [available_labels_names[usedLabel] for usedLabel in
-#                           used_labels]
-#         return new_labels, new_labels_names, used_indices
-#     else:
-#         raise DatasetError("Asked labels are not all available in the dataset")
-#
-#
-# def filter_labels(labels_set, asked_labels_names_set, full_labels,
-#                   available_labels_names, asked_labels_names):
-#     if len(labels_set) > 2:
-#         if asked_labels_names == available_labels_names:
-#             new_labels, new_labels_names, used_indices = \
-#                 get_all_labels(full_labels, available_labels_names)
-#         elif len(asked_labels_names_set) <= len(labels_set):
-#             new_labels, new_labels_names, used_indices = select_asked_labels(
-#                 asked_labels_names_set, available_labels_names,
-#                 asked_labels_names, full_labels)
-#         else:
-#             raise DatasetError(
-#                 "Asked more labels than available in the dataset. Available labels are : " +
-#                 ", ".join(available_labels_names))
-#
-#     else:
-#         new_labels, new_labels_names, used_indices = get_all_labels(full_labels,
-#                                                                     available_labels_names)
-#     return new_labels, new_labels_names, used_indices
-#
-#
-# def filter_views(dataset_file, temp_dataset, views, used_indices):
-#     new_view_index = 0
-#     if views == [""]:
-#         for view_index in range(dataset_file.get("Metadata").attrs["nbView"]):
-#             copyhdf5_dataset(dataset_file, temp_dataset, "View" + str(view_index),
-#                             "View" + str(view_index), used_indices)
-#     else:
-#         for asked_view_name in views:
-#             for view_index in range(dataset_file.get("Metadata").attrs["nbView"]):
-#                 view_name = dataset_file.get("View" + str(view_index)).attrs["name"]
-#                 if type(view_name) == bytes:
-#                     view_name = view_name.decode("utf-8")
-#                 if view_name == asked_view_name:
-#                     copyhdf5_dataset(dataset_file, temp_dataset,
-#                                     "View" + str(view_index),
-#                                     "View" + str(new_view_index), used_indices)
-#                     new_view_name = \
-#                     temp_dataset.get("View" + str(new_view_index)).attrs["name"]
-#                     if type(new_view_name) == bytes:
-#                         temp_dataset.get("View" + str(new_view_index)).attrs[
-#                             "name"] = new_view_name.decode("utf-8")
-#
-#                     new_view_index += 1
-#                 else:
-#                     pass
-#         temp_dataset.get("Metadata").attrs["nbView"] = len(views)
-#
-#
-# def copyhdf5_dataset(source_data_file, destination_data_file, source_dataset_name,
-#                      destination_dataset_name, used_indices):
-#     """Used to copy a view in a new dataset file using only the examples of
-#     usedIndices, and copying the args"""
-#     new_d_set = destination_data_file.create_dataset(destination_dataset_name,
-#                                                  data=source_data_file.get(
-#                                                       source_dataset_name).value[
-#                                                       used_indices, :])
-#     if "sparse" in source_data_file.get(source_dataset_name).attrs.keys() and \
-#             source_data_file.get(source_dataset_name).attrs["sparse"]:
-#         # TODO : Support sparse
-#         pass
-#     else:
-#         for key, value in source_data_file.get(source_dataset_name).attrs.items():
-#             new_d_set.attrs[key] = value
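-#
-# NOTE (editor): minimal usage sketch for copyhdf5_dataset (illustrative
-# names, assuming both files are open h5py handles and used_indices is a
-# sorted index array):
-#     src = h5py.File("full.hdf5", "r")
-#     dst = h5py.File("subset.hdf5", "w")
-#     copyhdf5_dataset(src, dst, "View0", "View0", np.array([0, 2, 5]))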
-
-
-#
-# def add_gaussian_noise(dataset_file, random_state, path_f, dataset_name,
-#                        noise_std=0.15):
-#     """In this function, we add a guaussian noise centered in 0 with specified
-#     std to each view, according to it's range (the noise will be
-#     mutliplied by this range) and we crop the noisy signal according to the
-#     view's attributes limits.
-#     This is done by creating a new dataset, to keep clean data."""
-#     noisy_dataset = h5py.File(path_f + dataset_name + "_noised.hdf5", "w")
-#     dataset_file.copy("Metadata", noisy_dataset)
-#     dataset_file.copy("Labels", noisy_dataset)
-#     for view_index in range(dataset_file.get("Metadata").attrs["nbView"]):
-#         dataset_file.copy("View" + str(view_index), noisy_dataset)
-#     for view_index in range(noisy_dataset.get("Metadata").attrs["nbView"]):
-#         view_name = "View" + str(view_index)
-#         view_dset = noisy_dataset.get(view_name)
-#         view_limits = dataset_file[
-#             "Metadata/View" + str(view_index) + "_limits"][()]
-#         view_ranges = view_limits[:, 1] - view_limits[:, 0]
-#         normal_dist = random_state.normal(0, noise_std, view_dset.shape)
-#         noise = normal_dist * view_ranges
-#         noised_data = view_dset[()] + noise
-#         noised_data = np.where(noised_data < view_limits[:, 0],
-#                                view_limits[:, 0], noised_data)
-#         noised_data = np.where(noised_data > view_limits[:, 1],
-#                                view_limits[:, 1], noised_data)
-#         noisy_dataset[view_name][...] = noised_data
-#     original_dataset_filename = dataset_file.filename
-#     dataset_file.close()
-#     noisy_dataset.close()
-#     noisy_dataset = h5py.File(path_f + dataset_name + "_noised.hdf5", "r")
-#     if "_temp_" in original_dataset_filename:
-#         os.remove(original_dataset_filename)
-#     return noisy_dataset, dataset_name + "_noised"
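-#
-# NOTE (editor): the two np.where calls above are equivalent to a single
-# clip (sketch):
-#     noised_data = np.clip(noised_data, view_limits[:, 0], view_limits[:, 1])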
-
-
-# def getLabelSupports(CLASS_LABELS):
-#     """Used to get the number of example for each label"""
-#     labels = set(CLASS_LABELS)
-#     supports = [CLASS_LABELS.tolist().count(label) for label in labels]
-#     return supports, dict((label, index) for label, index in zip(labels, range(len(labels))))
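-#
-# NOTE (editor): equivalent, up to label ordering, to (sketch):
-#     labels, supports = np.unique(CLASS_LABELS, return_counts=True)
-#     label_dict = dict(zip(labels, range(len(labels))))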
-
-
-# def isUseful(labelSupports, index, CLASS_LABELS, labelDict):
-#     if labelSupports[labelDict[CLASS_LABELS[index]]] != 0:
-#         labelSupports[labelDict[CLASS_LABELS[index]]] -= 1
-#         return True, labelSupports
-#     else:
-#         return False, labelSupports
-
-
-# def splitDataset(DATASET, LEARNING_RATE, DATASET_LENGTH, random_state):
-#     LABELS = DATASET.get("Labels")[...]
-#     NB_CLASS = int(DATASET["Metadata"].attrs["nbClass"])
-#     validationIndices = extractRandomTrainingSet(LABELS, 1 - LEARNING_RATE, DATASET_LENGTH, NB_CLASS, random_state)
-#     validationIndices.sort()
-#     return validationIndices
-
-
-# def extractRandomTrainingSet(CLASS_LABELS, LEARNING_RATE, DATASET_LENGTH, NB_CLASS, random_state):
-#     labelSupports, labelDict = getLabelSupports(np.array(CLASS_LABELS))
-#     nbTrainingExamples = [int(support * LEARNING_RATE) for support in labelSupports]
-#     trainingExamplesIndices = []
-#     usedIndices = []
-#     while nbTrainingExamples != [0 for i in range(NB_CLASS)]:
-#         isUseFull = False
-#         index = int(random_state.randint(0, DATASET_LENGTH))  # numpy randint: high is exclusive
-#         if index not in usedIndices:
-#             isUseFull, nbTrainingExamples = isUseful(nbTrainingExamples, index, CLASS_LABELS, labelDict)
-#         if isUseFull:
-#             trainingExamplesIndices.append(index)
-#             usedIndices.append(index)
-#     return trainingExamplesIndices
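-#
-# NOTE (editor): this rejection-sampling loop is a hand-rolled stratified
-# subsample; a scikit-learn sketch of the same split:
-#     from sklearn.model_selection import StratifiedShuffleSplit
-#     sss = StratifiedShuffleSplit(n_splits=1, train_size=LEARNING_RATE,
-#                                  random_state=random_state)
-#     train_idx, _ = next(sss.split(np.zeros(len(CLASS_LABELS)), CLASS_LABELS))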
-
-
-# def getKFoldIndices(nbFolds, CLASS_LABELS, NB_CLASS, learningIndices, random_state):
-#     labelSupports, labelDict = getLabelSupports(np.array(CLASS_LABELS[learningIndices]))
-#     nbTrainingExamples = [[int(support / nbFolds) for support in labelSupports] for fold in range(nbFolds)]
-#     trainingExamplesIndices = []
-#     usedIndices = []
-#     for foldIndex, fold in enumerate(nbTrainingExamples):
-#         trainingExamplesIndices.append([])
-#         while fold != [0 for i in range(NB_CLASS)]:
-#             index = random_state.randint(0, len(learningIndices))
-#             if learningIndices[index] not in usedIndices:
-#                 isUseFull, fold = isUseful(fold, learningIndices[index], CLASS_LABELS, labelDict)
-#                 if isUseFull:
-#                     trainingExamplesIndices[foldIndex].append(learningIndices[index])
-#                     usedIndices.append(learningIndices[index])
-#     return trainingExamplesIndices
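-#
-# NOTE (editor): likewise, a StratifiedKFold sketch of getKFoldIndices:
-#     from sklearn.model_selection import StratifiedKFold
-#     skf = StratifiedKFold(n_splits=nbFolds, shuffle=True,
-#                           random_state=random_state)
-#     folds = [learningIndices[test] for _, test in
-#              skf.split(learningIndices, CLASS_LABELS[learningIndices])]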
-#
-#
-# def getPositions(labelsUsed, fullLabels):
-#     usedIndices = []
-#     for labelIndex, label in enumerate(fullLabels):
-#         if label in labelsUsed:
-#             usedIndices.append(labelIndex)
-#     return usedIndices
-
-
-# def getCaltechDBcsv(views, pathF, nameDB, NB_CLASS, LABELS_NAMES, random_state):
-#     datasetFile = h5py.File(pathF + nameDB + ".hdf5", "w")
-#     labelsNamesLines = [line.strip().split(";") for line in
-#                         open(pathF + nameDB + '-ClassLabels-Description.csv')]
-#     if len(LABELS_NAMES) != NB_CLASS:
-#         nbLabelsAvailable = len(labelsNamesLines)
-#         LABELS_NAMES = [line[1] for lineIdx, line in enumerate(labelsNamesLines) if
-#                         lineIdx in random_state.randint(nbLabelsAvailable, size=NB_CLASS)]
-#     fullLabels = np.genfromtxt(pathF + nameDB + '-ClassLabels.csv', delimiter=';').astype(int)
-#     labelsDictionary = dict((int(line[0]), line[1]) for line in labelsNamesLines
-#                             if line[1] in LABELS_NAMES)
-#     if len(set(fullLabels)) > NB_CLASS:
-#         usedIndices = getPositions(labelsDictionary.keys(), fullLabels)
-#     else:
-#         usedIndices = range(len(fullLabels))
-#     for viewIndex, view in enumerate(views):
-#         viewFile = pathF + nameDB + "-" + view + '.csv'
-#         viewMatrix = np.array(np.genfromtxt(viewFile, delimiter=';'))[usedIndices, :]
-#         viewDset = datasetFile.create_dataset("View" + str(viewIndex), viewMatrix.shape, data=viewMatrix)
-#         viewDset.attrs["name"] = view
-#
-#     labelsDset = datasetFile.create_dataset("Labels", fullLabels[usedIndices].shape, data=fullLabels[usedIndices])
-#
-#     metaDataGrp = datasetFile.create_group("Metadata")
-#     metaDataGrp.attrs["nbView"] = len(views)
-#     metaDataGrp.attrs["nbClass"] = NB_CLASS
-#     metaDataGrp.attrs["datasetLength"] = len(fullLabels[usedIndices])
-#     datasetFile.close()
-#     datasetFile = h5py.File(pathF + nameDB + ".hdf5", "r")
-#     return datasetFile, labelsDictionary
-
-# --------------------------------------------#
-# The functions below are no longer      #
-# useful, but the binarization methods   #
-# they contain must be kept              #
-# --------------------------------------------#
-
-
-# def getMultiOmicDBcsv(features, path, name, NB_CLASS, LABELS_NAMES, random_state):
-#     datasetFile = h5py.File(path + "MultiOmic.hdf5", "w")
-#
-#     logging.debug("Start:\t Getting Methylation data")
-#     methylData = np.genfromtxt(path + "matching_methyl.csv", delimiter=',')
-#     methylDset = datasetFile.create_dataset("View0", methylData.shape)
-#     methylDset[...] = methylData
-#     methylDset.attrs["name"] = "Methyl"
-#     methylDset.attrs["sparse"] = False
-#     methylDset.attrs["binary"] = False
-#     logging.debug("Done:\t Getting Methylation data")
-#
-#     logging.debug("Start:\t Getting MiRNA data")
-#     mirnaData = np.genfromtxt(path + "matching_mirna.csv", delimiter=',')
-#     mirnaDset = datasetFile.create_dataset("View1", mirnaData.shape)
-#     mirnaDset[...] = mirnaData
-#     mirnaDset.attrs["name"] = "MiRNA_"
-#     mirnaDset.attrs["sparse"] = False
-#     mirnaDset.attrs["binary"] = False
-#     logging.debug("Done:\t Getting MiRNA data")
-#
-#     logging.debug("Start:\t Getting RNASeq data")
-#     rnaseqData = np.genfromtxt(path + "matching_rnaseq.csv", delimiter=',')
-#     uselessRows = []
-#     for rowIndex, row in enumerate(np.transpose(rnaseqData)):
-#         if not row.any():
-#             uselessRows.append(rowIndex)
-#     usefulRows = [usefulRowIndex for usefulRowIndex in range(rnaseqData.shape[1]) if usefulRowIndex not in uselessRows]
-#     rnaseqDset = datasetFile.create_dataset("View2", (rnaseqData.shape[0], len(usefulRows)))
-#     rnaseqDset[...] = rnaseqData[:, usefulRows]
-#     rnaseqDset.attrs["name"] = "RNASeq_"
-#     rnaseqDset.attrs["sparse"] = False
-#     rnaseqDset.attrs["binary"] = False
-#     logging.debug("Done:\t Getting RNASeq data")
-#
-#     logging.debug("Start:\t Getting Clinical data")
-#     clinical = np.genfromtxt(path + "clinicalMatrix.csv", delimiter=',')
-#     clinicalDset = datasetFile.create_dataset("View3", clinical.shape)
-#     clinicalDset[...] = clinical
-#     clinicalDset.attrs["name"] = "Clinic"
-#     clinicalDset.attrs["sparse"] = False
-#     clinicalDset.attrs["binary"] = False
-#     logging.debug("Done:\t Getting Clinical data")
-#
-#     labelFile = open(path + 'brca_labels_triple-negatif.csv')
-#     labels = np.array([int(line.strip().split(',')[1]) for line in labelFile])
-#     labelsDset = datasetFile.create_dataset("Labels", labels.shape)
-#     labelsDset[...] = labels
-#     labelsDset.attrs["name"] = "Labels"
-#
-#     metaDataGrp = datasetFile.create_group("Metadata")
-#     metaDataGrp.attrs["nbView"] = 4
-#     metaDataGrp.attrs["nbClass"] = 2
-#     metaDataGrp.attrs["datasetLength"] = len(labels)
-#     labelDictionary = {0: "No", 1: "Yes"}
-#     datasetFile.close()
-#     datasetFile = h5py.File(path + "MultiOmic.hdf5", "r")
-#     # datasetFile = getPseudoRNASeq(datasetFile)
-#     return datasetFile, labelDictionary
-#
-#
-# def getVector(nbGenes):
-#     argmax = [0, 0]
-#     maxi = 0
-#     for i in range(nbGenes):
-#         for j in range(nbGenes):
-#             if j == i + 1:
-#                 value = (i + 1) * (nbGenes - j)
-#                 if value > maxi:
-#                     maxi = value
-#                     argmax = [i, j]
-#     i, j = argmax
-#     vectorLeft = np.zeros(nbGenes, dtype=bool)
-#     vectorLeft[:i + 1] = np.ones(i + 1, dtype=bool)
-#     vectorSup = np.zeros(nbGenes, dtype=bool)
-#     vectorSup[j:] = np.ones(nbGenes - j, dtype=bool)
-#     matrixSup = j
-#     matrixInf = nbGenes - j
-#     return vectorLeft, matrixSup, matrixInf
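-#
-# NOTE (editor): since only j == i + 1 contributes in the double loop above,
-# getVector's argmax reduces to a single pass (sketch):
-#     i = max(range(nbGenes - 1), key=lambda i: (i + 1) * (nbGenes - i - 1))
-#     j = i + 1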
-#
-#
-# def findClosestPowerOfTwo(factorizationParam):
-#     power = 1
-#     while factorizationParam - power > 0:
-#         power *= 2
-#     if abs(factorizationParam - power) < abs(factorizationParam - power // 2):
-#         return power
-#     else:
-#         return power // 2
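-#
-# NOTE (editor): a closed-form sketch of the same rounding, assuming
-# factorizationParam >= 1:
-#     power = 2 ** math.ceil(math.log2(factorizationParam))
-#     return power if power - factorizationParam < factorizationParam - power // 2 \
-#         else power // 2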
-#
-#
-# def easyFactorize(nbGenes, factorizationParam, t=0):
-#     if math.log(factorizationParam + 1, 2) % 1 == 0.0:
-#         pass
-#     else:
-#         factorizationParam = findClosestPowerOfTwo(factorizationParam) - 1
-#
-#     if nbGenes == 2:
-#         return 1, np.array([True, False])
-#
-#     if nbGenes == 3:
-#         return 1, np.array([True, True, False])
-#
-#     if factorizationParam == 1:
-#         t = 1
-#         return t, getVector(nbGenes)[0]
-#
-#     vectorLeft, matrixSup, matrixInf = getVector(nbGenes)
-#
-#     t_, vectorLeftSup = easyFactorize(matrixSup, (factorizationParam - 1) / 2, t=t)
-#     t__, vectorLeftInf = easyFactorize(matrixInf, (factorizationParam - 1) / 2, t=t)
-#
-#     factorLeft = np.zeros((nbGenes, t_ + t__ + 1), dtype=bool)
-#
-#     factorLeft[:matrixSup, :t_] = vectorLeftSup.reshape(factorLeft[:matrixSup, :t_].shape)
-#     if nbGenes % 2 == 1:
-#         factorLeft[matrixInf - 1:, t_:t__ + t_] = vectorLeftInf.reshape(factorLeft[matrixInf - 1:, t_:t__ + t_].shape)
-#     else:
-#         factorLeft[matrixInf:, t_:t__ + t_] = vectorLeftInf.reshape(factorLeft[matrixInf:, t_:t__ + t_].shape)
-#     factorLeft[:, t__ + t_] = vectorLeft
-#
-#     # factorSup = np.zeros((t_+t__+1, nbGenes), dtype=bool)
-#     #
-#     # factorSup[:t_, :matrixSup] = vectorSupLeft.reshape(factorSup[:t_, :matrixSup].shape)
-#     # if nbGenes%2==1:
-#     #     factorSup[t_:t__+t_, matrixInf-1:] = vectorSupRight.reshape(factorSup[t_:t__+t_, matrixInf-1:].shape)
-#     # else:
-#     #     factorSup[t_:t__+t_, matrixInf:] = vectorSupRight.reshape(factorSup[t_:t__+t_, matrixInf:].shape)
-#     # factorSup[t__+t_, :] = vectorSup
-#     return t__ + t_ + 1, factorLeft  # , factorSup
-#
-#
-# def getBaseMatrices(nbGenes, factorizationParam, path):
-#     t, factorLeft = easyFactorize(nbGenes, factorizationParam)
-#     np.savetxt(path + "factorLeft--n-" + str(nbGenes) + "--k-" + str(factorizationParam) + ".csv", factorLeft,
-#                delimiter=",")
-#     return factorLeft
-#
-#
-# def findParams(arrayLen, nbPatients, random_state, maxNbBins=2000, minNbBins=10, maxLenBin=70000, minOverlapping=1,
-#                minNbBinsOverlapped=0, maxNbSolutions=30):
-#     results = []
-#     if arrayLen * arrayLen * 10 / 100 > minNbBinsOverlapped * nbPatients:
-#         for lenBin in range(arrayLen - 1):
-#             lenBin += 1
-#             if lenBin < maxLenBin and minNbBins * lenBin < arrayLen:
-#                 for overlapping in sorted(range(lenBin - 1), reverse=True):
-#                     overlapping += 1
-#                     if overlapping > minOverlapping and lenBin % (lenBin - overlapping) == 0:
-#                         for nbBins in sorted(range(arrayLen - 1), reverse=True):
-#                             nbBins += 1
-#                             if nbBins < maxNbBins:
-#                                 if arrayLen == (nbBins - 1) * (lenBin - overlapping) + lenBin:
-#                                     results.append({"nbBins": nbBins, "overlapping": overlapping, "lenBin": lenBin})
-#                                     if len(results) == maxNbSolutions:
-#                                         return results[random_state.randint(len(results))]
-#         if results:
-#             return results[random_state.randint(len(results))]
-#
-#
-# def findBins(nbBins=142, overlapping=493, lenBin=986):
-#     bins = []
-#     for binIndex in range(nbBins):
-#         bins.append([i + binIndex * (lenBin - overlapping) for i in range(lenBin)])
-#     return bins
-#
-#
-# def getBins(array, bins, lenBin, overlapping):
-#     binnedcoord = []
-#     for coordIndex, coord in enumerate(array):
-#         for binIndex, bin_ in enumerate(bins):
-#             if coordIndex in bin_:
-#                 binnedcoord.append(binIndex + (coord * len(bins)))
-#     return np.array(binnedcoord)
-#
-#
-# def makeSortedBinsMatrix(nbBins, lenBins, overlapping, arrayLen, path):
-#     sortedBinsMatrix = np.zeros((arrayLen, nbBins), dtype=np.uint8)
-#     step = lenBins - overlapping
-#     for binIndex in range(nbBins):
-#         sortedBinsMatrix[step * binIndex:lenBins + (step * binIndex), binIndex] = np.ones(lenBins, dtype=np.uint8)
-#     np.savetxt(path + "sortedBinsMatrix--t-" + str(lenBins) + "--n-" + str(nbBins) + "--c-" + str(overlapping) + ".csv",
-#                sortedBinsMatrix, delimiter=",")
-#     return sortedBinsMatrix
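-#
-# NOTE (editor): tiny worked example: nbBins=3, lenBins=4, overlapping=2
-# gives step = 2 and fits arrayLen = (3 - 1) * 2 + 4 = 8; the 8x3 matrix has
-# ones on rows 0-3 of column 0, rows 2-5 of column 1 and rows 4-7 of
-# column 2, i.e. consecutive bins overlap by exactly 2 rows.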
-#
-#
-# def makeSparseTotalMatrix(sortedRNASeq, random_state):
-#     nbPatients, nbGenes = sortedRNASeq.shape
-#     params = findParams(nbGenes, nbPatients, random_state)
-#     nbBins = params["nbBins"]
-#     overlapping = params["overlapping"]
-#     lenBin = params["lenBin"]
-#     bins = findBins(nbBins, overlapping, lenBin)
-#     sparseFull = sparse.csc_matrix((nbPatients, nbGenes * nbBins))
-#     for patientIndex, patient in enumerate(sortedRNASeq):
-#         columnIndices = getBins(patient, bins, lenBin, overlapping)
-#         rowIndices = np.zeros(len(columnIndices), dtype=int) + patientIndex
-#         data = np.ones(len(columnIndices), dtype=bool)
-#         sparseFull = sparseFull + sparse.csc_matrix((data, (rowIndices, columnIndices)),
-#                                                     shape=(nbPatients, nbGenes * nbBins))
-#     return sparseFull
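-#
-# NOTE (editor): summing csc_matrix objects inside the loop is quadratic in
-# nbPatients; a linear sketch collects the triplets first:
-#     rows, cols = [], []
-#     for patientIndex, patient in enumerate(sortedRNASeq):
-#         patient_cols = getBins(patient, bins, lenBin, overlapping)
-#         cols.extend(patient_cols)
-#         rows.extend([patientIndex] * len(patient_cols))
-#     sparseFull = sparse.csc_matrix(
-#         (np.ones(len(rows), dtype=bool), (rows, cols)),
-#         shape=(nbPatients, nbGenes * nbBins))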
-#
-#
-# def getAdjacenceMatrix(RNASeqRanking, sortedRNASeq, k=2):
-#     k = int(k) // 2 * 2  # force k to be even; Python 3 needs // for int division
-#     indices = np.zeros((RNASeqRanking.shape[0] * k * RNASeqRanking.shape[1]), dtype=int)
-#     data = np.ones((RNASeqRanking.shape[0] * k * RNASeqRanking.shape[1]), dtype=bool)
-#     indptr = np.zeros(RNASeqRanking.shape[0] + 1, dtype=int)
-#     nbGenes = RNASeqRanking.shape[1]
-#     pointer = 0
-#     for patientIndex in range(RNASeqRanking.shape[0]):
-#         for i in range(nbGenes):
-#             for j in range(k // 2):
-#                 try:
-#                     indices[pointer] = RNASeqRanking[
-#                                            patientIndex, (sortedRNASeq[patientIndex, i] - (j + 1))] + i * nbGenes
-#                     pointer += 1
-#                 except IndexError:
-#                     pass
-#                 try:
-#                     indices[pointer] = RNASeqRanking[
-#                                            patientIndex, (sortedRNASeq[patientIndex, i] + (j + 1))] + i * nbGenes
-#                     pointer += 1
-#                 except IndexError:
-#                     pass
-#                     # elif i<=k:
-#                     # 	indices.append(patient[1]+patient[i]*nbGenes)
-#                     # 	data.append(True)
-#                     # elif i==nbGenes-1:
-#                     # 	indices.append(patient[i-1]+patient[i]*nbGenes)
-#                     # 	data.append(True)
-#         indptr[patientIndex + 1] = pointer
-#
-#     mat = sparse.csr_matrix((data, indices, indptr),
-#                             shape=(RNASeqRanking.shape[0], RNASeqRanking.shape[1] * RNASeqRanking.shape[1]), dtype=bool)
-#     return mat
-#
-#
-# def getKMultiOmicDBcsv(features, path, name, NB_CLASS, LABELS_NAMES):
-#     datasetFile = h5py.File(path + "KMultiOmic.hdf5", "w")
-#
-#     # logging.debug("Start:\t Getting Methylation data")
-#     methylData = np.genfromtxt(path + "matching_methyl.csv", delimiter=',')
-#     logging.debug("Done:\t Getting Methylation data")
-#
-#     logging.debug("Start:\t Getting Sorted Methyl data")
-#     Methyl = methylData
-#     sortedMethylGeneIndices = np.zeros(methylData.shape, dtype=int)
-#     MethylRanking = np.zeros(methylData.shape, dtype=int)
-#     for exampleIndex, exampleArray in enumerate(Methyl):
-#         sortedMethylDictionary = dict((index, value) for index, value in enumerate(exampleArray))
-#         sortedMethylIndicesDict = sorted(sortedMethylDictionary.items(), key=operator.itemgetter(1))
-#         sortedMethylIndicesArray = np.array([index for (index, value) in sortedMethylIndicesDict], dtype=int)
-#         sortedMethylGeneIndices[exampleIndex] = sortedMethylIndicesArray
-#         for geneIndex in range(Methyl.shape[1]):
-#             MethylRanking[exampleIndex, sortedMethylIndicesArray[geneIndex]] = geneIndex
-#     logging.debug("Done:\t Getting Sorted Methyl data")
-#
-#     logging.debug("Start:\t Getting Binarized Methyl data")
-#     k = findClosestPowerOfTwo(9) - 1
-#     try:
-#         factorizedLeftBaseMatrix = np.genfromtxt(
-#             path + "factorLeft--n-" + str(methylData.shape[1]) + "--k-" + str(k) + ".csv", delimiter=',')
-#     except OSError:
-#         factorizedLeftBaseMatrix = getBaseMatrices(methylData.shape[1], k, path)
-#     bMethylDset = datasetFile.create_dataset("View0",
-#                                              (sortedMethylGeneIndices.shape[0], sortedMethylGeneIndices.shape[1] * k),
-#                                              dtype=np.uint8)
-#     for patientIndex, patientSortedArray in enumerate(sortedMethylGeneIndices):
-#         patientMatrix = np.zeros((sortedMethylGeneIndices.shape[1], k), dtype=np.uint8)
-#         for lineIndex, geneIndex in enumerate(patientSortedArray):
-#             patientMatrix[geneIndex] = factorizedLeftBaseMatrix[lineIndex, :]
-#         bMethylDset[patientIndex] = patientMatrix.flatten()
-#     bMethylDset.attrs["name"] = "BMethyl" + str(k)
-#     bMethylDset.attrs["sparse"] = False
-#     bMethylDset.attrs["binary"] = True
-#     logging.debug("Done:\t Getting Binarized Methyl data")
-#
-#     logging.debug("Start:\t Getting Binned Methyl data")
-#     lenBins = 3298
-#     nbBins = 9
-#     overlapping = 463
-#     try:
-#         sortedBinsMatrix = np.genfromtxt(
-#             path + "sortedBinsMatrix--t-" + str(lenBins) + "--n-" + str(nbBins) + "--c-" + str(overlapping) + ".csv",
-#             delimiter=",")
-#     except OSError:
-#         sortedBinsMatrix = makeSortedBinsMatrix(nbBins, lenBins, overlapping, methylData.shape[1], path)
-#     binnedMethyl = datasetFile.create_dataset("View1", (
-#         sortedMethylGeneIndices.shape[0], sortedMethylGeneIndices.shape[1] * nbBins), dtype=np.uint8)
-#     for patientIndex, patientSortedArray in enumerate(sortedMethylGeneIndices):
-#         patientMatrix = np.zeros((sortedMethylGeneIndices.shape[1], nbBins), dtype=np.uint8)
-#         for lineIndex, geneIndex in enumerate(patientSortedArray):
-#             patientMatrix[geneIndex] = sortedBinsMatrix[lineIndex, :]
-#         binnedMethyl[patientIndex] = patientMatrix.flatten()
-#     binnedMethyl.attrs["name"] = "bMethyl" + str(nbBins)
-#     binnedMethyl.attrs["sparse"] = False
-#     binnedMethyl.attrs["binary"] = True
-#     logging.debug("Done:\t Getting Binned Methyl data")
-#
-#     logging.debug("Start:\t Getting Binarized Methyl data")
-#     k = findClosestPowerOfTwo(17) - 1
-#     try:
-#         factorizedLeftBaseMatrix = np.genfromtxt(
-#             path + "factorLeft--n-" + str(methylData.shape[1]) + "--k-" + str(k) + ".csv", delimiter=',')
-#     except OSError:
-#         factorizedLeftBaseMatrix = getBaseMatrices(methylData.shape[1], k, path)
-#     bMethylDset = datasetFile.create_dataset("View2",
-#                                              (sortedMethylGeneIndices.shape[0], sortedMethylGeneIndices.shape[1] * k),
-#                                              dtype=np.uint8)
-#     for patientIndex, patientSortedArray in enumerate(sortedMethylGeneIndices):
-#         patientMatrix = np.zeros((sortedMethylGeneIndices.shape[1], k), dtype=np.uint8)
-#         for lineIndex, geneIndex in enumerate(patientSortedArray):
-#             patientMatrix[geneIndex] = factorizedLeftBaseMatrix[lineIndex, :]
-#         bMethylDset[patientIndex] = patientMatrix.flatten()
-#     bMethylDset.attrs["name"] = "BMethyl" + str(k)
-#     bMethylDset.attrs["sparse"] = False
-#     bMethylDset.attrs["binary"] = True
-#     logging.debug("Done:\t Getting Binarized Methyl data")
-#
-#     logging.debug("Start:\t Getting Binned Methyl data")
-#     lenBins = 2038
-#     nbBins = 16
-#     overlapping = 442
-#     try:
-#         sortedBinsMatrix = np.genfromtxt(
-#             path + "sortedBinsMatrix--t-" + str(lenBins) + "--n-" + str(nbBins) + "--c-" + str(overlapping) + ".csv",
-#             delimiter=",")
-#     except OSError:
-#         sortedBinsMatrix = makeSortedBinsMatrix(nbBins, lenBins, overlapping, methylData.shape[1], path)
-#     binnedMethyl = datasetFile.create_dataset("View3", (
-#         sortedMethylGeneIndices.shape[0], sortedMethylGeneIndices.shape[1] * nbBins), dtype=np.uint8)
-#     for patientIndex, patientSortedArray in enumerate(sortedMethylGeneIndices):
-#         patientMatrix = np.zeros((sortedMethylGeneIndices.shape[1], nbBins), dtype=np.uint8)
-#         for lineIndex, geneIndex in enumerate(patientSortedArray):
-#             patientMatrix[geneIndex] = sortedBinsMatrix[lineIndex, :]
-#         binnedMethyl[patientIndex] = patientMatrix.flatten()
-#     binnedMethyl.attrs["name"] = "bMethyl" + str(nbBins)
-#     binnedMethyl.attrs["sparse"] = False
-#     binnedMethyl.attrs["binary"] = True
-#     logging.debug("Done:\t Getting Binned Methyl data")
-#
-#     labelFile = open(path + 'brca_labels_triple-negatif.csv')
-#     labels = np.array([int(line.strip().split(',')[1]) for line in labelFile])
-#     labelsDset = datasetFile.create_dataset("Labels", labels.shape)
-#     labelsDset[...] = labels
-#     labelsDset.attrs["name"] = "Labels"
-#
-#     metaDataGrp = datasetFile.create_group("Metadata")
-#     metaDataGrp.attrs["nbView"] = 4
-#     metaDataGrp.attrs["nbClass"] = 2
-#     metaDataGrp.attrs["datasetLength"] = len(labels)
-#     labelDictionary = {0: "No", 1: "Yes"}
-#
-#     datasetFile.close()
-#     datasetFile = h5py.File(path + "KMultiOmic.hdf5", "r")
-#
-#     return datasetFile, labelDictionary
-#
-#
-# def getKMultiOmicDBhdf5(features, path, name, NB_CLASS, LABELS_NAMES):
-#     datasetFile = h5py.File(path + "KMultiOmic.hdf5", "r")
-#     labelDictionary = {0: "No", 1: "Yes"}
-#     return datasetFile, labelDictionary
-#
-#
-# def getModifiedMultiOmicDBcsv(features, path, name, NB_CLASS, LABELS_NAMES):
-#     datasetFile = h5py.File(path + "ModifiedMultiOmic.hdf5", "w")
-#
-#     logging.debug("Start:\t Getting Methylation data")
-#     methylData = np.genfromtxt(path + "matching_methyl.csv", delimiter=',')
-#     methylDset = datasetFile.create_dataset("View0", methylData.shape)
-#     methylDset[...] = methylData
-#     methylDset.attrs["name"] = "Methyl_"
-#     methylDset.attrs["sparse"] = False
-#     methylDset.attrs["binary"] = False
-#     logging.debug("Done:\t Getting Methylation data")
-#
-#     logging.debug("Start:\t Getting Sorted Methyl data")
-#     Methyl = datasetFile["View0"][...]
-#     sortedMethylGeneIndices = np.zeros(datasetFile.get("View0").shape, dtype=int)
-#     MethylRanking = np.zeros(datasetFile.get("View0").shape, dtype=int)
-#     for exampleIndex, exampleArray in enumerate(Methyl):
-#         sortedMethylDictionary = dict((index, value) for index, value in enumerate(exampleArray))
-#         sortedMethylIndicesDict = sorted(sortedMethylDictionary.items(), key=operator.itemgetter(1))
-#         sortedMethylIndicesArray = np.array([index for (index, value) in sortedMethylIndicesDict], dtype=int)
-#         sortedMethylGeneIndices[exampleIndex] = sortedMethylIndicesArray
-#         for geneIndex in range(Methyl.shape[1]):
-#             MethylRanking[exampleIndex, sortedMethylIndicesArray[geneIndex]] = geneIndex
-#     mMethylDset = datasetFile.create_dataset("View10", sortedMethylGeneIndices.shape, data=sortedMethylGeneIndices)
-#     mMethylDset.attrs["name"] = "SMethyl"
-#     mMethylDset.attrs["sparse"] = False
-#     mMethylDset.attrs["binary"] = False
-#     logging.debug("Done:\t Getting Sorted Methyl data")
-#
-#     logging.debug("Start:\t Getting Binarized Methyl data")
-#     k = findClosestPowerOfTwo(58) - 1
-#     try:
-#         factorizedLeftBaseMatrix = np.genfromtxt(
-#             path + "factorLeft--n-" + str(datasetFile.get("View0").shape[1]) + "--k-" + str(k) + ".csv", delimiter=',')
-#     except OSError:
-#         factorizedLeftBaseMatrix = getBaseMatrices(methylData.shape[1], k, path)
-#     bMethylDset = datasetFile.create_dataset("View11",
-#                                              (sortedMethylGeneIndices.shape[0], sortedMethylGeneIndices.shape[1] * k),
-#                                              dtype=np.uint8)
-#     for patientIndex, patientSortedArray in enumerate(sortedMethylGeneIndices):
-#         patientMatrix = np.zeros((sortedMethylGeneIndices.shape[1], k), dtype=np.uint8)
-#         for lineIndex, geneIndex in enumerate(patientSortedArray):
-#             patientMatrix[geneIndex] = factorizedLeftBaseMatrix[lineIndex, :]
-#         bMethylDset[patientIndex] = patientMatrix.flatten()
-#     bMethylDset.attrs["name"] = "BMethyl"
-#     bMethylDset.attrs["sparse"] = False
-#     bMethylDset.attrs["binary"] = True
-#     logging.debug("Done:\t Getting Binarized Methyl data")
-#
-#     logging.debug("Start:\t Getting Binned Methyl data")
-#     lenBins = 2095
-#     nbBins = 58
-#     overlapping = 1676
-#     try:
-#         sortedBinsMatrix = np.genfromtxt(
-#             path + "sortedBinsMatrix--t-" + str(lenBins) + "--n-" + str(nbBins) + "--c-" + str(overlapping) + ".csv",
-#             delimiter=",")
-#     except OSError:
-#         sortedBinsMatrix = makeSortedBinsMatrix(nbBins, lenBins, overlapping, datasetFile.get("View0").shape[1], path)
-#     binnedMethyl = datasetFile.create_dataset("View12", (
-#         sortedMethylGeneIndices.shape[0], sortedMethylGeneIndices.shape[1] * nbBins), dtype=np.uint8)
-#     for patientIndex, patientSortedArray in enumerate(sortedMethylGeneIndices):
-#         patientMatrix = np.zeros((sortedMethylGeneIndices.shape[1], nbBins), dtype=np.uint8)
-#         for lineIndex, geneIndex in enumerate(patientSortedArray):
-#             patientMatrix[geneIndex] = sortedBinsMatrix[lineIndex, :]
-#         binnedMethyl[patientIndex] = patientMatrix.flatten()
-#     binnedMethyl.attrs["name"] = "bMethyl"
-#     binnedMethyl.attrs["sparse"] = False
-#     binnedMethyl.attrs["binary"] = True
-#     logging.debug("Done:\t Getting Binned Methyl data")
-#
-#     logging.debug("Start:\t Getting MiRNA data")
-#     mirnaData = np.genfromtxt(path + "matching_mirna.csv", delimiter=',')
-#     mirnaDset = datasetFile.create_dataset("View1", mirnaData.shape)
-#     mirnaDset[...] = mirnaData
-#     mirnaDset.attrs["name"] = "MiRNA__"
-#     mirnaDset.attrs["sparse"] = False
-#     mirnaDset.attrs["binary"] = False
-#     logging.debug("Done:\t Getting MiRNA data")
-#
-#     logging.debug("Start:\t Getting Sorted MiRNA data")
-#     MiRNA = datasetFile["View1"][...]
-#     sortedMiRNAGeneIndices = np.zeros(datasetFile.get("View1").shape, dtype=int)
-#     MiRNARanking = np.zeros(datasetFile.get("View1").shape, dtype=int)
-#     for exampleIndex, exampleArray in enumerate(MiRNA):
-#         sortedMiRNADictionary = dict((index, value) for index, value in enumerate(exampleArray))
-#         sortedMiRNAIndicesDict = sorted(sortedMiRNADictionary.items(), key=operator.itemgetter(1))
-#         sortedMiRNAIndicesArray = np.array([index for (index, value) in sortedMiRNAIndicesDict], dtype=int)
-#         sortedMiRNAGeneIndices[exampleIndex] = sortedMiRNAIndicesArray
-#         for geneIndex in range(MiRNA.shape[1]):
-#             MiRNARanking[exampleIndex, sortedMiRNAIndicesArray[geneIndex]] = geneIndex
-#     mmirnaDset = datasetFile.create_dataset("View7", sortedMiRNAGeneIndices.shape, data=sortedMiRNAGeneIndices)
-#     mmirnaDset.attrs["name"] = "SMiRNA_"
-#     mmirnaDset.attrs["sparse"] = False
-#     mmirnaDset.attrs["binary"] = False
-#     logging.debug("Done:\t Getting Sorted MiRNA data")
-#
-#     logging.debug("Start:\t Getting Binarized MiRNA data")
-#     k = findClosestPowerOfTwo(517) - 1
-#     try:
-#         factorizedLeftBaseMatrix = np.genfromtxt(
-#             path + "factorLeft--n-" + str(datasetFile.get("View1").shape[1]) + "--k-" + str(k) + ".csv", delimiter=',')
-#     except OSError:
-#         factorizedLeftBaseMatrix = getBaseMatrices(mirnaData.shape[1], k, path)
-#     bmirnaDset = datasetFile.create_dataset("View8",
-#                                             (sortedMiRNAGeneIndices.shape[0], sortedMiRNAGeneIndices.shape[1] * k),
-#                                             dtype=np.uint8)
-#     for patientIndex, patientSortedArray in enumerate(sortedMiRNAGeneIndices):
-#         patientMatrix = np.zeros((sortedMiRNAGeneIndices.shape[1], k), dtype=np.uint8)
-#         for lineIndex, geneIndex in enumerate(patientSortedArray):
-#             patientMatrix[geneIndex] = factorizedLeftBaseMatrix[lineIndex, :]
-#         bmirnaDset[patientIndex] = patientMatrix.flatten()
-#     bmirnaDset.attrs["name"] = "BMiRNA_"
-#     bmirnaDset.attrs["sparse"] = False
-#     bmirnaDset.attrs["binary"] = True
-#     logging.debug("Done:\t Getting Binarized MiRNA data")
-#
-#     logging.debug("Start:\t Getting Binned MiRNA data")
-#     lenBins = 14
-#     nbBins = 517
-#     overlapping = 12
-#     try:
-#         sortedBinsMatrix = np.genfromtxt(
-#             path + "sortedBinsMatrix--t-" + str(lenBins) + "--n-" + str(nbBins) + "--c-" + str(overlapping) + ".csv",
-#             delimiter=",")
-#     except OSError:
-#         sortedBinsMatrix = makeSortedBinsMatrix(nbBins, lenBins, overlapping, datasetFile.get("View1").shape[1], path)
-#     binnedMiRNA = datasetFile.create_dataset("View9", (
-#         sortedMiRNAGeneIndices.shape[0], sortedMiRNAGeneIndices.shape[1] * nbBins), dtype=np.uint8)
-#     for patientIndex, patientSortedArray in enumerate(sortedMiRNAGeneIndices):
-#         patientMatrix = np.zeros((sortedMiRNAGeneIndices.shape[1], nbBins), dtype=np.uint8)
-#         for lineIndex, geneIndex in enumerate(patientSortedArray):
-#             patientMatrix[geneIndex] = sortedBinsMatrix[lineIndex, :]
-#         binnedMiRNA[patientIndex] = patientMatrix.flatten()
-#     binnedMiRNA.attrs["name"] = "bMiRNA_"
-#     binnedMiRNA.attrs["sparse"] = False
-#     binnedMiRNA.attrs["binary"] = True
-#     logging.debug("Done:\t Getting Binned MiRNA data")
-#
-#     logging.debug("Start:\t Getting RNASeq data")
-#     rnaseqData = np.genfromtxt(path + "matching_rnaseq.csv", delimiter=',')
-#     uselessRows = []
-#     for rowIndex, row in enumerate(np.transpose(rnaseqData)):
-#         if not row.any():
-#             uselessRows.append(rowIndex)
-#     usefulRows = [usefulRowIndex for usefulRowIndex in range(rnaseqData.shape[1]) if usefulRowIndex not in uselessRows]
-#     rnaseqDset = datasetFile.create_dataset("View2", (rnaseqData.shape[0], len(usefulRows)))
-#     rnaseqDset[...] = rnaseqData[:, usefulRows]
-#     rnaseqDset.attrs["name"] = "RNASeq_"
-#     rnaseqDset.attrs["sparse"] = False
-#     rnaseqDset.attrs["binary"] = False
-#     logging.debug("Done:\t Getting RNASeq data")
-#
-#     logging.debug("Start:\t Getting Sorted RNASeq data")
-#     RNASeq = datasetFile["View2"][...]
-#     sortedRNASeqGeneIndices = np.zeros(datasetFile.get("View2").shape, dtype=int)
-#     RNASeqRanking = np.zeros(datasetFile.get("View2").shape, dtype=int)
-#     for exampleIndex, exampleArray in enumerate(RNASeq):
-#         sortedRNASeqDictionary = dict((index, value) for index, value in enumerate(exampleArray))
-#         sortedRNASeqIndicesDict = sorted(sortedRNASeqDictionary.items(), key=operator.itemgetter(1))
-#         sortedRNASeqIndicesArray = np.array([index for (index, value) in sortedRNASeqIndicesDict], dtype=int)
-#         sortedRNASeqGeneIndices[exampleIndex] = sortedRNASeqIndicesArray
-#         for geneIndex in range(RNASeq.shape[1]):
-#             RNASeqRanking[exampleIndex, sortedRNASeqIndicesArray[geneIndex]] = geneIndex
-#     mrnaseqDset = datasetFile.create_dataset("View4", sortedRNASeqGeneIndices.shape, data=sortedRNASeqGeneIndices)
-#     mrnaseqDset.attrs["name"] = "SRNASeq"
-#     mrnaseqDset.attrs["sparse"] = False
-#     mrnaseqDset.attrs["binary"] = False
-#     logging.debug("Done:\t Getting Sorted RNASeq data")
-#
-#     logging.debug("Start:\t Getting Binarized RNASeq data")
-#     k = findClosestPowerOfTwo(100) - 1
-#     try:
-#         factorizedLeftBaseMatrix = np.genfromtxt(
-#             path + "factorLeft--n-" + str(datasetFile.get("View2").shape[1]) + "--k-" + str(k) + ".csv",
-#             delimiter=',')
-#     except OSError:
-#         factorizedLeftBaseMatrix = getBaseMatrices(rnaseqData.shape[1], k, path)
-#     brnaseqDset = datasetFile.create_dataset("View5",
-#                                              (sortedRNASeqGeneIndices.shape[0], sortedRNASeqGeneIndices.shape[1] * k),
-#                                              dtype=np.uint8)
-#     for patientIndex, patientSortedArray in enumerate(sortedRNASeqGeneIndices):
-#         patientMatrix = np.zeros((sortedRNASeqGeneIndices.shape[1], k), dtype=np.uint8)
-#         for lineIndex, geneIndex in enumerate(patientSortedArray):
-#             patientMatrix[geneIndex] = factorizedLeftBaseMatrix[lineIndex, :]
-#         brnaseqDset[patientIndex] = patientMatrix.flatten()
-#     brnaseqDset.attrs["name"] = "BRNASeq"
-#     brnaseqDset.attrs["sparse"] = False
-#     brnaseqDset.attrs["binary"] = True
-#     logging.debug("Done:\t Getting Binarized RNASeq data")
-#
-#     logging.debug("Start:\t Getting Binned RNASeq data")
-#     lenBins = 986
-#     nbBins = 142
-#     overlapping = 493
-#     try:
-#         sortedBinsMatrix = np.genfromtxt(
-#             path + "sortedBinsMatrix--t-" + str(lenBins) + "--n-" + str(nbBins) + "--c-" + str(overlapping) + ".csv",
-#             delimiter=",")
-#     except OSError:
-#         sortedBinsMatrix = makeSortedBinsMatrix(nbBins, lenBins, overlapping, datasetFile.get("View2").shape[1], path)
-#     binnedRNASeq = datasetFile.create_dataset("View6", (
-#         sortedRNASeqGeneIndices.shape[0], sortedRNASeqGeneIndices.shape[1] * nbBins), dtype=np.uint8)
-#     for patientIndex, patientSortedArray in enumerate(sortedRNASeqGeneIndices):
-#         patientMatrix = np.zeros((sortedRNASeqGeneIndices.shape[1], nbBins), dtype=np.uint8)
-#         for lineIndex, geneIndex in enumerate(patientSortedArray):
-#             patientMatrix[geneIndex] = sortedBinsMatrix[lineIndex, :]
-#         binnedRNASeq[patientIndex] = patientMatrix.flatten()
-#     binnedRNASeq.attrs["name"] = "bRNASeq"
-#     binnedRNASeq.attrs["sparse"] = False
-#     binnedRNASeq.attrs["binary"] = True
-#     logging.debug("Done:\t Getting Binned RNASeq data")
-#
-#     logging.debug("Start:\t Getting Clinical data")
-#     clinical = np.genfromtxt(path + "clinicalMatrix.csv", delimiter=',')
-#     clinicalDset = datasetFile.create_dataset("View3", clinical.shape)
-#     clinicalDset[...] = clinical
-#     clinicalDset.attrs["name"] = "Clinic_"
-#     clinicalDset.attrs["sparse"] = False
-#     clinicalDset.attrs["binary"] = False
-#     logging.debug("Done:\t Getting Clinical data")
-#
-#     logging.debug("Start:\t Getting Binarized Clinical data")
-#     binarized_clinical = np.zeros((347, 1951), dtype=np.uint8)
-#     nb_already_done = 0
-#     for featureIndex, feature in enumerate(np.transpose(clinical)):
-#         featureSet = set(feature)
-#         featureDict = dict((val, valIndex) for valIndex, val in enumerate(list(featureSet)))
-#         for valueIndex, value in enumerate(feature):
-#             binarized_clinical[valueIndex, featureDict[value] + nb_already_done] = 1
-#         nb_already_done += len(featureSet)
-#     bClinicalDset = datasetFile.create_dataset("View13", binarized_clinical.shape, dtype=np.uint8,
-#                                                data=binarized_clinical)
-#     bClinicalDset.attrs["name"] = "bClinic"
-#     bClinicalDset.attrs["sparse"] = False
-#     bClinicalDset.attrs["binary"] = True
-#     logging.debug("Done:\t Getting Binarized Clinical data")
-#
-#     # logging.debug("Start:\t Getting Adjacence RNASeq data")
-#     # sparseAdjRNASeq = getAdjacenceMatrix(RNASeqRanking, sortedRNASeqGeneIndices, k=findClosestPowerOfTwo(10)-1)
-#     # sparseAdjRNASeqGrp = datasetFile.create_group("View6")
-#     # dataDset = sparseAdjRNASeqGrp.create_dataset("data", sparseAdjRNASeq.data.shape, data=sparseAdjRNASeq.data)
-#     # indicesDset = sparseAdjRNASeqGrp.create_dataset("indices",
-#     # sparseAdjRNASeq.indices.shape, data=sparseAdjRNASeq.indices)
-#     # indptrDset = sparseAdjRNASeqGrp.create_dataset("indptr",
-#     # sparseAdjRNASeq.indptr.shape, data=sparseAdjRNASeq.indptr)
-#     # sparseAdjRNASeqGrp.attrs["name"]="ARNASeq"
-#     # sparseAdjRNASeqGrp.attrs["sparse"]=True
-#     # sparseAdjRNASeqGrp.attrs["shape"]=sparseAdjRNASeq.shape
-#     # logging.debug("Done:\t Getting Adjacence RNASeq data")
-#
-#     labelFile = open(path + 'brca_labels_triple-negatif.csv')
-#     labels = np.array([int(line.strip().split(',')[1]) for line in labelFile])
-#     labelsDset = datasetFile.create_dataset("Labels", labels.shape)
-#     labelsDset[...] = labels
-#     labelsDset.attrs["name"] = "Labels"
-#
-#     metaDataGrp = datasetFile.create_group("Metadata")
-#     metaDataGrp.attrs["nbView"] = 14
-#     metaDataGrp.attrs["nbClass"] = 2
-#     metaDataGrp.attrs["datasetLength"] = len(labels)
-#     labelDictionary = {0: "No", 1: "Yes"}
-#
-#     datasetFile.close()
-#     datasetFile = h5py.File(path + "ModifiedMultiOmic.hdf5", "r")
-#
-#     return datasetFile, labelDictionary
-#
-#
-# def getModifiedMultiOmicDBhdf5(features, path, name, NB_CLASS, LABELS_NAMES):
-#     datasetFile = h5py.File(path + "ModifiedMultiOmic.hdf5", "r")
-#     labelDictionary = {0: "No", 1: "Yes"}
-#     return datasetFile, labelDictionary
-#
-#
-# def getMultiOmicDBhdf5(features, path, name, NB_CLASS, LABELS_NAMES):
-#     datasetFile = h5py.File(path + "MultiOmic.hdf5", "r")
-#     labelDictionary = {0: "No", 1: "Yes"}
-#     return datasetFile, labelDictionary
-#
-#
-#
-# # def getOneViewFromDB(viewName, pathToDB, DBName):
-# #     view = np.genfromtxt(pathToDB + DBName +"-" + viewName, delimiter=';')
-# #     return view
-#
-#
-# # def getClassLabels(pathToDB, DBName):
-# #     labels = np.genfromtxt(pathToDB + DBName + "-" + "ClassLabels.csv", delimiter=';')
-# #     return labels
-#
-#
-# # def getDataset(pathToDB, viewNames, DBName):
-# #     dataset = []
-# #     for viewName in viewNames:
-# #         dataset.append(getOneViewFromDB(viewName, pathToDB, DBName))
-# #     return np.array(dataset)
-#
-#
-# # def getAwaLabels(nbLabels, pathToAwa):
-# #     labelsFile = open(pathToAwa + 'Animals_with_Attributes/classes.txt')
-# #     linesFile = [''.join(line.strip().split()).translate(str.maketrans('', '', digits))
-# #                  for line in labelsFile.readlines()]
-# #     return linesFile
-#
-#
-# # def getAwaDBcsv(views, pathToAwa, nameDB, nbLabels, LABELS_NAMES):
-# #     awaLabels = getAwaLabels(nbLabels, pathToAwa)
-# #     nbView = len(views)
-# #     nbMaxLabels = len(awaLabels)
-# #     if nbLabels == -1:
-# #         nbLabels = nbMaxLabels
-# #     nbNamesGiven = len(LABELS_NAMES)
-# #     if nbNamesGiven > nbLabels:
-# #         labelDictionary = {i:LABELS_NAMES[i] for i in np.arange(nbLabels)}
-# #     elif nbNamesGiven < nbLabels and nbLabels <= nbMaxLabels:
-# #         if LABELS_NAMES != ['']:
-# #             labelDictionary = {i:LABELS_NAMES[i] for i in np.arange(nbNamesGiven)}
-# #         else:
-# #             labelDictionary = {}
-# #             nbNamesGiven = 0
-# #         nbLabelsToAdd = nbLabels-nbNamesGiven
-# #         while nbLabelsToAdd > 0:
-# #             currentLabel = random.choice(awaLabels)
-# #             if currentLabel not in labelDictionary.values():
-# #                 labelDictionary[nbLabels-nbLabelsToAdd]=currentLabel
-# #                 nbLabelsToAdd -= 1
-# #             else:
-# #                 pass
-# #     else:
-# #         labelDictionary = {i: LABELS_NAMES[i] for i in np.arange(nbNamesGiven)}
-# #     viewDictionary = {i: views[i] for i in np.arange(nbView)}
-# #     rawData = []
-# #     labels = []
-# #     nbExample = 0
-# #     for view in np.arange(nbView):
-# #         viewData = []
-# #         for labelIndex in np.arange(nbLabels):
-# #             pathToExamples = pathToAwa + 'Animals_with_Attributes/Features/' + viewDictionary[view] + '/' + \
-# #                              labelDictionary[labelIndex] + '/'
-# #             examples = os.listdir(pathToExamples)
-# #             if view == 0:
-# #                 nbExample += len(examples)
-# #             for example in examples:
-# #                 if viewDictionary[view]=='decaf':
-# #                     exampleFile = open(pathToExamples + example)
-# #                     viewData.append([float(line.strip()) for line in exampleFile])
-# #                 else:
-# #                     exampleFile = open(pathToExamples + example)
-# #                     viewData.append([[float(coordinate) for coordinate in raw.split()] for raw in exampleFile][0])
-# #                 if view == 0:
-# #                     labels.append(labelIndex)
-# #
-# #         rawData.append(np.array(viewData))
-# #     data = rawData
-# #     DATASET_LENGTH = len(labels)
-# #     return data, labels, labelDictionary, DATASET_LENGTH
-# #
-# #
-# # def getDbfromCSV(path):
-# #     files = os.listdir(path)
-# #     DATA = np.zeros((3,40,2))
-# #     for file in files:
-# #         if file[-9:]=='moins.csv' and file[:7]=='sample1':
-# #             X = open(path+file)
-# #             for x, i in zip(X, range(20)):
-# #                 DATA[0, i] = np.array([float(coord) for coord in x.strip().split('\t')])
-# #         if file[-9:]=='moins.csv' and file[:7]=='sample2':
-# #             X = open(path+file)
-# #             for x, i in zip(X, range(20)):
-# #                 DATA[1, i] = np.array([float(coord) for coord in x.strip().split('\t')])
-# #         if file[-9:]=='moins.csv' and file[:7]=='sample3':
-# #             X = open(path+file)
-# #             for x, i in zip(X, range(20)):
-# #                 DATA[2, i] = np.array([float(coord) for coord in x.strip().split('\t')])
-# #
-# #     for file in files:
-# #         if file[-8:]=='plus.csv' and file[:7]=='sample1':
-# #             X = open(path+file)
-# #             for x, i in zip(X, range(20)):
-# #                 DATA[0, i+20] = np.array([float(coord) for coord in x.strip().split('\t')])
-# #         if file[-8:]=='plus.csv' and file[:7]=='sample2':
-# #             X = open(path+file)
-# #             for x, i in zip(X, range(20)):
-# #                 DATA[1, i+20] = np.array([float(coord) for coord in x.strip().split('\t')])
-# #         if file[-8:]=='plus.csv' and file[:7]=='sample3':
-# #             X = open(path+file)
-# #             for x, i in zip(X, range(20)):
-# #                 DATA[2, i+20] = np.array([float(coord) for coord in x.strip().split('\t')])
-# #     LABELS = np.zeros(40)
-# #     LABELS[:20]=LABELS[:20]+1
-# #     return DATA, LABELS
-#
-# # def makeArrayFromTriangular(pseudoRNASeqMatrix):
-# #     matrixShape = len(pseudoRNASeqMatrix[0,:])
-# #     exampleArray = np.zeros(((matrixShape - 1) * matrixShape) // 2)
-# #     arrayIndex = 0
-# #     for i in range(matrixShape-1):
-# #         for j in range(i+1, matrixShape):
-# #             exampleArray[arrayIndex]=pseudoRNASeqMatrix[i,j]
-# #             arrayIndex += 1
-# #     return exampleArray
-#
-#
-# # def getPseudoRNASeq(dataset):
-# #     nbGenes = len(dataset["/View2/matrix"][0, :])
-# #     pseudoRNASeq = np.zeros((dataset["/datasetlength"][...],
-# #                              ((nbGenes - 1) * nbGenes) // 2), dtype=np.bool_)
-# #     for exampleIndex in range(dataset["/datasetlength"][...]):
-# #         arrayIndex = 0
-# #         for i in range(nbGenes):
-# #             for j in range(nbGenes):
-# #                 if i > j:
-# #                     pseudoRNASeq[exampleIndex, arrayIndex] = \
-# #                         dataset["/View2/matrix"][exampleIndex, j] < \
-# #                         dataset["/View2/matrix"][exampleIndex, i]
-# #                     arrayIndex += 1
-# #     dataset["/View4/matrix"] = pseudoRNASeq
-# #     dataset["/View4/name"] = "pseudoRNASeq"
-# #     return dataset
-#
-#
-# # def allSame(array):
-# #     value = array[0]
-# #     areAllSame = True
-# #     for i in array:
-# #         if i != value:
-# #             areAllSame = False
-# #     return areAllSame
-
-
-# def getFakeDBhdf5(features, pathF, name, NB_CLASS, LABELS_NAME, random_state):
-#     """Was used to generateafake dataset to run tests"""
-#     NB_VIEW = 4
-#     DATASET_LENGTH = 30
-#     NB_CLASS = 2
-#     VIEW_DIMENSIONS = random_state.randint(5, 21, NB_VIEW)  # random_integers is deprecated
-#
-#     DATA = dict((indx,
-#                  np.array([
-#                               random_state.normal(0.0, 2, viewDimension)
-#                               for i in np.arange(DATASET_LENGTH)]))
-#                 for indx, viewDimension in enumerate(VIEW_DIMENSIONS))
-#
-#     CLASS_LABELS = random_state.randint(0, NB_CLASS, DATASET_LENGTH)
-#     datasetFile = h5py.File(pathF + "Fake.hdf5", "w")
-#     for index, viewData in enumerate(DATA.values()):
-#         if index == 0:
-#             viewData = random_state.randint(0, 2, (DATASET_LENGTH, 300)).astype(
-#                 np.uint8)  # high is exclusive: randint(0, 1) would always yield 0
-#             # np.zeros(viewData.shape, dtype=bool)+np.ones((viewData.shape[0], viewData.shape[1]/2), dtype=bool)
-#             viewDset = datasetFile.create_dataset("View" + str(index), viewData.shape)
-#             viewDset[...] = viewData
-#             viewDset.attrs["name"] = "View" + str(index)
-#             viewDset.attrs["sparse"] = False
-#         elif index == 1:
-#             viewData = sparse.csr_matrix(viewData)
-#             viewGrp = datasetFile.create_group("View" + str(index))
-#             dataDset = viewGrp.create_dataset("data", viewData.data.shape, data=viewData.data)
-#             indicesDset = viewGrp.create_dataset("indices", viewData.indices.shape, data=viewData.indices)
-#             indptrDset = viewGrp.create_dataset("indptr", viewData.indptr.shape, data=viewData.indptr)
-#             viewGrp.attrs["name"] = "View" + str(index)
-#             viewGrp.attrs["sparse"] = True
-#             viewGrp.attrs["shape"] = viewData.shape
-#         else:
-#             viewDset = datasetFile.create_dataset("View" + str(index), viewData.shape)
-#             viewDset[...] = viewData
-#             viewDset.attrs["name"] = "View" + str(index)
-#             viewDset.attrs["sparse"] = False
-#     labelsDset = datasetFile.create_dataset("Labels", CLASS_LABELS.shape)
-#     labelsDset[...] = CLASS_LABELS
-#     labelsDset.attrs["name"] = "Labels"
-#
-#     metaDataGrp = datasetFile.create_group("Metadata")
-#     metaDataGrp.attrs["nbView"] = NB_VIEW
-#     metaDataGrp.attrs["nbClass"] = NB_CLASS
-#     metaDataGrp.attrs["datasetLength"] = len(CLASS_LABELS)
-#     labels_dictionary = {0: "No", 1: "Yes"}
-#     datasetFile.close()
-#     datasetFile = h5py.File(pathF + "Fake.hdf5", "r")
-#     return datasetFile, labels_dictionary
diff --git a/multiview_platform/mono_multi_view_classifiers/utils/hyper_parameter_search.py b/multiview_platform/mono_multi_view_classifiers/utils/hyper_parameter_search.py
deleted file mode 100644
index a13f6cab00c038f0668ea88bc8bf3e1a88469860..0000000000000000000000000000000000000000
--- a/multiview_platform/mono_multi_view_classifiers/utils/hyper_parameter_search.py
+++ /dev/null
@@ -1,653 +0,0 @@
-import itertools
-import sys
-import traceback
-import yaml
-from abc import abstractmethod
-
-import matplotlib.pyplot as plt
-import numpy as np
-from scipy.stats import randint, uniform
-from sklearn.model_selection import RandomizedSearchCV, GridSearchCV, \
-    ParameterGrid, ParameterSampler
-from sklearn.base import clone, BaseEstimator
-
-from .multiclass import MultiClassWrapper
-from .organization import secure_file_path
-from .base import get_metric
-from .. import metrics
-
-
-class HPSearch:
-
-    def get_scoring(self, metric):
-        if isinstance(metric, dict):
-            metric_module, metric_kwargs = get_metric(metric)
-            return metric_module.get_scorer(**metric_kwargs)
-        else:
-            return metric
-
-    def fit_multiview(self, X, y, groups=None, **fit_params):
-        n_splits = self.cv.get_n_splits(self.available_indices,
-                                        y[self.available_indices])
-        folds = list(
-            self.cv.split(self.available_indices, y[self.available_indices]))
-        self.get_candidate_params(X)
-        base_estimator = clone(self.estimator)
-        results = {}
-        self.cv_results_ = dict(("param_" + param_name, []) for param_name in
-                                self.candidate_params[0].keys())
-        self.cv_results_["mean_test_score"] = []
-        self.cv_results_["params"] = []
-        n_failed = 0
-        self.tracebacks_params = []
-        for candidate_param_idx, candidate_param in enumerate(self.candidate_params):
-            test_scores = np.zeros(n_splits) + 1000  # sentinel values, overwritten fold by fold
-            try:
-                for fold_idx, (train_indices, test_indices) in enumerate(folds):
-                    current_estimator = clone(base_estimator)
-                    current_estimator.set_params(**candidate_param)
-                    current_estimator.fit(X, y,
-                                          train_indices=self.available_indices[
-                                              train_indices],
-                                          view_indices=self.view_indices)
-                    test_prediction = current_estimator.predict(
-                        X,
-                        self.available_indices[test_indices],
-                        view_indices=self.view_indices)
-                    test_score = self.scoring._score_func(
-                        y[self.available_indices[test_indices]],
-                        test_prediction,
-                        **self.scoring._kwargs)
-                    test_scores[fold_idx] = test_score
-                self.cv_results_['params'].append(
-                    current_estimator.get_params())
-                cross_validation_score = np.mean(test_scores)
-                self.cv_results_["mean_test_score"].append(
-                    cross_validation_score)
-                results[candidate_param_idx] = cross_validation_score
-                if cross_validation_score >= max(results.values()):
-                    self.best_params_ = self.candidate_params[candidate_param_idx]
-                    self.best_score_ = cross_validation_score
-            except Exception:
-                if self.track_tracebacks:
-                    n_failed += 1
-                    self.tracebacks.append(traceback.format_exc())
-                    self.tracebacks_params.append(candidate_param)
-                else:
-                    raise
-        if n_failed == self.n_iter:
-            raise ValueError(
-                'No fits were performed. All HP combinations returned errors \n\n' + '\n'.join(
-                    self.tracebacks))
-        self.cv_results_["mean_test_score"] = np.array(
-            self.cv_results_["mean_test_score"])
-        if self.refit:
-            self.best_estimator_ = clone(base_estimator).set_params(
-                **self.best_params_)
-            self.best_estimator_.fit(X, y, **fit_params)
-        self.n_splits_ = n_splits
-        return self
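-
-    # NOTE (editor): illustrative usage sketch; clf, dataset, y and train_idx
-    # are assumptions (clf is a multiview estimator exposing
-    # fit(X, y, train_indices=..., view_indices=...) and gen_distribs(),
-    # dataset exposes nb_view):
-    #     from sklearn.metrics import accuracy_score, make_scorer
-    #     from sklearn.model_selection import StratifiedKFold
-    #     hps = Random(clf, scoring=make_scorer(accuracy_score),
-    #                  cv=StratifiedKFold(n_splits=5), n_iter=30,
-    #                  framework="multiview", random_state=42,
-    #                  learning_indices=train_idx)
-    #     hps.fit(dataset, y)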
-
-    @abstractmethod
-    def get_candidate_params(self, X): # pragma: no cover
-        raise NotImplementedError
-
-    def get_best_params(self):
-        best_params = self.best_params_
-        if "random_state" in best_params:
-            best_params.pop("random_state")
-        return best_params
-
-    def gen_report(self, output_file_name):
-        scores_array = self.cv_results_['mean_test_score']
-        sorted_indices = np.argsort(-scores_array)
-        tested_params = [self.cv_results_["params"][score_index]
-                              for score_index in sorted_indices]
-        scores_array = scores_array[sorted_indices]
-        output_string = ""
-        for parameters, score in zip(tested_params, scores_array):
-            formatted_params = format_params(parameters)
-            output_string += "\n{}\n\t\t{}".format(yaml.dump(formatted_params), score)
-        if self.tracebacks:
-            output_string += "Failed : \n\n\n"
-            for tb, params in zip(self.tracebacks, self.tracebacks_params):
-                output_string += '{}\n\n{}\n'.format(params, tb)
-        secure_file_path(output_file_name + "hps_report.txt")
-        with open(output_file_name + "hps_report.txt", "w") as output_file:
-            output_file.write(output_string)
-
-
-class Random(RandomizedSearchCV, HPSearch):
-
-    def __init__(self, estimator, param_distributions=None, n_iter=10,
-                 refit=False, n_jobs=1, scoring=None, cv=None,
-                 random_state=None, learning_indices=None, view_indices=None,
-                 framework="monoview",
-                 equivalent_draws=True, track_tracebacks=True):
-        if param_distributions is None:
-            param_distributions = self.get_param_distribs(estimator)
-        scoring = HPSearch.get_scoring(self, scoring)
-        RandomizedSearchCV.__init__(self, estimator, n_iter=n_iter,
-                                    param_distributions=param_distributions,
-                                    refit=refit, n_jobs=n_jobs, scoring=scoring,
-                                    cv=cv, random_state=random_state)
-        self.framework = framework
-        self.available_indices = learning_indices
-        self.view_indices = view_indices
-        self.equivalent_draws = equivalent_draws
-        self.track_tracebacks = track_tracebacks
-        self.tracebacks = []
-
-    def get_param_distribs(self, estimator):
-        if isinstance(estimator, MultiClassWrapper):
-            return estimator.estimator.gen_distribs()
-        else:
-            return estimator.gen_distribs()
-
-    def fit(self, X, y=None, groups=None, **fit_params): # pragma: no cover
-        if self.framework == "monoview":
-            return RandomizedSearchCV.fit(self, X, y=y, groups=groups,
-                                          **fit_params)
-        elif self.framework == "multiview":
-            return HPSearch.fit_multiview(self, X, y=y, groups=groups,
-                                          **fit_params)
-
-    def get_candidate_params(self, X):
-        if self.equivalent_draws:
-            self.n_iter = self.n_iter * X.nb_view
-        self.candidate_params = list(
-            ParameterSampler(self.param_distributions, self.n_iter,
-                             random_state=self.random_state))
-
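-    # Hedged usage sketch (illustrative only, `clf`, `X` and `y` are
-    # placeholders for a platform estimator exposing gen_distribs() and its
-    # data): with equivalent_draws=True on a multiview dataset, n_iter is
-    # multiplied by the number of views.
-    #
-    #     hps = Random(clf, n_iter=30, cv=3, random_state=42,
-    #                  framework="monoview")
-    #     hps.fit(X, y)
-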
-
-class Grid(GridSearchCV, HPSearch):
-
-    def __init__(self, estimator, param_grid=None, refit=False, n_jobs=1,
-                 scoring=None, cv=None, learning_indices=None,
-                 view_indices=None, framework="monoview",
-                 random_state=None, track_tracebacks=True):
-        # Avoid a mutable default argument for param_grid.
-        if param_grid is None:
-            param_grid = {}
-        scoring = HPSearch.get_scoring(self, scoring)
-        GridSearchCV.__init__(self, estimator, param_grid, scoring=scoring,
-                              n_jobs=n_jobs, refit=refit, cv=cv)
-        self.framework = framework
-        self.available_indices = learning_indices
-        self.view_indices = view_indices
-        self.track_tracebacks = track_tracebacks
-        self.tracebacks = []
-
-    def fit(self, X, y=None, groups=None, **fit_params):
-        if self.framework == "monoview":
-            return GridSearchCV.fit(self, X, y=y, groups=groups,
-                                    **fit_params)
-        elif self.framework == "multiview":
-            return HPSearch.fit_multiview(self, X, y=y, groups=groups,
-                                          **fit_params)
-
-    def get_candidate_params(self, X):
-        self.candidate_params = list(ParameterGrid(self.param_grid))
-        self.n_iter = len(self.candidate_params)
-
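-# Hedged usage sketch (illustrative only, `clf`, `X` and `y` are
-# placeholders): an explicit grid is swept exhaustively, so n_iter becomes
-# the number of combinations (here 3 x 2 = 6).
-#
-#     hps = Grid(clf, param_grid={"max_depth": [1, 5, 10],
-#                                 "criterion": ["gini", "entropy"]},
-#                cv=3)
-#     hps.fit(X, y)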
-
-# def randomized_search(X, y, framework, random_state, output_file_name,
-#                       classifier_module,
-#                       classifier_name, folds=4, nb_cores=1,
-#                       metric=["accuracy_score", None],
-#                       n_iter=30, classifier_kwargs={}, learning_indices=None,
-#                       view_indices=None,
-#                       equivalent_draws=True):
-#     estimator = getattr(classifier_module, classifier_name)(
-#         random_state=random_state,
-#         **classifier_kwargs)
-#     params_dict = estimator.gen_distribs()
-#     estimator = get_mc_estim(estimator, random_state,
-#                              multiview=(framework == "multiview"),
-#                              y=y)
-#     if params_dict:
-#         metric_module, metric_kwargs = get_metric(metric)
-#         scorer = metric_module.get_scorer(**metric_kwargs)
-#         # nb_possible_combinations = compute_possible_combinations(params_dict)
-#         # n_iter_real = min(n_iter, nb_possible_combinations)
-#
-#         random_search = MultiviewCompatibleRandomizedSearchCV(estimator,
-#                                                               n_iter=n_iter,
-#                                                               param_distributions=params_dict,
-#                                                               refit=True,
-#                                                               n_jobs=nb_cores,
-#                                                               scoring=scorer,
-#                                                               cv=folds,
-#                                                               random_state=random_state,
-#                                                               learning_indices=learning_indices,
-#                                                               view_indices=view_indices,
-#                                                               framework=framework,
-#                                                               equivalent_draws=equivalent_draws)
-#         random_search.fit(X, y)
-#         return random_search.transform_results()
-#     else:
-#         best_estimator = estimator
-#         best_params = {}
-#         scores_array = {}
-#         params = {}
-#         test_folds_preds = np.zeros(10)#get_test_folds_preds(X, y, folds, best_estimator,
-#                                           # framework, learning_indices)
-#         return best_params, scores_array, params
-
-
-#
-# def spear_mint(dataset, classifier_name, views_indices=None, k_folds=None,
-#                n_iter=1,
-#                **kwargs):
-#     """Used to perform spearmint on the classifiers to optimize hyper parameters,
-#     longer than randomsearch (can't be parallelized)"""
-#     pass
-#
-#
-# def gen_heat_maps(params, scores_array, output_file_name):
-#     """Used to generate a heat map for each doublet of hyperparms
-#     optimized on the previous function"""
-#     nb_params = len(params)
-#     if nb_params > 2:
-#         combinations = itertools.combinations(range(nb_params), 2)
-#     elif nb_params == 2:
-#         combinations = [(0, 1)]
-#     else:
-#         combinations = [()]
-#     for combination in combinations:
-#         if combination:
-#             param_name1, param_array1 = params[combination[0]]
-#             param_name2, param_array2 = params[combination[1]]
-#         else:
-#             param_name1, param_array1 = params[0]
-#             param_name2, param_array2 = ("Control", np.array([0]))
-#
-#         param_array1_set = np.sort(np.array(list(set(param_array1))))
-#         param_array2_set = np.sort(np.array(list(set(param_array2))))
-#
-#         scores_matrix = np.zeros(
-#             (len(param_array2_set), len(param_array1_set))) - 0.1
-#         for param1, param2, score in zip(param_array1, param_array2,
-#                                          scores_array):
-#             param1_index, = np.where(param_array1_set == param1)
-#             param2_index, = np.where(param_array2_set == param2)
-#             scores_matrix[int(param2_index), int(param1_index)] = score
-#
-#         plt.figure(figsize=(8, 6))
-#         plt.subplots_adjust(left=.2, right=0.95, bottom=0.15, top=0.95)
-#         plt.imshow(scores_matrix, interpolation='nearest', cmap=plt.cm.hot,
-#                    )
-#         plt.xlabel(param_name1)
-#         plt.ylabel(param_name2)
-#         plt.colorbar()
-#         plt.xticks(np.arange(len(param_array1_set)), param_array1_set)
-#         plt.yticks(np.arange(len(param_array2_set)), param_array2_set,
-#                    rotation=45)
-#         plt.title('Validation metric')
-#         plt.savefig(
-#             output_file_name + "heat_map-" + param_name1 + "-" + param_name2 + ".png",
-#             transparent=True)
-#         plt.close()
-#
-
-
-
-class CustomRandint:
-    """Used as a distribution returning an integer between low and high-1.
-    It can be used with a multiplier argument to perform more complex
-    generation, for example 10**-randint."""
-
-    def __init__(self, low=0, high=0, multiplier=""):
-        self.randint = randint(low, high)
-        self.low = low
-        self.high = high
-        self.multiplier = multiplier
-
-    def rvs(self, random_state=None):
-        randinteger = self.randint.rvs(random_state=random_state)
-        if self.multiplier == "e-":
-            return 10 ** -randinteger
-        else:
-            return randinteger
-
-    def get_nb_possibilities(self):
-        # Each integer maps to a distinct value even with the "e-"
-        # multiplier, so the number of possible draws is high - low.
-        return self.high - self.low
-
-
-class CustomUniform:
-    """Used as a distribution returning a float between loc and loc + state,
-    where `state` is the scale of the underlying uniform distribution.
-    It can be used with a multiplier argument to perform more complex
-    generation, for example 10**-float."""
-
-    def __init__(self, loc=0, state=1, multiplier=""):
-        self.uniform = uniform(loc, state)
-        self.multiplier = multiplier
-
-    def rvs(self, random_state=None):
-        unif = self.uniform.rvs(random_state=random_state)
-        if self.multiplier == 'e-':
-            return 10 ** -unif
-        else:
-            return unif
-
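-
-def _example_distribs(): # pragma: no cover
-    # Hedged, illustrative helper (not part of the original module): builds a
-    # param_distributions dict the way classifiers' gen_distribs() methods
-    # do. CustomRandint(1, 4, "e-").rvs() yields one of 1e-1, 1e-2, 1e-3;
-    # CustomUniform(0, 2, "e-").rvs() yields a float in [1e-2, 1].
-    return {"n_estimators": CustomRandint(low=10, high=500),
-            "learning_rate": CustomUniform(loc=0, state=1, multiplier="e-")}
-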
-
-def format_params(params, pref=""):
-    """Recursively converts a parameter structure to YAML-serializable
-    builtins: random states are dropped, nested estimators are flattened
-    and numpy scalars are cast to their Python equivalents."""
-    if isinstance(params, dict):
-        dictionary = {}
-        for key, value in params.items():
-            if isinstance(value, np.random.RandomState):
-                # Random states are not informative in a report, drop them.
-                pass
-            elif isinstance(value, BaseEstimator):
-                dictionary[key] = value.__class__.__name__
-                for second_key, second_value in format_params(value.get_params()).items():
-                    dictionary[str(key)+"__"+second_key] = second_value
-            else:
-                dictionary[str(key)] = format_params(value)
-        return dictionary
-    elif isinstance(params, np.ndarray):
-        return [format_params(param) for param in params]
-    elif isinstance(params, np.float64):
-        return float(params)
-    elif isinstance(params, np.int64):
-        return int(params)
-    elif isinstance(params, list):
-        return [format_params(param) for param in params]
-    elif isinstance(params, np.str_):
-        return str(params)
-    else:
-        return params
-
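-
-def _example_format_params(): # pragma: no cover
-    # Hedged, illustrative helper (not part of the original module):
-    # format_params flattens a nested estimator and casts numpy scalars so
-    # that the result can be dumped with yaml.dump in gen_report.
-    from sklearn.tree import DecisionTreeClassifier
-    return format_params({"max_depth": np.int64(3),
-                          "base_estimator": DecisionTreeClassifier()})
-    # -> {'max_depth': 3, 'base_estimator': 'DecisionTreeClassifier',
-    #     'base_estimator__criterion': 'gini', ...}
-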
-
-# def randomized_search_(dataset_var, labels, classifier_package, classifier_name,
-#                       metrics_list, learning_indices, k_folds, random_state,
-#                       views_indices=None, n_iter=1,
-#                       nb_cores=1, **classification_kargs):
-#     """Used to perform a random search on the classifiers to optimize hyper parameters"""
-#     if views_indices is None:
-#         views_indices = range(dataset_var.get("Metadata").attrs["nbView"])
-#     metric = metrics_list[0]
-#     metric_module = getattr(metrics, metric[0])
-#     if metric[1] is not None:
-#         metric_kargs = dict((index, metricConfig) for index, metricConfig in
-#                             enumerate(metric[1]))
-#     else:
-#         metric_kargs = {}
-#     classifier_module = getattr(classifier_package, classifier_name + "Module")
-#     classifier_class = getattr(classifier_module, classifier_name + "Class")
-#     if classifier_name != "Mumbo":
-#         params_sets = classifier_module.gen_params_sets(classification_kargs,
-#                                                     random_state, n_iter=n_iter)
-#         if metric_module.getConfig()[-14] == "h":
-#             base_score = -1000.0
-#             is_better = "higher"
-#         else:
-#             base_score = 1000.0
-#             is_better = "lower"
-#         best_settings = None
-#         kk_folds = k_folds.split(learning_indices, labels[learning_indices])
-#         for params_set in params_sets:
-#             scores = []
-#             for trainIndices, testIndices in kk_folds:
-#                 classifier = classifier_class(random_state, nb_scores=nb_cores,
-#                                              **classification_kargs)
-#                 classifier.setParams(params_set)
-#                 classifier.fit_hdf5(dataset_var, labels,
-#                                     train_indices=learning_indices[trainIndices],
-#                                     views_indices=views_indices)
-#                 test_labels = classifier.predict_hdf5(dataset_var,
-#                                                       used_indices=learning_indices[testIndices],
-#                                                       views_indices=views_indices)
-#                 test_score = metric_module.score(
-#                     labels[learning_indices[testIndices]], test_labels)
-#                 scores.append(test_score)
-#             cross_val_score = np.mean(np.array(scores))
-#
-#             if is_better == "higher" and cross_val_score > base_score:
-#                 base_score = cross_val_score
-#                 best_settings = params_set
-#             elif is_better == "lower" and cross_val_score < base_score:
-#                 base_score = cross_val_score
-#                 best_settings = params_set
-#         classifier = classifier_class(random_state, nb_cores=nb_cores,
-#                                      **classification_kargs)
-#         classifier.setParams(best_settings)
-#
-#     # TODO : This must be corrected
-#     else:
-#         best_configs, _ = classifier_module.grid_search_hdf5(dataset_var, labels,
-#                                                              views_indices,
-#                                                              classification_kargs,
-#                                                              learning_indices,
-#                                                              random_state,
-#                                                              metric=metric,
-#                                                              nI_iter=n_iter)
-#         classification_kargs["classifiersConfigs"] = best_configs
-#         classifier = classifier_class(random_state, nb_cores=nb_cores,
-#                                       **classification_kargs)
-#
-#     return classifier
-
-#
-# def compute_possible_combinations(params_dict):
-#     n_possibs = np.ones(len(params_dict)) * np.inf
-#     for value_index, value in enumerate(params_dict.values()):
-#         if type(value) == list:
-#             n_possibs[value_index] = len(value)
-#         elif isinstance(value, CustomRandint):
-#             n_possibs[value_index] = value.get_nb_possibilities()
-#     return np.prod(n_possibs)
-
-
-# def get_test_folds_preds(X, y, cv, estimator, framework,
-#                          available_indices=None):
-#     test_folds_prediction = []
-#     if framework == "monoview":
-#         folds = cv.split(np.arange(len(y)), y)
-#     if framework == "multiview":
-#         folds = cv.split(available_indices, y[available_indices])
-#     fold_lengths = np.zeros(cv.n_splits, dtype=int)
-#     for fold_idx, (train_indices, test_indices) in enumerate(folds):
-#         fold_lengths[fold_idx] = len(test_indices)
-#         if framework == "monoview":
-#             estimator.fit(X[train_indices], y[train_indices])
-#             test_folds_prediction.append(estimator.predict(X[train_indices]))
-#         if framework == "multiview":
-#             estimator.fit(X, y, available_indices[train_indices])
-#             test_folds_prediction.append(
-#                 estimator.predict(X, available_indices[test_indices]))
-#     min_fold_length = fold_lengths.min()
-#     test_folds_prediction = np.array(
-#         [test_fold_prediction[:min_fold_length] for test_fold_prediction in
-#          test_folds_prediction])
-#     return test_folds_prediction
-
-
diff --git a/multiview_platform/mono_multi_view_classifiers/utils/make_file_config.py b/multiview_platform/mono_multi_view_classifiers/utils/make_file_config.py
deleted file mode 100644
index 5810e37bdddd002a96ff73d97d37d8f85245fbe9..0000000000000000000000000000000000000000
--- a/multiview_platform/mono_multi_view_classifiers/utils/make_file_config.py
+++ /dev/null
@@ -1,39 +0,0 @@
-import importlib
-import inspect
-
-class ConfigurationMaker():
-    """
-    Finds the name of each classifier class to report from the classifier
-    dict.
-    """
-    _path_classifier_mono = 'multiview_platform/mono_multi_view_classifiers/monoview_classifiers'
-    _path_classifier_multi = 'multiview_platform/mono_multi_view_classifiers/multiview_classifiers'
-
-    def __init__(self, classifier_dict=None):
-        if classifier_dict is None:
-            classifier_dict = {"0": ['mono', 'Adaboost',
-                                     'multiview_platform.mono_multi_view_classifiers.monoview_classifiers.adaboost']}
-        names = []
-        for key, val in classifier_dict.items():
-            mymodule = importlib.import_module(val[2])
-            names.append(self._get_module_name(mymodule, val[1]))
-
-    def _get_module_name(self, mymodule, class_name):
-        for name in dir(mymodule):
-            att = getattr(mymodule, name)
-            try:
-                if (att.__module__.startswith(mymodule.__name__)
-                        and inspect.isclass(att)
-                        and att.__name__ == class_name):
-                    return name
-            except AttributeError:
-                # Attributes without a __module__ are skipped, not fatal.
-                continue
-        return None
-
-
-if __name__ == '__main__':
-    ConfigurationMaker()
diff --git a/multiview_platform/mono_multi_view_classifiers/utils/multiclass.py b/multiview_platform/mono_multi_view_classifiers/utils/multiclass.py
deleted file mode 100644
index 0b7210a76bf0bf98596b0c5309ca69b746fb5040..0000000000000000000000000000000000000000
--- a/multiview_platform/mono_multi_view_classifiers/utils/multiclass.py
+++ /dev/null
@@ -1,323 +0,0 @@
-import array
-
-import numpy as np
-import scipy.sparse as sp
-from sklearn.base import clone, is_classifier, is_regressor
-from sklearn.multiclass import OneVsOneClassifier, OneVsRestClassifier
-from sklearn.multiclass import _ovr_decision_function
-from sklearn.preprocessing import LabelBinarizer
-
-from .dataset import get_examples_views_indices
-
-
-def get_mc_estim(estimator, random_state, y=None, multiview=False,
-                 multiclass=False):
-    r"""Used to get a multiclass-compatible estimator if the one in param does not natively support multiclass.
-    If perdict_proba is available in the asked estimator, a One Versus Rest wrapper is returned,
-    else, a One Versus One wrapper is returned.
-
-    To be able to deal with multiview algorithm, multiview wrappers are implemented separately.
-
-    Parameters
-    ----------
-    estimator : sklearn-like estimator
-        Asked estimator
-    y : numpy.array
-        The labels of the problem
-    random_state : numpy.random.RandomState object
-        The random state, used to generate a fake multiclass problem
-    multiview : bool
-        If True, multiview-compatible wrappers are returned.
-
-    Returns
-    -------
-    estimator : sklearn-like estimator
-        Either the asked estimator, or a multiclass-compatible wrapper over it
-    """
-    if (y is not None and np.unique(y).shape[0] > 2) or multiclass:
-        if not clone(estimator).accepts_multi_class(random_state):
-            if hasattr(estimator, "predict_proba"):
-                if multiview:
-                    estimator = MultiviewOVRWrapper(estimator)
-                else:
-                    estimator = OVRWrapper(estimator)
-            else:
-                if multiview:
-                    estimator = MultiviewOVOWrapper(estimator)
-                else:
-                    estimator = OVOWrapper(estimator)
-    return estimator
-
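-# Hedged usage sketch (illustrative only, `clf`, `X` and `y` are
-# placeholders): with a three-class y and an estimator whose
-# accepts_multi_class() returns False, the estimator comes back wrapped;
-# binary problems pass through unchanged.
-#
-#     rs = np.random.RandomState(42)
-#     clf = get_mc_estim(clf, rs, y=y, multiview=False)
-#     clf.fit(X, y)
-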
-
-class MultiClassWrapper:
-
-    # TODO : Has an effect on the init of the sub-classes.
-    # @abstractmethod
-    # def __init__(self, estimator, **params):
-    #     self.estimator = estimator
-
-    def set_params(self, **params):
-        r"""
-        This function is useful in order for the OV_Wrappers to be transparent
-        in terms of parameters.
-        If we remove it the parameters have to be specified as estimator__param.
-        Witch is not relevant for the platform
-
-        """
-        self.estimator.set_params(**params)
-        return self
-
-    def get_config(self):
-        return "multiclass_adaptation : "+self.__class__.__name__+ ", " +self.estimator.get_config()
-
-    def format_params(self, params, deep=True):
-        if hasattr(self, 'estimators_'):
-            estim_params = self.estimators_[0].get_params(deep=deep)
-            for key, value in params.items():
-                if key.startswith("estimator__"):
-                    estim_param_key = '__'.join(key.split('__')[1:])
-                    params[key] = estim_params[estim_param_key]
-            params.pop("estimator")
-        return params
-
-
-
-    def get_interpretation(self, directory, base_file_name, y_test=None):
-        # TODO : Multiclass interpretation
-        return "Multiclass wrapper is not interpretable yet"
-
-
-class MonoviewWrapper(MultiClassWrapper):
-    pass
-
-
-class OVRWrapper(MonoviewWrapper, OneVsRestClassifier):
-
-    def get_params(self, deep=True):
-        return self.format_params(
-            OneVsRestClassifier.get_params(self, deep=deep), deep=deep)
-
-
-class OVOWrapper(MonoviewWrapper, OneVsOneClassifier):
-    def decision_function(self, X):
-        # check_is_fitted(self)
-
-        indices = self.pairwise_indices_
-        if indices is None:
-            Xs = [X] * len(self.estimators_)
-        else:
-            Xs = [X[:, idx] for idx in indices]
-
-        predictions = np.vstack([est.predict(Xi)
-                                 for est, Xi in zip(self.estimators_, Xs)]).T
-        confidences = np.ones(predictions.shape)
-        Y = _ovr_decision_function(predictions,
-                                   confidences, len(self.classes_))
-        if self.n_classes_ == 2:
-            return Y[:, 1]
-        return Y
-
-    def get_params(self, deep=True):
-        return self.format_params(
-            OneVsOneClassifier.get_params(self, deep=deep), deep=deep)
-
-
-# The following code is a multiview adaptation of sklearn's multiclass package
-
-def _multiview_fit_binary(estimator, X, y, train_indices,
-                          view_indices, classes=None, ):
-    # TODO : add sklearn-style input checks
-    estimator = clone(estimator)
-    estimator.fit(X, y, train_indices=train_indices,
-                  view_indices=view_indices)
-    return estimator
-
-
-def _multiview_predict_binary(estimator, X, example_indices, view_indices):
-    if is_regressor(estimator):
-        return estimator.predict(X, example_indices=example_indices,
-                                 view_indices=view_indices)
-    try:
-        score = np.ravel(estimator.decision_function(X))
-    except (AttributeError, NotImplementedError):
-        # probabilities of the positive class
-        score = estimator.predict_proba(X, example_indices=example_indices,
-                                        view_indices=view_indices)[:, 1]
-    return score
-
-
-class MultiviewWrapper(MultiClassWrapper):
-
-    def __init__(self, estimator=None, **args):
-        super(MultiviewWrapper, self).__init__(estimator=estimator, **args)
-        self.short_name = estimator.short_name
-
-
-class MultiviewOVRWrapper(MultiviewWrapper, OneVsRestClassifier):
-
-    def fit(self, X, y, train_indices=None, view_indices=None):
-        self.label_binarizer_ = LabelBinarizer(sparse_output=True)
-        Y = self.label_binarizer_.fit_transform(y)
-        Y = Y.tocsc()
-        self.classes_ = self.label_binarizer_.classes_
-        columns = (col.toarray().ravel() for col in Y.T)
-        # In cases where individual estimators are very fast to train, setting
-        # n_jobs > 1 can result in slower performance due to the overhead
-        # of spawning threads. See joblib issue #112.
-        self.estimators_ = [_multiview_fit_binary(
-            self.estimator, X, column, classes=[
-                "not %s" % self.label_binarizer_.classes_[i],
-                self.label_binarizer_.classes_[i]], train_indices=train_indices,
-            view_indices=view_indices)
-            for i, column in
-            enumerate(columns)]
-        return self
-
-    def predict(self, X, example_indices=None, view_indices=None):
-        example_indices, view_indices = get_examples_views_indices(X,
-                                                                   example_indices,
-                                                                   view_indices)
-        n_samples = len(example_indices)
-        if self.label_binarizer_.y_type_ == "multiclass":
-            maxima = np.empty(n_samples, dtype=float)
-            maxima.fill(-np.inf)
-            argmaxima = np.zeros(n_samples, dtype=int)
-            for i, e in enumerate(self.estimators_):
-                pred = _multiview_predict_binary(e, X, example_indices,
-                                                 view_indices)
-                np.maximum(maxima, pred, out=maxima)
-                argmaxima[maxima == pred] = i
-            return self.classes_[argmaxima]
-        else: # pragma: no cover
-            if (hasattr(self.estimators_[0], "decision_function") and
-                    is_classifier(self.estimators_[0])):
-                thresh = 0
-            else:
-                thresh = .5
-            indices = array.array('i')
-            indptr = array.array('i', [0])
-            for e in self.estimators_:
-                indices.extend(
-                    np.where(_multiview_predict_binary(e, X,
-                                                       example_indices,
-                                                       view_indices) > thresh)[
-                        0])
-                indptr.append(len(indices))
-
-            data = np.ones(len(indices), dtype=int)
-            indicator = sp.csc_matrix((data, indices, indptr),
-                                      shape=(n_samples, len(self.estimators_)))
-            return self.label_binarizer_.inverse_transform(indicator)
-
-    def get_params(self, deep=True):
-        return self.format_params(
-            OneVsRestClassifier.get_params(self, deep=deep), deep=deep)
-
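-# Hedged usage sketch (illustrative only, `mv_clf`, `dataset` and the index
-# arrays are placeholders): the multiview wrappers keep the platform's
-# fit/predict signature, forwarding sample and view indices to every binary
-# sub-estimator.
-#
-#     ovr = MultiviewOVRWrapper(estimator=mv_clf)
-#     ovr.fit(dataset, y, train_indices=train_idx, view_indices=view_idx)
-#     preds = ovr.predict(dataset, example_indices=test_idx,
-#                         view_indices=view_idx)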
-
-def _multiview_fit_ovo_binary(estimator, X, y, i, j, train_indices,
-                              view_indices):
-    cond = np.logical_or(y == i, y == j)
-    # y = y[cond]
-    y_binary = np.empty(y.shape, int)
-    y_binary[y == i] = 0
-    y_binary[y == j] = 1
-    indcond = np.arange(X.get_nb_examples())[cond]
-    train_indices = np.intersect1d(train_indices, indcond)
-    return _multiview_fit_binary(estimator,
-                                 X,
-                                 y_binary, train_indices, view_indices,
-                                 classes=[i, j]), train_indices
-
-
-class MultiviewOVOWrapper(MultiviewWrapper, OneVsOneClassifier):
-
-    def fit(self, X, y, train_indices=None, view_indices=None):
-        """Fit underlying estimators.
-
-        Parameters
-        ----------
-        X : (sparse) array-like of shape (n_samples, n_features)
-            Data.
-
-        y : array-like of shape (n_samples,)
-            Multi-class targets.
-
-        Returns
-        -------
-        self
-        """
-        # X, y = check_X_y(X, y, accept_sparse=['csr', 'csc'])
-        # check_classification_targets(y)
-        train_indices, view_indices = get_examples_views_indices(X,
-                                                                 train_indices,
-                                                                 view_indices)
-        self.classes_ = np.unique(y)
-        if len(self.classes_) == 1:
-            raise ValueError("OneVsOneClassifier can not be fit when only one"
-                             " class is present.")
-        n_classes = self.classes_.shape[0]
-        estimators_indices = list(zip(*([_multiview_fit_ovo_binary(
-            self.estimator, X, y, self.classes_[i], self.classes_[j],
-            train_indices,
-            view_indices
-        )
-            for i in range(n_classes) for j in range(i + 1, n_classes)
-        ])))
-
-        self.estimators_ = estimators_indices[0]
-        self.pairwise_indices_ = (
-            estimators_indices[1] if self._pairwise else None)
-
-        return self
-
-    def predict(self, X, example_indices=None, view_indices=None):
-        """Estimate the best class label for each sample in X.
-
-        This is implemented as ``argmax(decision_function(X), axis=1)`` which
-        will return the label of the class with most votes by estimators
-        predicting the outcome of a decision for each possible class pair.
-
-        Parameters
-        ----------
-        X : (sparse) array-like of shape (n_samples, n_features)
-            Data.
-
-        Returns
-        -------
-        y : numpy array of shape [n_samples]
-            Predicted multi-class targets.
-        """
-        example_indices, view_indices = get_examples_views_indices(X,
-                                                                   example_indices,
-                                                                   view_indices)
-        Y = self.multiview_decision_function(X, example_indices=example_indices,
-                                             view_indices=view_indices)
-        if self.n_classes_ == 2:
-            return self.classes_[(Y > 0).astype(int)]
-        return self.classes_[Y.argmax(axis=1)]
-
-    def multiview_decision_function(self, X, example_indices, view_indices): # pragma: no cover
-        # check_is_fitted(self)
-
-        indices = self.pairwise_indices_
-        if indices is None:
-            Xs = [X] * len(self.estimators_)
-        else:
-            # TODO Gram matrix compatibility
-            Xs = [X[:, idx] for idx in indices]
-        predictions = np.vstack(
-            [est.predict(Xi, example_indices=example_indices,
-                         view_indices=view_indices)
-             for est, Xi in zip(self.estimators_, Xs)]).T
-        confidences = np.ones(predictions.shape)
-        # confidences = np.vstack([_predict_binary(est, Xi)
-        #                          for est, Xi in zip(self.estimators_, Xs)]).T
-        Y = _ovr_decision_function(predictions,
-                                   confidences, len(self.classes_))
-        if self.n_classes_ == 2:
-            return Y[:, 1]
-        return Y
-
-    def get_params(self, deep=True):
-        return self.format_params(
-            OneVsOneClassifier.get_params(self, deep=deep), deep=deep)
diff --git a/multiview_platform/mono_multi_view_classifiers/utils/multiview_result_analysis.py b/multiview_platform/mono_multi_view_classifiers/utils/multiview_result_analysis.py
deleted file mode 100644
index a980b3befc0bf8cf955db16ad5a9de0b92e578af..0000000000000000000000000000000000000000
--- a/multiview_platform/mono_multi_view_classifiers/utils/multiview_result_analysis.py
+++ /dev/null
@@ -1,54 +0,0 @@
-# from .. import metrics
-#
-# # Author-Info
-# __author__ = "Baptiste Bauvin"
-# __status__ = "Prototype"  # Production, Development, Prototype
-#
-#
-# def print_metric_score(metric_scores, metrics):
-#     metric_score_string = "\n\n"
-#     for metric in metrics:
-#         metric_module = getattr(metrics, metric[0])
-#         if metric[1] is not None:
-#             metric_kwargs = dict(
-#                 (index, metricConfig) for index, metricConfig in
-#                 enumerate(metric[1]))
-#         else:
-#             metric_kwargs = {}
-#         metric_score_string += "\tFor " + metric_module.get_config(
-#             **metric_kwargs) + " : "
-#         metric_score_string += "\n\t\t- Score on train : " + str(
-#             metric_scores[metric[0]][0])
-#         metric_score_string += "\n\t\t- Score on test : " + str(
-#             metric_scores[metric[0]][1])
-#         metric_score_string += "\n\n"
-#     return metric_score_string
-#
-#
-# def get_total_metric_scores(metric, train_labels, test_labels,
-#                             validation_indices,
-#                             learning_indices, labels):
-#     metric_module = getattr(metrics, metric[0])
-#     if metric[1] is not None:
-#         metric_kwargs = dict((index, metricConfig) for index, metricConfig in
-#                              enumerate(metric[1]))
-#     else:
-#         metric_kwargs = {}
-#     train_score = metric_module.score(labels[learning_indices], train_labels,
-#                                       **metric_kwargs)
-#     test_score = metric_module.score(labels[validation_indices], test_labels,
-#                                      **metric_kwargs)
-#     return [train_score, test_score]
-#
-#
-# def get_metrics_scores(metrics_var, train_labels, test_labels,
-#                        validation_indices, learning_indices, labels):
-#     metrics_scores = {}
-#     for metric in metrics_var:
-#         metrics_scores[metric[0]] = get_total_metric_scores(metric,
-#                                                             train_labels,
-#                                                             test_labels,
-#                                                             validation_indices,
-#                                                             learning_indices,
-#                                                             labels)
-#     return metrics_scores
diff --git a/multiview_platform/mono_multi_view_classifiers/utils/organization.py b/multiview_platform/mono_multi_view_classifiers/utils/organization.py
deleted file mode 100644
index 1fdc0ecf608350c98bf66ff9fdc4e1be238e5b45..0000000000000000000000000000000000000000
--- a/multiview_platform/mono_multi_view_classifiers/utils/organization.py
+++ /dev/null
@@ -1,11 +0,0 @@
-import os
-import errno
-
-
-def secure_file_path(file_name): # pragma: no cover
-    if not os.path.exists(os.path.dirname(file_name)):
-        try:
-            os.makedirs(os.path.dirname(file_name))
-        except OSError as exc:
-            if exc.errno != errno.EEXIST:
-                raise
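-
-
-def _example_secure_write(): # pragma: no cover
-    # Hedged, illustrative helper (not part of the original module): makes
-    # sure the parent directory exists before writing a small report file.
-    secure_file_path("tmp_example/report.txt")
-    with open("tmp_example/report.txt", "w") as report:
-        report.write("done")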
diff --git a/multiview_platform/mono_multi_view_classifiers/utils/transformations.py b/multiview_platform/mono_multi_view_classifiers/utils/transformations.py
deleted file mode 100644
index 17e7b90d3b0d186495893220514524b4e0a648a2..0000000000000000000000000000000000000000
--- a/multiview_platform/mono_multi_view_classifiers/utils/transformations.py
+++ /dev/null
@@ -1,44 +0,0 @@
-import numpy as np
-
-
-def sign_labels(labels):
-    """
-    Returns a label array with (-1,1) as labels.
-    If labels was already made of (-1,1), returns labels.
-    If labels is made of (0,1), returns labels with all
-    zeros transformed in -1.
-
-    Parameters
-    ----------
-    labels : numpy array
-        The original label array
-
-    Returns
-    -------
-    A np.array with labels made of (-1,1)
-    """
-    if 0 in labels:
-        return np.array([label if label != 0 else -1 for label in labels])
-    else:
-        return labels
-
-
-def unsign_labels(labels):
-    """
-    The inverse of sign_labels: returns a (0,1) label array,
-    flattening a 2D column vector if needed.
-
-    Parameters
-    ----------
-    labels : numpy array made of (-1,1) labels
-
-    Returns
-    -------
-    A np.array with labels made of (0,1)
-    """
-    if len(labels.shape) == 2:
-        labels = labels.reshape((labels.shape[0],))
-    if -1 in labels:
-        return np.array([label if label != -1 else 0 for label in labels])
-    else:
-        return labels
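-
-
-def _example_label_round_trip(): # pragma: no cover
-    # Hedged, illustrative helper (not part of the original module): shows
-    # that unsign_labels inverts sign_labels on a (0, 1) label vector.
-    labels = np.array([0, 1, 1, 0])
-    signed = sign_labels(labels)      # -> array([-1,  1,  1, -1])
-    restored = unsign_labels(signed)  # -> array([0, 1, 1, 0])
-    return bool((restored == labels).all())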
diff --git a/multiview_platform/tests/__init__.py b/multiview_platform/tests/__init__.py
deleted file mode 100644
index 194018ae5ef03ba4d863b4e1497acae3b317589a..0000000000000000000000000000000000000000
--- a/multiview_platform/tests/__init__.py
+++ /dev/null
@@ -1,2 +0,0 @@
-from . import test_exec_classif
-from .utils import rm_tmp, gen_test_dataset, tmp_path
\ No newline at end of file
diff --git a/multiview_platform/tests/test_config_hps.yml b/multiview_platform/tests/test_config_hps.yml
deleted file mode 100644
index bce80ba6a8ee25002b78e0bbcc333481537f9fa1..0000000000000000000000000000000000000000
--- a/multiview_platform/tests/test_config_hps.yml
+++ /dev/null
@@ -1,80 +0,0 @@
-# The base configuration of the benchmark
-
-# Enable logging
-log: False
-# The name of each dataset in the directory on which the benchmark should be run
-name: "digits_doc"
-# A label for the result directory
-label: "example_0"
-# The type of dataset, currently supported ".hdf5", and ".csv"
-file_type: ".hdf5"
-# The views to use in the benchmark, an empty value will result in using all the views
-views:
-# The path to the directory where the datasets are stored, an absolute path is advised
-pathf: "../examples/data/"
-# The niceness of the processes, useful to lower their priority
-nice: 0
-# The random state of the benchmark, useful for reproducibility
-random_state: 42
-# The number of parallel computing threads
-nb_cores: 1
-# Used to run the benchmark on the full dataset
-full: True
-# Used to be able to run more than one benchmark per minute
-debug: False
-# The directory in which the results will be stored, an absolute path is advised
-res_dir: "tmp_tests/"
-# If an error occurs in a classifier, if track_tracebacks is set to True, the
-# benchmark saves the traceback and continues, if it is set to False, it will
-# stop the benchmark and raise the error
-track_tracebacks: True
-
-# All the classification-related configuration options
-
-# The ratio of test examples/number of train examples
-split: 0.25
-# The number of folds in the cross-validation process when hyper-parameter optimization is performed
-nb_folds: 2
-# The number of classes to select in the dataset
-nb_class:
-# The name of the classes to select in the dataset
-classes:
-# The type of algorithms to run during the benchmark (monoview and/or multiview)
-type: ["monoview","multiview"]
-# The name of the monoview algorithms to run, ["all"] to run all the available classifiers
-algos_monoview: ["decision_tree"]
-# The names of the multiview algorithms to run, ["all"] to run all the available classifiers
-algos_multiview: ["weighted_linear_early_fusion",]
-# The number of times the benchmark is repeated with different train/test
-# split, to have more statistically significant results
-stats_iter: 1
-# The metrics that will be used in the result analysis
-metrics:
-  accuracy_score: {}
-  f1_score:
-    average: "micro"
-# The metric that will be used in the hyper-parameter optimization process
-metric_princ: "accuracy_score"
-# The type of hyper-parameter optimization method
-hps_type: "Random"
-# The number of iterations in the hyper-parameter optimization process
-hps_args:
-  n_iter: 2
-  equivalent_draws: False
-
-### Configuring the hyper-parameters for the classifiers
-
-decision_tree:
-  max_depth: 3
-
-weighted_linear_early_fusion:
-  monoview_classifier_name: "decision_tree"
-  monoview_classifier_config:
-    decision_tree:
-      max_depth: 6
-
-weighted_linear_late_fusion:
-  classifiers_names: "decision_tree"
-  classifier_configs:
-    decision_tree:
-      max_depth: 3
diff --git a/multiview_platform/tests/test_config_iter.yml b/multiview_platform/tests/test_config_iter.yml
deleted file mode 100644
index f44b34fe9afaa80a4fa68bfc60554dd4394d88db..0000000000000000000000000000000000000000
--- a/multiview_platform/tests/test_config_iter.yml
+++ /dev/null
@@ -1,78 +0,0 @@
-# The base configuration of the benchmark
-
-# Enable logging
-log: False
-# The name of each dataset in the directory on which the benchmark should be run
-name: "digits_doc"
-# A label for the result directory
-label: "example_0"
-# The type of dataset, currently supported ".hdf5", and ".csv"
-file_type: ".hdf5"
-# The views to use in the benchmark, an empty value will result in using all the views
-views:
-# The path to the directory where the datasets are stored, an absolute path is advised
-pathf: "../examples/data/"
-# The niceness of the processes, useful to lower their priority
-nice: 0
-# The random state of the benchmark, useful for reproducibility
-random_state: 42
-# The number of parallel computing threads
-nb_cores: 1
-# Used to run the benchmark on the full dataset
-full: True
-# Used to be able to run more than one benchmark per minute
-debug: False
-# The directory in which the results will be stored, an absolute path is advised
-res_dir: "tmp_tests/"
-# If an error occurs in a classifier, if track_tracebacks is set to True, the
-# benchmark saves the traceback and continues, if it is set to False, it will
-# stop the benchmark and raise the error
-track_tracebacks: True
-
-# All the classification-related configuration options
-
-# The ratio of test examples/number of train examples
-split: 0.25
-# The number of folds in the cross-validation process when hyper-parameter optimization is performed
-nb_folds: 2
-# The number of classes to select in the dataset
-nb_class:
-# The name of the classes to select in the dataset
-classes:
-# The type of algorithms to run during the benchmark (monoview and/or multiview)
-type: ["monoview","multiview"]
-# The name of the monoview algorithms to run, ["all"] to run all the available classifiers
-algos_monoview: ["decision_tree"]
-# The names of the multiview algorithms to run, ["all"] to run all the available classifiers
-algos_multiview: ["weighted_linear_early_fusion",]
-# The number of times the benchmark is repeated with different train/test
-# split, to have more statistically significant results
-stats_iter: 2
-# The metrics that will be used in the result analysis
-metrics:
-  accuracy_score: {}
-  f1_score:
-    average: "micro"
-# The metric that will be used in the hyper-parameter optimization process
-metric_princ: "accuracy_score"
-# The type of hyper-parameter optimization method
-hps_type: "None"
-# The number of iterations in the hyper-parameter optimization process
-hps_args: {}
-
-### Configuring the hyper-parameters for the classifiers
-
-decision_tree:
-  max_depth: 3
-
-weighted_linear_early_fusion:
-  monoview_classifier_name: "decision_tree"
-  monoview_classifier_config:
-    decision_tree:
-      max_depth: 6
-
-weighted_linear_late_fusion:
-  classifiers_names: "decision_tree"
-  classifier_configs:
-    decision_tree:
-      max_depth: 3
diff --git a/multiview_platform/tests/test_config_simple.yml b/multiview_platform/tests/test_config_simple.yml
deleted file mode 100644
index 02b85b5848885d6f501cfa9265c3263b35b3b70a..0000000000000000000000000000000000000000
--- a/multiview_platform/tests/test_config_simple.yml
+++ /dev/null
@@ -1,78 +0,0 @@
-# The base configuration of the benchmark
-
-# Enable logging
-log: False
-# The name of each dataset in the directory on which the benchmark should be run
-name: "digits_doc"
-# A label for the result directory
-label: "example_0"
-# The type of dataset, currently supported ".hdf5", and ".csv"
-file_type: ".hdf5"
-# The views to use in the benchmark, an empty value will result in using all the views
-views:
-# The path to the directory where the datasets are stored, an absolute path is advised
-pathf: "../examples/data/"
-# The niceness of the processes, useful to lower their priority
-nice: 0
-# The random state of the benchmark, useful for reproducibility
-random_state: 42
-# The number of parallel computing threads
-nb_cores: 1
-# Used to run the benchmark on the full dataset
-full: True
-# Used to be able to run more than one benchmark per minute
-debug: False
-# The directory in which the results will be stored, an absolute path is advised
-res_dir: "tmp_tests/"
-# If an error occurs in a classifier, if track_tracebacks is set to True, the
-# benchmark saves the traceback and continues, if it is set to False, it will
-# stop the benchmark and raise the error
-track_tracebacks: True
-
-# All the classification-related configuration options
-
-# The ratio of test examples/number of train examples
-split: 0.25
-# The number of folds in the cross-validation process when hyper-parameter optimization is performed
-nb_folds: 2
-# The number of classes to select in the dataset
-nb_class:
-# The name of the classes to select in the dataset
-classes:
-# The type of algorithms to run during the benchmark (monoview and/or multiview)
-type: ["monoview","multiview"]
-# The name of the monoview algorithms to run, ["all"] to run all the available classifiers
-algos_monoview: ["decision_tree"]
-# The names of the multiview algorithms to run, ["all"] to run all the available classifiers
-algos_multiview: ["weighted_linear_early_fusion", "weighted_linear_late_fusion",]
-# The number of times the benchmark is repeated with different train/test
-# split, to have more statistically significant results
-stats_iter: 1
-# The metrics that will be used in the result analysis
-metrics:
-  accuracy_score: {}
-  f1_score:
-    average: "micro"
-# The metric that will be used in the hyper-parameter optimization process
-metric_princ: "accuracy_score"
-# The type of hyper-parameter optimization method
-hps_type: "None"
-# The number of iterations in the hyper-parameter optimization process
-hps_args: {}
-
-### Configuring the hyper-parameters for the classifiers
-
-decision_tree:
-  max_depth: 3
-
-weighted_linear_early_fusion:
-  monoview_classifier_name: "decision_tree"
-  monoview_classifier_config:
-    decision_tree:
-      max_depth: 6
-
-weighted_linear_late_fusion:
-  classifiers_names: "decision_tree"
-  classifier_configs:
-    decision_tree:
-      max_depth: 3
diff --git a/multiview_platform/tests/test_database.hdf5 b/multiview_platform/tests/test_database.hdf5
deleted file mode 100644
index 63206a1219daf9e09d9a91d5a2440ef19a88af82..0000000000000000000000000000000000000000
Binary files a/multiview_platform/tests/test_database.hdf5 and /dev/null differ
diff --git a/multiview_platform/tests/test_exec_classif.py b/multiview_platform/tests/test_exec_classif.py
deleted file mode 100644
index 71cdc9e8948cbd1a129091c08e8c6fdedde0f486..0000000000000000000000000000000000000000
--- a/multiview_platform/tests/test_exec_classif.py
+++ /dev/null
@@ -1,415 +0,0 @@
-import os
-import unittest
-
-import h5py
-import numpy as np
-
-from multiview_platform.tests.utils import rm_tmp, tmp_path, test_dataset
-
-from multiview_platform.mono_multi_view_classifiers import exec_classif
-
-
-class Test_execute(unittest.TestCase):
-
-    @classmethod
-    def setUpClass(cls):
-        os.mkdir(tmp_path)
-
-    def test_exec_simple(self):
-        exec_classif.exec_classif(["--config_path", os.path.join(os.path.dirname(os.path.abspath(__file__)), "test_config_simple.yml")])
-
-    def test_exec_iter(self):
-        exec_classif.exec_classif(["--config_path", os.path.join(os.path.dirname(os.path.abspath(__file__)), "test_config_iter.yml")])
-
-    def test_exec_hps(self):
-        exec_classif.exec_classif(["--config_path", os.path.join(os.path.dirname(os.path.abspath(__file__)), "test_config_hps.yml")])
-
-    def tearDown(self):
-        rm_tmp()
-
-class Test_gen_single_monoview_arg_dictionary(unittest.TestCase):
-
-    def test_no_config(self):
-        conf = exec_classif.gen_single_monoview_arg_dictionary("classifier_name1",
-                                                               {}, "nb_class",
-                                                               "view_index",
-                                                               "view_name",
-                                                               "hps_kwargs")
-        self.assertEqual(conf, {"classifier_name1": {},
-                                "view_name": "view_name",
-                                "view_index": "view_index",
-                                "classifier_name": "classifier_name1",
-                                "nb_class": "nb_class",
-                                "hps_kwargs": "hps_kwargs"})
-
-class Test_initBenchmark(unittest.TestCase):
-
-    def test_benchmark_wanted(self):
-        benchmark_output = exec_classif.init_benchmark(cl_type=["monoview", "multiview"], monoview_algos=["decision_tree"], multiview_algos=["weighted_linear_late_fusion"])
-        self.assertEqual(benchmark_output, {'monoview': ['decision_tree'], 'multiview': ['weighted_linear_late_fusion']})
-        benchmark_output = exec_classif.init_benchmark(
-            cl_type=["monoview", "multiview"], monoview_algos=["all"],
-            multiview_algos=["all"])
-        self.assertEqual(benchmark_output,
-                         {'monoview': ['adaboost',
-                                       'decision_tree',
-                                       'gradient_boosting',
-                                       'knn',
-                                       'lasso',
-                                       'random_forest',
-                                       'sgd',
-                                       'svm_linear',
-                                       'svm_poly',
-                                       'svm_rbf'],
-                          'multiview': ['bayesian_inference_fusion',
-                                        'difficulty_fusion',
-                                        'disagree_fusion',
-                                        'double_fault_fusion',
-                                        'entropy_fusion',
-                                        'majority_voting_fusion',
-                                        'svm_jumbo_fusion',
-                                        'weighted_linear_early_fusion',
-                                        'weighted_linear_late_fusion']})
-
-
-class Test_Functs(unittest.TestCase):
-
-    @classmethod
-    def setUpClass(cls):
-        os.mkdir(tmp_path)
-
-    @classmethod
-    def tearDownClass(cls):
-        rm_tmp()
-
-    def test_initKWARGSFunc_no_monoview(self):
-        benchmark = {"monoview": {}, "multiview": {}}
-        args = exec_classif.init_kwargs_func({}, benchmark)
-        self.assertEqual(args, {"monoview": {}, "multiview": {}})
-
-    def test_init_kwargs(self):
-        kwargs = exec_classif.init_kwargs({"decision_tree":""},["decision_tree"])
-        self.assertEqual(kwargs, {"decision_tree":""})
-        kwargs = exec_classif.init_kwargs({"weighted_linear_late_fusion": ""},
-                                          ["weighted_linear_late_fusion"], framework="multiview")
-        self.assertEqual(kwargs, {"weighted_linear_late_fusion": ""})
-        kwargs = exec_classif.init_kwargs({}, ["decision_tree"],)
-        self.assertEqual(kwargs, {"decision_tree":{}})
-        self.assertRaises(AttributeError, exec_classif.init_kwargs, {}, ["test"])
-
-    def test_arange_metrics(self):
-        metrics = exec_classif.arange_metrics({"accuracy_score":{}}, "accuracy_score")
-        self.assertEqual(metrics, {"accuracy_score*":{}})
-        self.assertRaises(ValueError, exec_classif.arange_metrics, {"test1":{}}, "test")
-
-    def test_benchmark_init(self):
-        from sklearn.model_selection import StratifiedKFold
-        folds = StratifiedKFold(n_splits=2)
-        res, lab_names = exec_classif.benchmark_init(directory=tmp_path,
-                                                     classification_indices=[np.array([0,1,2,3]), np.array([4])],
-                                                     labels=test_dataset.get_labels(),
-                                                     labels_dictionary={"yes":0, "no":1},
-                                                     k_folds=folds,
-                                                     dataset_var=test_dataset)
-        self.assertEqual(res, [])
-        self.assertEqual(lab_names, [0, 1])
-
-
-
-
-class Test_InitArgumentDictionaries(unittest.TestCase):
-    @classmethod
-    def setUpClass(cls):
-        rm_tmp()
-        cls.benchmark = {"monoview": ["fake_monoview_classifier"], "multiview": {}}
-        cls.views_dictionnary = {'test_view_0': 0, 'test_view': 1}
-        cls.nb_class = 2
-        cls.monoview_classifier_name = "fake_monoview_classifier"
-        cls.monoview_classifier_arg_name = "fake_arg"
-        cls.monoview_classifier_arg_value = "fake_value_1"
-        cls.multiview_classifier_name = "fake_multiview_classifier"
-        cls.multiview_classifier_arg_name = "fake_arg_mv"
-        cls.multiview_classifier_arg_value = "fake_value_2"
-        cls.init_kwargs = {
-            'monoview':{
-                cls.monoview_classifier_name:
-                    {cls.monoview_classifier_arg_name:cls.monoview_classifier_arg_value}
-            },
-            "multiview":{
-                cls.multiview_classifier_name:{
-                    cls.multiview_classifier_arg_name:cls.multiview_classifier_arg_value}
-            }
-        }
-
-    def test_init_argument_dictionaries_monoview(self):
-        arguments = exec_classif.init_argument_dictionaries(self.benchmark,
-                                                            self.views_dictionnary,
-                                                            self.nb_class,
-                                                            self.init_kwargs,
-                                                            "None", {})
-        expected_output = [{
-                self.monoview_classifier_name: {
-                    self.monoview_classifier_arg_name:self.monoview_classifier_arg_value},
-                "view_name": "test_view_0",
-                'hps_kwargs': {},
-                "classifier_name": self.monoview_classifier_name,
-                "nb_class": self.nb_class,
-                "view_index": 0},
-                {self.monoview_classifier_name: {
-                    self.monoview_classifier_arg_name: self.monoview_classifier_arg_value},
-                "view_name": "test_view",
-                'hps_kwargs': {},
-                "classifier_name": self.monoview_classifier_name,
-                "nb_class": self.nb_class,
-                "view_index": 1},
-                           ]
-        self.assertEqual(arguments["monoview"], expected_output)
-
-    def test_init_argument_dictionaries_multiview(self):
-        self.benchmark["multiview"] = ["fake_multiview_classifier"]
-        self.benchmark["monoview"] = {}
-        arguments = exec_classif.init_argument_dictionaries(self.benchmark,
-                                                            self.views_dictionnary,
-                                                            self.nb_class,
-                                                            self.init_kwargs,
-                                                            "None", {})
-        expected_output = [{
-                "classifier_name": self.multiview_classifier_name,
-                "view_indices": [0,1],
-                "view_names": ["test_view_0", "test_view"],
-                "nb_class": self.nb_class,
-                'hps_kwargs': {},
-                "labels_names":None,
-                self.multiview_classifier_name: {
-                    self.multiview_classifier_arg_name:
-                        self.multiview_classifier_arg_value},
-        },]
-        self.assertEqual(arguments["multiview"][0], expected_output[0])
-
-
-    def test_init_argument_dictionaries_multiview_complex(self):
-        self.multiview_classifier_arg_value = {"fake_value_2":"plif", "plaf":"plouf"}
-        self.init_kwargs = {
-            'monoview': {
-                self.monoview_classifier_name:
-                    {
-                        self.monoview_classifier_arg_name: self.monoview_classifier_arg_value}
-            },
-            "multiview": {
-                self.multiview_classifier_name: {
-                    self.multiview_classifier_arg_name: self.multiview_classifier_arg_value}
-            }
-        }
-        self.benchmark["multiview"] = ["fake_multiview_classifier"]
-        self.benchmark["monoview"] = {}
-        arguments = exec_classif.init_argument_dictionaries(self.benchmark,
-                                                            self.views_dictionnary,
-                                                            self.nb_class,
-                                                            self.init_kwargs,
-                                                            "None", {})
-        expected_output = [{
-                "classifier_name": self.multiview_classifier_name,
-                "view_indices": [0,1],
-                'hps_kwargs': {},
-                "view_names": ["test_view_0", "test_view"],
-                "nb_class": self.nb_class,
-                "labels_names":None,
-                self.multiview_classifier_name: {
-                    self.multiview_classifier_arg_name:
-                        self.multiview_classifier_arg_value},
-        }]
-        self.assertEqual(arguments["multiview"][0], expected_output[0])
-
-
-def fakeBenchmarkExec(core_index=-1, a=7, args=1):
-    return [core_index, a]
-
-
-def fakeBenchmarkExec_multicore(nb_cores=-1, a=6, args=1):
-    return [nb_cores, a]
-
-
-def fakeBenchmarkExec_monocore(dataset_var=1, a=4, args=1, track_tracebacks=False):
-    return [a]
-
-
-def fakegetResults(results, stats_iter,
-                   benchmark_arguments_dictionaries, metrics, directory,
-                   example_ids, labels):
-    return 3
-
-
-def fakeDelete(a, b, c):
-    return 9
-
-def fake_analyze(a, b, c, d, example_ids=None, labels=None):
-    pass
-
-class Test_execBenchmark(unittest.TestCase):
-
-    @classmethod
-    def setUpClass(cls):
-        rm_tmp()
-
-        os.mkdir(tmp_path)
-        cls.Dataset = test_dataset
-        cls.argument_dictionaries = [{"a": 4, "args": {}}]
-        cls.args = {
-            "Base":{"name": "chicken_is_heaven", "type": "type", "pathf": "pathF"},
-            "Classification":{"hps_iter": 1}}
-
-    def test_simple(cls):
-        res = exec_classif.exec_benchmark(nb_cores=1,
-                                          stats_iter=2,
-                                          benchmark_arguments_dictionaries=cls.argument_dictionaries,
-                                          directory="",
-                                          metrics=[[[1, 2], [3, 4, 5]]],
-                                          dataset_var=cls.Dataset,
-                                          track_tracebacks=6,
-                                          # exec_one_benchmark=fakeBenchmarkExec,
-                                          # exec_one_benchmark_multicore=fakeBenchmarkExec_multicore,
-                                          exec_one_benchmark_mono_core=fakeBenchmarkExec_monocore,
-                                          analyze=fakegetResults,
-                                          delete=fakeDelete,
-                                          analyze_iterations=fake_analyze)
-        cls.assertEqual(res, 3)
-
-    def test_multiclass_no_iter(cls):
-        cls.argument_dictionaries = [{"a": 10, "args": cls.args},
-                                    {"a": 4, "args": cls.args}]
-        res = exec_classif.exec_benchmark(nb_cores=1,
-                                          stats_iter=1,
-                                          benchmark_arguments_dictionaries=cls.argument_dictionaries,
-                                          directory="",
-                                          metrics=[[[1, 2], [3, 4, 5]]],
-                                          dataset_var=cls.Dataset,
-                                          track_tracebacks=6,
-                                          # exec_one_benchmark=fakeBenchmarkExec,
-                                          # exec_one_benchmark_multicore=fakeBenchmarkExec_multicore,
-                                          exec_one_benchmark_mono_core=fakeBenchmarkExec_monocore,
-                                          analyze=fakegetResults,
-                                          delete=fakeDelete,
-                                          analyze_iterations=fake_analyze)
-        cls.assertEqual(res, 3)
-
-    def test_multiclass_and_iter(cls):
-        cls.argument_dictionaries = [{"a": 10, "args": cls.args},
-                                    {"a": 4, "args": cls.args},
-                                    {"a": 55, "args": cls.args},
-                                    {"a": 24, "args": cls.args}]
-        res = exec_classif.exec_benchmark(nb_cores=1,
-                                          stats_iter=2,
-                                          benchmark_arguments_dictionaries=cls.argument_dictionaries,
-                                          directory="",
-                                          metrics=[[[1, 2], [3, 4, 5]]],
-                                          dataset_var=cls.Dataset,
-                                          track_tracebacks=6,
-                                          # exec_one_benchmark=fakeBenchmarkExec,
-                                          # exec_one_benchmark_multicore=fakeBenchmarkExec_multicore,
-                                          exec_one_benchmark_mono_core=fakeBenchmarkExec_monocore,
-                                          analyze=fakegetResults,
-                                          delete=fakeDelete,
-                                          analyze_iterations=fake_analyze)
-        cls.assertEqual(res, 3)
-
-    def test_no_iter_biclass_multicore(cls):
-        res = exec_classif.exec_benchmark(nb_cores=1,
-                                          stats_iter=1,
-                                          benchmark_arguments_dictionaries=cls.argument_dictionaries,
-                                          directory="",
-                                          metrics=[[[1, 2], [3, 4, 5]]],
-                                          dataset_var=cls.Dataset,
-                                          track_tracebacks=6,
-                                          # exec_one_benchmark=fakeBenchmarkExec,
-                                          # exec_one_benchmark_multicore=fakeBenchmarkExec_multicore,
-                                          exec_one_benchmark_mono_core=fakeBenchmarkExec_monocore,
-                                          analyze=fakegetResults,
-                                          delete=fakeDelete,
-                                          analyze_iterations=fake_analyze)
-        cls.assertEqual(res, 3)
-
-    @classmethod
-    def tearDownClass(cls):
-        rm_tmp()
-
-def fakeExecMono(directory, name, labels_names, classification_indices, k_folds,
-                 coreIndex, type, pathF, random_state, labels,
-                 hyper_param_search="try", metrics="try", n_iter=1, **arguments):
-    return ["Mono", arguments]
-
-
-def fakeExecMulti(directory, coreIndex, name, classification_indices, k_folds,
-                  type, pathF, labels_dictionary,
-                  random_state, labels, hyper_param_search="", metrics=None,
-                  n_iter=1, **arguments):
-    return ["Multi", arguments]
-
-
-def fakeInitMulti(args, benchmark, views, views_indices, argument_dictionaries,
-                  random_state, directory, resultsMonoview,
-                  classification_indices):
-    return {"monoview": [{"try": 0}, {"try2": 100}],
-            "multiview": [{"try3": 5}, {"try4": 10}]}
-
-
-class FakeKfold():
-    def __init__(self):
-        self.n_splits = 2
-        pass
-
-    def split(self, X, Y):
-        return [([X[0], X[1]], [X[2], X[3]]), ([X[2], X[3]], [X[0], X[1]])]
-
-
-class Test_set_element(unittest.TestCase):
-
-    @classmethod
-    def setUpClass(cls):
-        cls.dictionary = {"a":
-                              {"b":{
-                                  "c":{
-                                      "d":{
-                                          "e":1,
-                                          "f":[1]
-                                      }
-                                  }
-                              }}}
-        cls.elements = {"a.b.c.d.e":1, "a.b.c.d.f":[1]}
-
-    @classmethod
-    def tearDownClass(cls):
-        pass
-
-    def test_simple(self):
-        simplified_dict = {}
-        for path, value in self.elements.items():
-            simplified_dict = exec_classif.set_element(simplified_dict, path, value)
-        self.assertEqual(simplified_dict, self.dictionary)
-
-
-class Test_get_path_dict(unittest.TestCase):
-
-    @classmethod
-    def setUpClass(cls):
-        cls.dictionary = {"a":
-                              {"b":{
-                                  "c":{
-                                      "d":{
-                                          "e":1,
-                                          "f":[1]
-                                      }
-                                  }
-                              }}}
-
-    @classmethod
-    def tearDownClass(cls):
-        pass
-
-    def test_simple(self):
-        path_dict = exec_classif.get_path_dict(self.dictionary)
-        self.assertEqual(path_dict, {"a.b.c.d.e":1, "a.b.c.d.f":[1]})
-
-
-
-if __name__ == '__main__':
-    unittest.main()
\ No newline at end of file
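The two helpers exercised by `Test_set_element` and `Test_get_path_dict` above convert between nested configuration dictionaries and flat dotted-path mappings. A minimal sketch of the expected behaviour, reconstructed from the assertions alone (the bodies below are an assumption, not the platform's code):

```python
def set_element(dictionary, path, value):
    # Set a value in a nested dict from a dotted path such as "a.b.c.d.e",
    # creating intermediate dicts as needed.
    keys = path.split(".")
    node = dictionary
    for key in keys[:-1]:
        node = node.setdefault(key, {})
    node[keys[-1]] = value
    return dictionary


def get_path_dict(dictionary, prefix=""):
    # Flatten a nested dict into {"a.b.c.d.e": value} pairs; the inverse of
    # repeated set_element calls.
    path_dict = {}
    for key, value in dictionary.items():
        path = key if prefix == "" else prefix + "." + key
        if isinstance(value, dict):
            path_dict.update(get_path_dict(value, path))
        else:
            path_dict[path] = value
    return path_dict
```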
diff --git a/multiview_platform/tests/test_metrics/__init__.py b/multiview_platform/tests/test_metrics/__init__.py
deleted file mode 100644
index e69de29bb2d1d6434b8b29ae775ad8c2e48c5391..0000000000000000000000000000000000000000
diff --git a/multiview_platform/tests/test_metrics/test_metrics.py b/multiview_platform/tests/test_metrics/test_metrics.py
deleted file mode 100644
index 301a42d463af85bd67082fdccddeae9e3e9c83c2..0000000000000000000000000000000000000000
--- a/multiview_platform/tests/test_metrics/test_metrics.py
+++ /dev/null
@@ -1,29 +0,0 @@
-import unittest
-import multiview_platform.mono_multi_view_classifiers.metrics as metrics
-import pkgutil
-import os
-from sklearn.metrics._scorer import _BaseScorer
-
-# Test that every metric module provides the right functions, returning the right output types for the right input types
-# Do the same for the various monoview and multiview classifiers
-
-
-class Test_metric(unittest.TestCase):
-
-    @classmethod
-    def setUpClass(cls):
-        cls.test="a"
-
-
-    def test_simple(self):
-        pkgpath = os.path.dirname(metrics.__file__)
-        for _, metric, _ in pkgutil.iter_modules([pkgpath]):
-            module = getattr(metrics, metric)
-            self.assertTrue(hasattr(module, "score"))
-            self.assertTrue(isinstance(module.score([1,0],[1,0]), float))
-            self.assertTrue(hasattr(module, "get_scorer"))
-            self.assertTrue(isinstance(module.get_scorer(), _BaseScorer))
-            self.assertTrue(hasattr(module, "get_config"))
-            self.assertTrue(isinstance(module.get_config(), str))
-
-
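The deleted test above enforces a small per-module contract on everything in the `metrics` package: a `score` function returning a float, a `get_scorer` function returning an sklearn `_BaseScorer`, and a `get_config` function returning a string. A minimal conforming module might look like the accuracy-based sketch below (illustrative only, not the platform's actual implementation):

```python
from sklearn.metrics import accuracy_score, make_scorer


def score(y_true, y_pred, **kwargs):
    # Must return a plain float.
    return float(accuracy_score(y_true, y_pred, **kwargs))


def get_scorer(**kwargs):
    # make_scorer returns a _BaseScorer subclass, satisfying the
    # isinstance check in the test.
    return make_scorer(accuracy_score, **kwargs)


def get_config(**kwargs):
    # Must return a human-readable description of the metric configuration.
    return "Accuracy score with config: {}".format(kwargs)
```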
diff --git a/multiview_platform/tests/test_mono_view/__init__.py b/multiview_platform/tests/test_mono_view/__init__.py
deleted file mode 100644
index e69de29bb2d1d6434b8b29ae775ad8c2e48c5391..0000000000000000000000000000000000000000
diff --git a/multiview_platform/tests/test_mono_view/test_exec_classif_mono_view.py b/multiview_platform/tests/test_mono_view/test_exec_classif_mono_view.py
deleted file mode 100644
index 784bac2a394c614d1693a343a9e039ca20ef4e06..0000000000000000000000000000000000000000
--- a/multiview_platform/tests/test_mono_view/test_exec_classif_mono_view.py
+++ /dev/null
@@ -1,245 +0,0 @@
-import os
-import unittest
-
-import h5py
-import numpy as np
-from sklearn.model_selection import StratifiedKFold
-
-from multiview_platform.tests.utils import rm_tmp, tmp_path, test_dataset
-
-from multiview_platform.mono_multi_view_classifiers.monoview import exec_classif_mono_view
-from multiview_platform.mono_multi_view_classifiers.monoview_classifiers import decision_tree
-
-
-class Test_initConstants(unittest.TestCase):
-
-    @classmethod
-    def setUpClass(cls):
-        rm_tmp()
-        os.mkdir(tmp_path)
-        cls.view_name="test_dataset"
-        cls.datasetFile = h5py.File(
-            tmp_path+"test.hdf5", "w")
-        cls.random_state = np.random.RandomState(42)
-        cls.args = {"classifier_name": "test_clf"}
-        cls.X_value = cls.random_state.randint(0, 500, (10, 20))
-        cls.X = cls.datasetFile.create_dataset("View0", data=cls.X_value)
-        cls.X.attrs["name"] = "test_dataset"
-        cls.X.attrs["sparse"] = False
-        cls.classification_indices = [np.array([0, 2, 4, 6, 8]),
-                                     np.array([1, 3, 5, 7, 9]),
-                                     np.array([1, 3, 5, 7, 9])]
-        cls.labels_names = ["test_true", "test_false"]
-        cls.name = "test"
-        cls.directory = os.path.join(tmp_path, "test_dir/")
-
-    def test_simple(cls):
-        kwargs, \
-        t_start, \
-        feat, \
-        CL_type, \
-        X, \
-        learningRate, \
-        labelsString, \
-        output_file_name,\
-        directory,\
-        base_file_name = exec_classif_mono_view.init_constants(cls.args,
-                                                               cls.X,
-                                                               cls.classification_indices,
-                                                               cls.labels_names,
-                                                               cls.name,
-                                                               cls.directory,
-                                                               cls.view_name)
-        cls.assertEqual(kwargs, cls.args)
-        cls.assertEqual(feat, "test_dataset")
-        cls.assertEqual(CL_type, "test_clf")
-        np.testing.assert_array_equal(X, cls.X_value)
-        cls.assertEqual(learningRate, 0.5)
-        cls.assertEqual(labelsString, "test_true-test_false")
-        # cls.assertEqual(output_file_name, "Code/tests/temp_tests/test_dir/test_clf/test_dataset/results-test_clf-test_true-test_false-learnRate0.5-test-test_dataset-")
-
-    @classmethod
-    def tearDownClass(cls):
-        os.remove(tmp_path+"test.hdf5")
-        os.rmdir(
-            tmp_path+"test_dir/test_clf/test_dataset")
-        os.rmdir(tmp_path+"test_dir/test_clf")
-        os.rmdir(tmp_path+"test_dir")
-        os.rmdir(tmp_path)
-
-
-class Test_initTrainTest(unittest.TestCase):
-
-    @classmethod
-    def setUpClass(cls):
-        rm_tmp()
-        cls.random_state = np.random.RandomState(42)
-        cls.X = cls.random_state.randint(0, 500, (10, 5))
-        cls.Y = cls.random_state.randint(0, 2, 10)
-        cls.classification_indices = [np.array([0, 2, 4, 6, 8]),
-                                     np.array([1, 3, 5, 7, 9]),
-                                   ]
-
-    def test_simple(cls):
-        X_train, y_train, X_test, y_test = exec_classif_mono_view.init_train_test(
-            cls.X, cls.Y, cls.classification_indices)
-
-        np.testing.assert_array_equal(X_train, np.array(
-            [np.array([102, 435, 348, 270, 106]),
-             np.array([466, 214, 330, 458, 87]),
-             np.array([149, 308, 257, 343, 491]),
-             np.array([276, 160, 459, 313, 21]),
-             np.array([58, 169, 475, 187, 463])]))
-        np.testing.assert_array_equal(X_test, np.array(
-            [np.array([71, 188, 20, 102, 121]),
-             np.array([372, 99, 359, 151, 130]),
-             np.array([413, 293, 385, 191, 443]),
-             np.array([252, 235, 344, 48, 474]),
-             np.array([270, 189, 445, 174, 445])]))
-        np.testing.assert_array_equal(y_train, np.array([0, 0, 1, 0, 0]))
-        np.testing.assert_array_equal(y_test, np.array([1, 1, 0, 0, 0]))
-
-
-class Test_getHPs(unittest.TestCase):
-
-    @classmethod
-    def setUpClass(cls):
-        rm_tmp()
-        os.mkdir(tmp_path)
-        cls.classifierModule = decision_tree
-        cls.hyper_param_search = "Random"
-        cls.classifier_name = "decision_tree"
-        cls.random_state = np.random.RandomState(42)
-        cls.X = cls.random_state.randint(0,10,size=(10,5))
-        cls.y = cls.random_state.randint(0,2,size=10)
-        cls.output_file_name = tmp_path
-        cls.cv = StratifiedKFold(n_splits=2, random_state=cls.random_state, shuffle=True)
-        cls.nb_cores = 1
-        cls.metrics = {"accuracy_score*": {}}
-        cls.kwargs = {"decision_tree" : {"max_depth": 1,
-                      "criterion": "gini",
-                      "splitter": "best"}}
-        cls.classifier_class_name = "DecisionTree"
-        cls.hps_kwargs = {"n_iter": 2}
-
-    @classmethod
-    def tearDownClass(cls):
-        for file_name in os.listdir(tmp_path):
-            os.remove(
-                os.path.join(tmp_path, file_name))
-        os.rmdir(tmp_path)
-
-    def test_simple(self):
-        kwargs = exec_classif_mono_view.get_hyper_params(self.classifierModule,
-                                                         self.hyper_param_search,
-                                                         self.classifier_name,
-                                                         self.classifier_class_name,
-                                                         self.X,
-                                                         self.y,
-                                                         self.random_state,
-                                                         self.output_file_name,
-                                                         self.cv,
-                                                         self.nb_cores,
-                                                         self.metrics,
-                                                         self.kwargs,
-                                                         **self.hps_kwargs)
-    def test_simple_config(self):
-        kwargs = exec_classif_mono_view.get_hyper_params(self.classifierModule,
-                                                         "None",
-                                                         self.classifier_name,
-                                                         self.classifier_class_name,
-                                                         self.X,
-                                                         self.y,
-                                                         self.random_state,
-                                                         self.output_file_name,
-                                                         self.cv,
-                                                         self.nb_cores,
-                                                         self.metrics,
-                                                         self.kwargs,
-                                                         **self.hps_kwargs)
-
-
-class Test_exec_monoview(unittest.TestCase):
-
-    def test_simple(self):
-        os.mkdir(tmp_path)
-        out = exec_classif_mono_view.exec_monoview(tmp_path,
-                                                   test_dataset.get_v(0),
-                                                   test_dataset.get_labels(),
-                                                   "test dataset",
-                                                   ["yes", "no"],
-                                                   [np.array([0,1,2,4]), np.array([4])],
-                                                   StratifiedKFold(n_splits=2),
-                                                   1,
-                                                   "",
-                                                   "",
-                                                   np.random.RandomState(42),
-                                                   "Random",
-                                                   n_iter=2,
-                                                   **{"classifier_name":"decision_tree",
-                                                    "view_index":0,
-                                                      "decision_tree":{}})
-        rm_tmp()
-
-# class Test_getKWARGS(unittest.TestCase):
-#
-#     @classmethod
-#     def setUpClass(cls):
-#         cls.classifierModule = None
-#         cls.hyper_param_search = "None"
-#         cls.nIter = 2
-#         cls.CL_type = "string"
-#         cls.X_train = np.zeros((10,20))
-#         cls.y_train = np.zeros((10))
-#         cls.random_state = np.random.RandomState(42)
-#         cls.outputFileName = "test_file"
-#         cls.KFolds = None
-#         cls.nbCores = 1
-#         cls.metrics = {"accuracy_score":""}
-#         cls.kwargs = {}
-#
-#     def test_simple(cls):
-#         clKWARGS = ExecClassifMonoView.getHPs(cls.classifierModule,
-#                                               cls.hyper_param_search,
-#                                               cls.nIter,
-#                                               cls.CL_type,
-#                                               cls.X_train,
-#                                               cls.y_train,
-#                                               cls.random_state,
-#                                               cls.outputFileName,
-#                                               cls.KFolds,
-#                                               cls.nbCores,
-#                                               cls.metrics,
-#                                               cls.kwargs)
-#         pass
-#
-# class Test_saveResults(unittest.TestCase):
-#
-#     @classmethod
-#     def setUpClass(cls):
-#         cls.stringAnalysis = "string analysis"
-#         cls.outputFileName = "test_file"
-#         cls.full_labels_pred = np.zeros(10)
-#         cls.y_train_pred = np.ones(5)
-#         cls.y_train = np.zeros(5)
-#         cls.imagesAnalysis = {}
-#
-#     def test_simple(cls):
-#         ExecClassifMonoView.saveResults(cls.stringAnalysis,
-#                                         cls.outputFileName,
-#                                         cls.full_labels_pred,
-#                                         cls.y_train_pred,
-#                                         cls.y_train,
-#                                         cls.imagesAnalysis)
-#         # Test if the files are created with the right content
-#
-#     def test_with_image_analysis(cls):
-#         cls.imagesAnalysis = {"test_image":"image.png"} # Image to gen
-#         ExecClassifMonoView.saveResults(cls.stringAnalysis,
-#                                         cls.outputFileName,
-#                                         cls.full_labels_pred,
-#                                         cls.y_train_pred,
-#                                         cls.y_train,
-#                                         cls.imagesAnalysis)
-#         # Test if the files are created with the right content
-#
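`Test_initTrainTest` above fully pins down the splitting behaviour: the function slices the feature matrix and labels with the precomputed train and test index arrays. A sketch under that reading (an assumed re-implementation, not the platform's code):

```python
import numpy as np


def init_train_test(X, y, classification_indices):
    # classification_indices holds precomputed index arrays; the first is
    # the train split, the second the test split.
    train_indices, test_indices = classification_indices[0], classification_indices[1]
    return X[train_indices], y[train_indices], X[test_indices], y[test_indices]
```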
diff --git a/multiview_platform/tests/test_mono_view/test_monoview_utils.py b/multiview_platform/tests/test_mono_view/test_monoview_utils.py
deleted file mode 100644
index b0f414ba102a1e55e8882d26052c1af695518695..0000000000000000000000000000000000000000
--- a/multiview_platform/tests/test_mono_view/test_monoview_utils.py
+++ /dev/null
@@ -1,51 +0,0 @@
-import unittest
-
-import numpy as np
-from sklearn.model_selection import StratifiedKFold
-from sklearn.tree import DecisionTreeClassifier
-
-from multiview_platform.mono_multi_view_classifiers.monoview import monoview_utils
-from multiview_platform.mono_multi_view_classifiers.utils.hyper_parameter_search import CustomRandint
-
-class TestFunctions(unittest.TestCase):
-
-    def test_gen_test_folds_preds(self):
-        self.random_state = np.random.RandomState(42)
-        self.X_train = self.random_state.random_sample((31, 10))
-        self.y_train = np.ones(31, dtype=int)
-        self.KFolds = StratifiedKFold(n_splits=3, )
-
-        self.estimator = DecisionTreeClassifier(max_depth=1)
-
-        self.y_train[15:] = -1
-        testFoldsPreds = monoview_utils.gen_test_folds_preds(self.X_train,
-                                                             self.y_train,
-                                                             self.KFolds,
-                                                             self.estimator)
-        self.assertEqual(testFoldsPreds.shape, (3, 10))
-        np.testing.assert_array_equal(testFoldsPreds[0], np.array(
-            [ 1,  1, -1, -1,  1,  1, -1,  1, -1,  1]))
-
-    def test_change_label_to_minus(self):
-        lab = monoview_utils.change_label_to_minus(np.array([0,1,0]))
-        np.testing.assert_array_equal(lab, np.array([-1,1,-1]))
-
-    def test_change_label_to_zero(self):
-        lab = monoview_utils.change_label_to_zero(np.array([-1,1,-1]))
-        np.testing.assert_array_equal(lab, np.array([0,1,0]))
-
-    def test_compute_possible_combinations(self):
-        n_possib = monoview_utils.compute_possible_combinations({"a":[1, 2], "b":{"c":[2,3]}, "d":CustomRandint(0,10)})
-        np.testing.assert_array_equal(n_possib, np.array([2, np.inf, 10]))
-
-class FakeClf(monoview_utils.BaseMonoviewClassifier):
-
-    def __init__(self):
-        pass
-
-
-class TestBaseMonoviewClassifier(unittest.TestCase):
-
-    def test_simple(self):
-        name = FakeClf().get_name_for_fusion()
-        self.assertEqual(name, 'Fake')
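The assertions in the deleted `TestFunctions` above determine three small utilities: label remapping between {0, 1} and {-1, 1}, and a count of the possible hyper-parameter combinations (a list contributes its length, a nested dict an unbounded space, a `CustomRandint` the size of its range). Minimal sketches reconstructed from those assertions; the `low`/`high` attributes on `CustomRandint` are an assumption:

```python
import numpy as np


def change_label_to_minus(y):
    # Map {0, 1} labels to {-1, 1}.
    return np.where(y == 0, -1, y)


def change_label_to_zero(y):
    # Map {-1, 1} labels back to {0, 1}.
    return np.where(y == -1, 0, y)


def compute_possible_combinations(param_distributions):
    # One size per parameter: len() for lists, inf for nested dicts,
    # and the range width for randint-style distributions.
    sizes = []
    for distribution in param_distributions.values():
        if isinstance(distribution, list):
            sizes.append(len(distribution))
        elif isinstance(distribution, dict):
            sizes.append(np.inf)
        else:
            sizes.append(distribution.high - distribution.low)
    return np.array(sizes)
```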
diff --git a/multiview_platform/tests/test_monoview_classifiers/__init__.py b/multiview_platform/tests/test_monoview_classifiers/__init__.py
deleted file mode 100644
index e69de29bb2d1d6434b8b29ae775ad8c2e48c5391..0000000000000000000000000000000000000000
diff --git a/multiview_platform/tests/test_monoview_classifiers/test_adaboost.py b/multiview_platform/tests/test_monoview_classifiers/test_adaboost.py
deleted file mode 100644
index 94f5f835d05d4292a57998d9a3175d0afe6fec89..0000000000000000000000000000000000000000
--- a/multiview_platform/tests/test_monoview_classifiers/test_adaboost.py
+++ /dev/null
@@ -1,80 +0,0 @@
-# import unittest
-# import numpy as np
-# from sklearn.tree import DecisionTreeClassifier
-#
-# from ...mono_multi_view_classifiers.monoview_classifiers import Adaboost
-#
-#
-# class Test_canProbas(unittest.TestCase):
-#
-#     def test_simple(cls):
-#         cls.assertTrue(Adaboost.canProbas())
-#
-#
-# class Test_paramsToSet(unittest.TestCase):
-#
-#     @classmethod
-#     def setUpClass(cls):
-#         cls.n_iter = 4
-#         cls.random_state = np.random.RandomState(42)
-#
-#     def test_simple(cls):
-#         res = Adaboost.paramsToSet(cls.n_iter, cls.random_state)
-#         cls.assertEqual(len(res), cls.n_iter)
-#         cls.assertEqual(type(res[0][0]), int)
-#         cls.assertEqual(type(res[0][1]), type(DecisionTreeClassifier()))
-#         cls.assertEqual([7,4,13,11], [resIter[0] for resIter in res])
-#
-#
-# class Test_getKWARGS(unittest.TestCase):
-#
-#     @classmethod
-#     def setUpClass(cls):
-#         cls.kwargs_list = [("CL_Adaboost_n_est", 10),
-#                            ("CL_Adaboost_b_est", DecisionTreeClassifier())]
-#
-#     def test_simple(cls):
-#         res = Adaboost.getKWARGS(cls.kwargs_list)
-#         cls.assertIn("0", res)
-#         cls.assertIn("1", res)
-#         cls.assertEqual(type(res), dict)
-#         cls.assertEqual(res["0"], 10)
-#         # Can't test decision tree
-#
-#     def test_wrong(cls):
-#         cls.kwargs_list[0] = ("chicken_is_heaven",42)
-#         with cls.assertRaises(ValueError) as catcher:
-#             Adaboost.getKWARGS(cls.kwargs_list)
-#         exception = catcher.exception
-#         # cls.assertEqual(exception, "Wrong arguments served to Adaboost")
-#
-#
-# class Test_randomizedSearch(unittest.TestCase):
-#
-#     def test_simple(cls):
-#         pass  # Test with simple params
-#
-#
-# class Test_fit(unittest.TestCase):
-#
-#     def setUp(self):
-#         self.random_state = np.random.RandomState(42)
-#         self.dataset = self.random_state.randint(0, 100, (10, 5))
-#         self.labels = self.random_state.randint(0, 2, 10)
-#         self.kwargs = {"0": 5}
-#         self.classifier = Adaboost.fit(self.dataset, self.labels, 42, NB_CORES=1, **self.kwargs)
-#
-#     def test_fit_kwargs_string(self):
-#         self.kwargs = {"0": "5"}
-#         classifier = Adaboost.fit(self.dataset, self.labels, 42, NB_CORES=1, **self.kwargs)
-#         self.assertEqual(classifier.n_estimators, 5)
-#
-#     def test_fit_kwargs_int(self):
-#         self.kwargs = {"0": 5}
-#         classifier = Adaboost.fit(self.dataset, self.labels, 42, NB_CORES=1, **self.kwargs)
-#         self.assertEqual(classifier.n_estimators, 5)
-#
-#     def test_fit_labels(self):
-#         predicted_labels = self.classifier.predict(self.dataset)
-#         np.testing.assert_array_equal(predicted_labels, self.labels)
-#
diff --git a/multiview_platform/tests/test_monoview_classifiers/test_compatibility.py b/multiview_platform/tests/test_monoview_classifiers/test_compatibility.py
deleted file mode 100644
index 91c566df14d478c9e8040955d94e70b96bf75df0..0000000000000000000000000000000000000000
--- a/multiview_platform/tests/test_monoview_classifiers/test_compatibility.py
+++ /dev/null
@@ -1,157 +0,0 @@
-# import os
-# import unittest
-#
-
-# Currently problematic because of the PEP8-ification of the code. To be revisited later.
-
-
-# import numpy as np
-#
-# from ...mono_multi_view_classifiers import monoview_classifiers
-#
-#
-# class Test_methods(unittest.TestCase):
-#
-#     def test_simple(self):
-#         for fileName in os.listdir(
-#                 "multiview_platform/mono_multi_view_classifiers/monoview_classifiers"):
-#             if fileName[-3:] == ".py" and fileName != "__init__.py":
-#                 monoview_classifier_module = getattr(monoview_classifiers,
-#                                                      fileName[:-3])
-#                 self.assertIn("formatCmdArgs", dir(monoview_classifier_module),
-#                               fileName[
-#                               :-3] + " must have getKWARGS method implemented")
-#                 self.assertIn("paramsToSet", dir(monoview_classifier_module),
-#                               fileName[
-#                               :-3] + " must have randomizedSearch method implemented")
-#                 #test to be changed find name of class not same name of module
-#                 # self.assertIn(fileName[:-3], dir(monoview_classifier_module),
-#                 #              fileName[
-#                 #              :-3] + " must have it's own class implemented")
-#
-#                 monoview_classifier_class = getattr(monoview_classifier_module,
-#                                                     fileName[:-3])
-#                 self.assertTrue(
-#                     hasattr(monoview_classifier_class, "getInterpret"),
-#                     fileName[:-3] + " class must have getInterpret implemented")
-#                 self.assertTrue(
-#                     hasattr(monoview_classifier_class, "canProbas", ),
-#                     fileName[:-3] + " class must have canProbas implemented")
-#                 monoview_classifier_instance = monoview_classifier_class()
-#                 self.assertTrue(
-#                     hasattr(monoview_classifier_instance, "param_names", ),
-#                     fileName[:-3] + " class must have param_names attribute")
-#                 self.assertTrue(
-#                     hasattr(monoview_classifier_instance, "classed_params", ),
-#                     fileName[:-3] + " class must have classed_params attribute")
-#                 self.assertTrue(
-#                     hasattr(monoview_classifier_instance, "distribs", ),
-#                     fileName[:-3] + " class must have distribs attribute")
-#                 self.assertTrue(
-#                     hasattr(monoview_classifier_instance, "weird_strings", ),
-#                     fileName[:-3] + " class must have weird_strings attribute")
-#                 # check_estimator(monoview_classifier_instance)
-#
-#
-# class Test_canProbas(unittest.TestCase):
-#
-#     def test_outputs(self):
-#         for fileName in os.listdir(
-#                 "multiview_platform/mono_multi_view_classifiers/monoview_classifiers"):
-#             if fileName[-3:] == ".py" and fileName != "__init__.py":
-#                 monoview_classifier_module = getattr(monoview_classifiers,
-#                                                      fileName[:-3])
-#                 monoview_classifier_class = getattr(monoview_classifier_module,
-#                                                     fileName[:-3])()
-#                 res = monoview_classifier_class.canProbas()
-#                 self.assertEqual(type(res), bool,
-#                                  "canProbas must return a boolean")
-#
-#     def test_inputs(self):
-#         for fileName in os.listdir(
-#                 "multiview_platform/mono_multi_view_classifiers/monoview_classifiers"):
-#             if fileName[-3:] == ".py" and fileName != "__init__.py":
-#                 monoview_classifier_module = getattr(monoview_classifiers,
-#                                                      fileName[:-3])
-#                 monoview_classifier_class = getattr(monoview_classifier_module,
-#                                                     fileName[:-3])()
-#                 with self.assertRaises(TypeError,
-#                                        msg="canProbas must have 0 args") as catcher:
-#                     monoview_classifier_class.canProbas(35)
-#
-#
-# class Test_fit(unittest.TestCase):
-#
-#     @classmethod
-#     def setUpClass(cls):
-#         cls.random_state = np.random.RandomState(42)
-#         cls.dataset = cls.random_state.random_sample((10, 20))
-#         cls.labels = cls.random_state.randint(0, 2, 10)
-#
-#     # def test_inputs(cls):
-#     #     # DATASET, CLASS_LABELS, random_state, NB_CORES=1, **kwargs
-#     #     for fileName in os.listdir("Code/mono_multi_view_classifiers/monoview_classifiers"):
-#     #         if fileName[-3:] == ".py" and fileName != "__init__.py":
-#     #             monoview_classifier_module = getattr(monoview_classifiers, fileName[:-3])
-#     #             cls.args = dict((str(index), value) for index, value in
-#     #                             enumerate(monoview_classifier_module.paramsToSet(1, cls.random_state)[0]))
-#     #             res = monoview_classifier_module.fit(cls.dataset, cls.labels, cls.random_state, **cls.args)
-#     #             with cls.assertRaises(TypeError, msg="fit must have 3 positional args, one kwarg") as catcher:
-#     #                 monoview_classifier_module.fit()
-#     #                 monoview_classifier_module.fit(cls.dataset)
-#     #                 monoview_classifier_module.fit(cls.dataset,cls.labels)
-#     #                 monoview_classifier_module.fit(cls.dataset,cls.labels, cls.random_state, 1, 10)
-#
-#     # def test_outputs(cls):
-#     #     for fileName in os.listdir("Code/mono_multi_view_classifiers/monoview_classifiers"):
-#     #         if fileName[-3:] == ".py" and fileName != "__init__.py":
-#     #             monoview_classifier_module = getattr(monoview_classifiers, fileName[:-3])
-#     #             cls.args = dict((str(index), value) for index, value in
-#     #                             enumerate(monoview_classifier_module.paramsToSet(1, cls.random_state)[0]))
-#     #             res = monoview_classifier_module.fit(cls.dataset, cls.labels, cls.random_state, **cls.args)
-#     #             cls.assertIn("predict", dir(res), "fit must return an object able to predict")
-#
-#
-# class Test_paramsToSet(unittest.TestCase):
-#
-#     def test_inputs(self):
-#         for fileName in os.listdir(
-#                 "multiview_platform/mono_multi_view_classifiers/monoview_classifiers"):
-#             if fileName[-3:] == ".py" and fileName != "__init__.py":
-#                 monoview_classifier_module = getattr(monoview_classifiers,
-#                                                      fileName[:-3])
-#                 with self.assertRaises(TypeError,
-#                                        msg="paramsToSet must have 2 positional args") as catcher:
-#                     monoview_classifier_module.paramsToSet(2,
-#                                                            np.random.RandomState(
-#                                                                42), 10)
-#                     monoview_classifier_module.paramsToSet(2)
-#                     monoview_classifier_module.paramsToSet()
-#                 res = monoview_classifier_module.paramsToSet(2,
-#                                                              np.random.RandomState(
-#                                                                  42))
-#
-#     def test_outputs(self):
-#         for fileName in os.listdir(
-#                 "multiview_platform/mono_multi_view_classifiers/monoview_classifiers"):
-#             if fileName[-3:] == ".py" and fileName != "__init__.py":
-#                 monoview_classifier_module = getattr(monoview_classifiers,
-#                                                      fileName[:-3])
-#                 res = monoview_classifier_module.paramsToSet(2,
-#                                                              np.random.RandomState(
-#                                                                  42))
-#                 self.assertEqual(type(res), list)
-#                 self.assertEqual(len(res), 2)
-#                 self.assertEqual(type(res[0]), dict)
-#
-# # class Test_getKWARGS(unittest.TestCase):
-# #
-# #     # TODO : Find a way to enter the right args
-# #
-# #     def test_inputs(self):
-# #         for fileName in os.listdir("Code/mono_multi_view_classifiers/monoview_classifiers"):
-# #             if fileName[-3:] == ".py" and fileName != "__init__.py":
-# #                 monoview_classifier_module = getattr(monoview_classifiers, fileName[:-3])
-# #                 with self.assertRaises(TypeError, msg="getKWARGS must have 1 positional args") as catcher:
-# #                     monoview_classifier_module.getKWARGS()
-# #                     monoview_classifier_module.getKWARGS([1],2)
diff --git a/multiview_platform/tests/test_multi_view/__init__.py b/multiview_platform/tests/test_multi_view/__init__.py
deleted file mode 100644
index e69de29bb2d1d6434b8b29ae775ad8c2e48c5391..0000000000000000000000000000000000000000
diff --git a/multiview_platform/tests/test_multi_view/test_exec_multiview.py b/multiview_platform/tests/test_multi_view/test_exec_multiview.py
deleted file mode 100644
index e0e6d872a215820518b098715fe297dd9f7c1fce..0000000000000000000000000000000000000000
--- a/multiview_platform/tests/test_multi_view/test_exec_multiview.py
+++ /dev/null
@@ -1,90 +0,0 @@
-import os
-import unittest
-
-import h5py
-import numpy as np
-from sklearn.model_selection import StratifiedKFold
-
-from multiview_platform.tests.utils import rm_tmp, tmp_path, test_dataset
-
-from multiview_platform.mono_multi_view_classifiers.multiview import exec_multiview
-
-
-class Test_init_constants(unittest.TestCase):
-
-    @classmethod
-    def setUpClass(cls):
-        rm_tmp()
-        os.mkdir(tmp_path)
-
-    @classmethod
-    def tearDownClass(cls):
-        rm_tmp()
-
-    def test_simple(self):
-        classifier_name, t_start, views_indices, \
-        classifier_config, views, learning_rate, labels, output_file_name, \
-        directory, base_file_name, metrics = exec_multiview.init_constants(
-            kwargs={"view_names":["ViewN0", "ViewN2", "ViewN1", ],
-                    "view_indices": [0,2,1],
-                    "classifier_name":"test_clf",
-                    "test_clf":{}},
-            classification_indices=[np.array([0,1,4,2]), np.array([3])],
-            metrics={"accuracy_score*":{}},
-            name="test_dataset",
-            nb_cores=1,
-            k_folds=StratifiedKFold(n_splits=2),
-            dataset_var=test_dataset,
-            directory=tmp_path
-        )
-        self.assertEqual(classifier_name, "test_clf")
-        self.assertEqual(views_indices, [0,2,1])
-        self.assertEqual(classifier_config, {})
-        self.assertEqual(views, ["ViewN0", "ViewN2", "ViewN1", ])
-        self.assertEqual(learning_rate, 4/5)
-
-    def test_exec_multiview_no_hps(self):
-        res = exec_multiview.exec_multiview(
-            directory=tmp_path,
-            dataset_var=test_dataset,
-            name="test_dataset",
-            classification_indices=[np.array([0,1,4,2]), np.array([3])],
-            k_folds=StratifiedKFold(n_splits=2),
-            nb_cores=1,
-            database_type="", path="",
-            labels_dictionary={0:"yes", 1:"no"},
-            random_state=np.random.RandomState(42),
-            labels=test_dataset.get_labels(),
-            hps_method="None",
-            hps_kwargs={},
-            metrics=None,
-            n_iter=30,
-            **{"view_names":["ViewN0", "ViewN2", "ViewN1", ],
-                    "view_indices": [0,2,1],
-                    "classifier_name":"weighted_linear_early_fusion",
-                    "weighted_linear_early_fusion":{}}
-        )
-
-    def test_exec_multiview(self):
-        res = exec_multiview.exec_multiview(
-            directory=tmp_path,
-            dataset_var=test_dataset,
-            name="test_dataset",
-            classification_indices=[np.array([0,1,4,2]), np.array([3])],
-            k_folds=StratifiedKFold(n_splits=2),
-            nb_cores=1,
-            database_type="", path="",
-            labels_dictionary={0:"yes", 1:"no"},
-            random_state=np.random.RandomState(42),
-            labels=test_dataset.get_labels(),
-            hps_method="Grid",
-            hps_kwargs={"param_grid":
-                            {"monoview_classifier_config":[{"max_depth":3}, {"max_depth":1}]},
-                             },
-            metrics=None,
-            n_iter=30,
-            **{"view_names":["ViewN0", "ViewN2", "ViewN1", ],
-                    "view_indices": [0,2,1],
-                    "classifier_name":"weighted_linear_early_fusion",
-                    "weighted_linear_early_fusion":{}}
-        )
\ No newline at end of file
diff --git a/multiview_platform/tests/test_multi_view/test_multiview_utils.py b/multiview_platform/tests/test_multi_view/test_multiview_utils.py
deleted file mode 100644
index 6cb880637e3d415844199fb103c6122184c3a143..0000000000000000000000000000000000000000
--- a/multiview_platform/tests/test_multi_view/test_multiview_utils.py
+++ /dev/null
@@ -1,89 +0,0 @@
-import os
-import unittest
-
-import h5py
-import numpy as np
-from sklearn.model_selection import StratifiedKFold
-
-from multiview_platform.tests.utils import rm_tmp, tmp_path, test_dataset
-
-from multiview_platform.mono_multi_view_classifiers.multiview import multiview_utils
-
-
-class FakeMVClassif(multiview_utils.BaseMultiviewClassifier):
-
-    def __init__(self, mc=True):
-        self.mc=mc
-        pass
-
-    def fit(self, X, y):
-        if not self.mc:
-            raise ValueError
-        else:
-            pass
-
-
-
-class TestBaseMultiviewClassifier(unittest.TestCase):
-
-    @classmethod
-    def setUpClass(cls):
-        os.mkdir(tmp_path)
-
-    @classmethod
-    def tearDownClass(cls):
-        rm_tmp()
-
-    def test_accepts_multiclass(self):
-        rs = np.random.RandomState(42)
-        accepts = FakeMVClassif().accepts_multi_class(rs)
-        self.assertEqual(accepts, True)
-        accepts = FakeMVClassif(mc=False).accepts_multi_class(rs)
-        self.assertEqual(accepts, False)
-        self.assertRaises(ValueError, FakeMVClassif(mc=False).accepts_multi_class, rs,**{"n_samples":2, "n_classes":3})
-
-class TestConfigGenerator(unittest.TestCase):
-
-    @classmethod
-    def setUpClass(cls):
-        cls.rs = np.random.RandomState(42)
-
-    def test_simple(self):
-        cfg_gen = multiview_utils.ConfigGenerator(["decision_tree", "decision_tree"])
-        sample = cfg_gen.rvs(self.rs)
-        self.assertEqual(sample, {'decision_tree': {'criterion': 'entropy',
-                   'max_depth': 103,
-                   'splitter': 'best'}})
-
-class TestFunctions(unittest.TestCase):
-
-    @classmethod
-    def setUpClass(cls):
-        os.mkdir(tmp_path)
-        cls.rs = np.random.RandomState(42)
-
-    @classmethod
-    def tearDownClass(cls):
-        rm_tmp()
-
-    def test_get_available_monoview_classifiers(self):
-        avail = multiview_utils.get_available_monoview_classifiers()
-        self.assertEqual(avail, ['adaboost',
-                                 'decision_tree',
-                                 'gradient_boosting',
-                                 'knn',
-                                 'lasso',
-                                 'random_forest',
-                                 'sgd',
-                                 'svm_linear',
-                                 'svm_poly',
-                                 'svm_rbf'])
-        avail = multiview_utils.get_available_monoview_classifiers(need_probas=True)
-        self.assertEqual(avail, ['adaboost',
-                                 'decision_tree',
-                                 'gradient_boosting',
-                                 'knn',
-                                 'random_forest',
-                                 'svm_linear',
-                                 'svm_poly',
-                                 'svm_rbf'])
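The `need_probas=True` branch above drops `lasso` and `sgd`, i.e. the monoview classifiers that cannot reliably emit class probabilities. For sklearn-style estimators the usual capability probe is the presence of `predict_proba`; a generic sketch of that idea (an assumption about the mechanism, not the platform's code):

```python
from sklearn.ensemble import RandomForestClassifier
from sklearn.linear_model import Lasso


def can_output_probabilities(estimator_class):
    # True when the estimator exposes predict_proba.
    return hasattr(estimator_class, "predict_proba")


assert can_output_probabilities(RandomForestClassifier)
assert not can_output_probabilities(Lasso)  # regression model, no probabilities
```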
diff --git a/multiview_platform/tests/test_multiview_classifiers/Test_PseudoCQMeasure/__init__.py b/multiview_platform/tests/test_multiview_classifiers/Test_PseudoCQMeasure/__init__.py
deleted file mode 100644
index e69de29bb2d1d6434b8b29ae775ad8c2e48c5391..0000000000000000000000000000000000000000
diff --git a/multiview_platform/tests/test_multiview_classifiers/Test_PseudoCQMeasure/test_PseudoCQFusionModule.py b/multiview_platform/tests/test_multiview_classifiers/Test_PseudoCQMeasure/test_PseudoCQFusionModule.py
deleted file mode 100644
index 65e22eb8f7dff86aec92af8d1c7adc9e21838d49..0000000000000000000000000000000000000000
--- a/multiview_platform/tests/test_multiview_classifiers/Test_PseudoCQMeasure/test_PseudoCQFusionModule.py
+++ /dev/null
@@ -1,22 +0,0 @@
-# import unittest
-#
-# import numpy as np
-#
-# from ....mono_multi_view_classifiers.multiview_classifiers.entropy_fusion_old import EntropyFusionModule
-#
-# class Test_entropy(unittest.TestCase):
-#
-#     @classmethod
-#     def setUpClass(cls):
-#         cls.classifiersDecisions = np.array([
-#             [np.random.randint(0,2,(2,5)), [[0,0,1,0,1], [0,1,0,1,0]], np.random.randint(0,2,(2,5)), np.random.randint(0,2,(2,5)), np.random.randint(0,2,(2,5))],
-#             [np.random.randint(0,2, (2, 5)), np.random.randint(0,2, (2, 5)), np.random.randint(0,2, (2, 5)), [[0, 0, 1, 1, 0], [0, 1, 0, 1, 0]], np.random.randint(0,2, (2, 5))],
-#             [np.random.randint(0,2, (2, 5)), np.random.randint(0,2, (2, 5)), np.random.randint(0,2, (2, 5)), np.random.randint(0,2, (2, 5)), [[0, 1, 1, 1, 1], [0, 1, 0, 1, 0]]],
-#             ])
-#         cls.combination = [1,3,4]
-#         cls.foldsGroundTruth = np.array([[1,1,0,0,1], [0,1,0,1,0]])
-#         cls.foldsLen = ""
-#
-#     def test_simple(cls):
-#         entropy_score = EntropyFusionModule.entropy(cls.classifiersDecisions, cls.combination, cls.foldsGroundTruth, cls.foldsLen)
-#         cls.assertEqual(entropy_score, 0.15, 'Wrong values for entropy measure')
diff --git a/multiview_platform/tests/test_multiview_classifiers/__init__.py b/multiview_platform/tests/test_multiview_classifiers/__init__.py
deleted file mode 100644
index e69de29bb2d1d6434b8b29ae775ad8c2e48c5391..0000000000000000000000000000000000000000
diff --git a/multiview_platform/tests/test_multiview_classifiers/test_additions/__init__.py b/multiview_platform/tests/test_multiview_classifiers/test_additions/__init__.py
deleted file mode 100644
index e69de29bb2d1d6434b8b29ae775ad8c2e48c5391..0000000000000000000000000000000000000000
diff --git a/multiview_platform/tests/test_multiview_classifiers/test_additions/test_diversity_utils.py b/multiview_platform/tests/test_multiview_classifiers/test_additions/test_diversity_utils.py
deleted file mode 100644
index debdc51b799833cf87064c8dfe788f49c4dda879..0000000000000000000000000000000000000000
--- a/multiview_platform/tests/test_multiview_classifiers/test_additions/test_diversity_utils.py
+++ /dev/null
@@ -1,74 +0,0 @@
-import unittest
-import numpy as np
-
-import  multiview_platform.mono_multi_view_classifiers.multiview_classifiers.additions.diversity_utils  as du
-
-
-class FakeDataset():
-
-    def __init__(self, views, labels):
-        self.nb_views = views.shape[0]
-        self.dataset_length = views.shape[2]
-        self.views = views
-        self.labels = labels
-
-    def get_v(self, view_index, example_indices):
-        return self.views[view_index, example_indices]
-
-    def get_nb_class(self, example_indices):
-        return np.unique(self.labels[example_indices])
-
-
-class FakeDivCoupleClf(du.CoupleDiversityFusionClassifier):
-
-    def __init__(self, rs, classifier_names=None,
-                 classifiers_config=None, monoview_estimators=None):
-        super(FakeDivCoupleClf, self).__init__(random_state=rs,
-                                               classifier_names=classifier_names,
-                                               classifier_configs=classifiers_config,
-                                               monoview_estimators=monoview_estimators)
-        self.rs = rs
-
-    def diversity_measure(self, a, b, c):
-        return self.rs.randint(0,100)
-
-
-class FakeDivGlobalClf(du.GlobalDiversityFusionClassifier):
-
-    def __init__(self, rs, classifier_names=None,
-                 classifiers_config=None, monoview_estimators=None):
-        super(FakeDivGlobalClf, self).__init__(random_state=rs,
-                                               classifier_names=classifier_names,
-                                               classifier_configs=classifiers_config,
-                                               monoview_estimators=monoview_estimators)
-        self.rs = rs
-
-    def diversity_measure(self, a, b, c):
-        return self.rs.randint(0,100)
-
-class Test_DiversityFusion(unittest.TestCase):
-
-    @classmethod
-    def setUpClass(cls):
-        cls.classifier_names = ["adaboost", "decision_tree"]
-        cls.classifiers_config = {"adaboost":{"n_estimators":5,}}
-        cls.random_state = np.random.RandomState(42)
-        cls.y = cls.random_state.randint(0,2,6)
-        cls.X = FakeDataset(cls.random_state.randint(0,100,(2,5,6)), cls.y)
-        cls.train_indices = [0,1,2,4]
-        cls.views_indices = [0,1]
-
-    def test_simple_couple(self):
-        clf = FakeDivCoupleClf(self.random_state, classifier_names=self.classifier_names,
-                                              classifiers_config=self.classifiers_config)
-        clf.fit(self.X, self.y, self.train_indices, self.views_indices)
-
-    def test_simple_global(self):
-        clf = FakeDivGlobalClf(self.random_state,
-                               classifier_names=self.classifier_names,
-                               classifiers_config=self.classifiers_config)
-        clf.fit(self.X, self.y, self.train_indices, self.views_indices)
-
-
-if __name__ == '__main__':
-    unittest.main()
\ No newline at end of file
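The two fake subclasses above only override `diversity_measure`, which suggests the base classes differ in how that measure is aggregated: a couple-style measure scores pairs of monoview classifiers, while a global-style measure scores a whole combination at once. A sketch of the pairwise aggregation, assuming (n_classifiers, n_examples) 0/1 decision matrices (a hypothetical helper, not the platform's code):

```python
import itertools

import numpy as np


def couple_diversity(decisions, y, pair_measure):
    # Average the pairwise diversity measure over all classifier pairs.
    pairs = itertools.combinations(range(decisions.shape[0]), 2)
    scores = [np.mean(pair_measure(decisions[i], decisions[j], y))
              for i, j in pairs]
    return float(np.mean(scores))
```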
diff --git a/multiview_platform/tests/test_multiview_classifiers/test_additions/test_jumbo_fusion_utils.py b/multiview_platform/tests/test_multiview_classifiers/test_additions/test_jumbo_fusion_utils.py
deleted file mode 100644
index 9e242ed89bd067148b0d4caa5da39f4057d04c26..0000000000000000000000000000000000000000
--- a/multiview_platform/tests/test_multiview_classifiers/test_additions/test_jumbo_fusion_utils.py
+++ /dev/null
@@ -1,22 +0,0 @@
-import unittest
-import numpy as np
-
-import  multiview_platform.mono_multi_view_classifiers.multiview_classifiers.additions.jumbo_fusion_utils  as ju
-
-
-class FakeDataset():
-
-    def __init__(self, views, labels):
-        self.nb_views = views.shape[0]
-        self.dataset_length = views.shape[2]
-        self.views = views
-        self.labels = labels
-
-    def get_v(self, view_index, example_indices):
-        return self.views[view_index, example_indices]
-
-    def get_nb_class(self, example_indices):
-        return np.unique(self.labels[example_indices])
-
-
-#TODO
\ No newline at end of file
diff --git a/multiview_platform/tests/test_multiview_classifiers/test_difficulty_fusion.py b/multiview_platform/tests/test_multiview_classifiers/test_difficulty_fusion.py
deleted file mode 100644
index ff298b8c07ed5a8225f27885ad911b935bbd17ba..0000000000000000000000000000000000000000
--- a/multiview_platform/tests/test_multiview_classifiers/test_difficulty_fusion.py
+++ /dev/null
@@ -1,23 +0,0 @@
-import unittest
-
-import numpy as np
-
-from multiview_platform.mono_multi_view_classifiers.multiview_classifiers import difficulty_fusion
-
-
-class Test_difficulty_fusion(unittest.TestCase):
-
-    @classmethod
-    def setUpClass(cls):
-        cls.random_state = np.random.RandomState(42)
-        cls.classifiers_decisions = cls.random_state.randint(0, 2, size=(5, 3, 5))
-        cls.combination = [1, 3, 4]
-        cls.y = np.array([1, 1, 0, 0, 1])
-        cls.difficulty_fusion_clf = difficulty_fusion.DifficultyFusion()
-
-    def test_simple(self):
-        difficulty_measure = self.difficulty_fusion_clf.diversity_measure(
-            self.classifiers_decisions,
-            self.combination,
-            self.y)
-        self.assertAlmostEqual(difficulty_measure, 0.1875)
diff --git a/multiview_platform/tests/test_multiview_classifiers/test_disagree_fusion.py b/multiview_platform/tests/test_multiview_classifiers/test_disagree_fusion.py
deleted file mode 100644
index bb08e016e5ea75ac78f373ead873e8721eea31e3..0000000000000000000000000000000000000000
--- a/multiview_platform/tests/test_multiview_classifiers/test_disagree_fusion.py
+++ /dev/null
@@ -1,23 +0,0 @@
-# # import unittest
-#
-import numpy as np
-import unittest
-#
-from multiview_platform.mono_multi_view_classifiers.multiview_classifiers import disagree_fusion
-
-
-class Test_disagree(unittest.TestCase):
-
-    @classmethod
-    def setUpClass(cls):
-        cls.monoview_decision_1 = np.array([0, 0, 1, 1])
-        cls.monoview_decision_2 = np.array([0, 1, 0, 1])
-        cls.ground_truth = None
-        cls.clf = disagree_fusion.DisagreeFusion()
-
-    def test_simple(self):
-        disagreement = self.clf.diversity_measure(self.monoview_decision_1,
-                                                  self.monoview_decision_2,
-                                                  self.ground_truth)
-        np.testing.assert_array_equal(disagreement,
-                                      np.array([False, True, True, False]))
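
The expected array `[False, True, True, False]` above is simply the elementwise inequality of the two decision vectors; the ground truth is unused. A standalone sketch of that disagreement measure (not necessarily SUMMIT's exact code):

```python
import numpy as np

def disagreement(pred_a, pred_b, ground_truth=None):
    # True wherever the two monoview classifiers predict different labels;
    # the ground truth plays no role in this measure
    return np.asarray(pred_a) != np.asarray(pred_b)

disagreement(np.array([0, 0, 1, 1]), np.array([0, 1, 0, 1]))
# -> array([False,  True,  True, False])
```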
diff --git a/multiview_platform/tests/test_multiview_classifiers/test_diversity_utils.py b/multiview_platform/tests/test_multiview_classifiers/test_diversity_utils.py
deleted file mode 100644
index 46c9e59652d29787f2a10a3faca09f796c300f72..0000000000000000000000000000000000000000
--- a/multiview_platform/tests/test_multiview_classifiers/test_diversity_utils.py
+++ /dev/null
@@ -1,42 +0,0 @@
-#
-# import numpy as np
-# import unittest
-#
-# from multiview_platform.mono_multi_view_classifiers.multiview.additions import \
-#     diversity_utils
-#
-#
-# def fake_measure(a, b, c, d, e):
-#     return 42
-#
-#
-# class Test_global_div_measure(unittest.TestCase):
-#
-#     @classmethod
-#     def setUpClass(cls):
-#         cls.random_state = np.random.RandomState(42)
-#         cls.allClassifiersNames = [["SCM", "SVM", "DT"], ["SCM", "SVM", "DT"]]
-#         cls.views_indices = np.array([0, 1])
-#         cls.classifiersDecisions = np.zeros(
-#             (cls.views_indices.shape[0], len(cls.allClassifiersNames), 3, 6),
-#             dtype=int)
-#         for classifer_index, classifier in enumerate(cls.allClassifiersNames):
-#             for view_index, view in enumerate(cls.views_indices):
-#                 cls.classifiersDecisions[
-#                     view_index, classifer_index] = np.array([
-#                     cls.random_state.randint(0, 2, 6),
-#                     cls.random_state.randint(0, 2, 6),
-#                     cls.random_state.randint(0, 2, 6)])
-#         cls.folds_ground_truth = np.array(
-#             [np.array([1, 1, 1, 0, 0, 0]) for _ in range(3)])
-#         cls.classification_indices = np.array([])
-#         cls.measurement = fake_measure
-#
-#     def test_simple(cls):
-#         clf_names, diversity_measure = diversity_utils.global_div_measure(
-#             cls.allClassifiersNames,
-#             cls.classifiersDecisions,
-#             cls.measurement,
-#             cls.folds_ground_truth)
-#         cls.assertEqual(len(clf_names), 2)
-#         cls.assertEqual(diversity_measure, 42)
diff --git a/multiview_platform/tests/test_multiview_classifiers/test_double_fault_fusion.py b/multiview_platform/tests/test_multiview_classifiers/test_double_fault_fusion.py
deleted file mode 100644
index 7e6fd3d70bd5ad84ec62aee0b3e00c8ec336cc34..0000000000000000000000000000000000000000
--- a/multiview_platform/tests/test_multiview_classifiers/test_double_fault_fusion.py
+++ /dev/null
@@ -1,22 +0,0 @@
-
-import numpy as np
-import unittest
-
-from multiview_platform.mono_multi_view_classifiers.multiview_classifiers import double_fault_fusion
-
-
-class Test_double_fault(unittest.TestCase):
-
-    @classmethod
-    def setUpClass(cls):
-        cls.monoview_decision_1 = np.array([0, 0, 0, 0, 1, 1, 1, 1])
-        cls.monoview_decision_2 = np.array([0, 0, 1, 1, 0, 0, 1, 1])
-        cls.ground_truth = np.array([0, 1, 0, 1, 0, 1, 0, 1])
-        cls.clf = double_fault_fusion.DoubleFaultFusion()
-
-    def test_simple(self):
-        double_fault = self.clf.diversity_measure(self.monoview_decision_1,
-                                                  self.monoview_decision_2,
-                                                  self.ground_truth)
-        np.testing.assert_array_equal(double_fault,
-                                      np.array([False, True, False, False, False, False, True, False]))
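
Double fault is the complementary measure: it flags the examples on which both classifiers are wrong at once, so lower values mean more diversity. A sketch that reproduces the expected array above:

```python
import numpy as np

def double_fault(pred_a, pred_b, ground_truth):
    # True only where both monoview classifiers are simultaneously wrong
    return np.logical_and(pred_a != ground_truth, pred_b != ground_truth)
```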
diff --git a/multiview_platform/tests/test_multiview_classifiers/test_entropy_fusion.py b/multiview_platform/tests/test_multiview_classifiers/test_entropy_fusion.py
deleted file mode 100644
index dc88bfcc3070b57707f4f8931a55ea1c337f468d..0000000000000000000000000000000000000000
--- a/multiview_platform/tests/test_multiview_classifiers/test_entropy_fusion.py
+++ /dev/null
@@ -1,23 +0,0 @@
-import unittest
-
-import numpy as np
-
-from multiview_platform.mono_multi_view_classifiers.multiview_classifiers import entropy_fusion
-
-
-class Test_entropy_fusion(unittest.TestCase):
-
-    @classmethod
-    def setUpClass(cls):
-        cls.random_state = np.random.RandomState(42)
-        cls.classifiers_decisions = cls.random_state.randint(0, 2, size=(5, 3, 5))
-        cls.combination = [1, 3, 4]
-        cls.y = np.array([1, 1, 0, 0, 1])
-        cls.clf = entropy_fusion.EntropyFusion()
-
-    def test_simple(self):
-        entropy = self.clf.diversity_measure(
-            self.classifiers_decisions,
-            self.combination,
-            self.y)
-        self.assertAlmostEqual(entropy, 0.2)
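
For reference, the textbook entropy diversity measure (Kuncheva & Whitaker, 2003) counts, per example, how many of the L classifiers are correct and rewards near-even splits; whether `EntropyFusion` uses this exact normalisation is an assumption here, so the 0.2 above is not re-derived.

```python
import numpy as np

def entropy_measure(decisions, y):
    # decisions: (n_classifiers, n_examples) array of predicted labels
    L = decisions.shape[0]
    n_correct = np.sum(decisions == y, axis=0)  # correct votes per example
    # min(l, L - l) peaks when the ensemble is evenly split on an example
    return np.mean(np.minimum(n_correct, L - n_correct) / (L - np.ceil(L / 2)))
```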
diff --git a/multiview_platform/tests/test_multiview_classifiers/test_weighted_linear_early_fusion.py b/multiview_platform/tests/test_multiview_classifiers/test_weighted_linear_early_fusion.py
deleted file mode 100644
index c86607525dc958adf14907a8d3da190682a6adb8..0000000000000000000000000000000000000000
--- a/multiview_platform/tests/test_multiview_classifiers/test_weighted_linear_early_fusion.py
+++ /dev/null
@@ -1,67 +0,0 @@
-import unittest
-
-import numpy as np
-import os
-
-from multiview_platform.tests.utils import rm_tmp, tmp_path, test_dataset
-
-from multiview_platform.mono_multi_view_classifiers.multiview_classifiers import \
-    weighted_linear_early_fusion
-
-class Test_WeightedLinearEarlyFusion(unittest.TestCase):
-
-    @classmethod
-    def setUpClass(cls):
-        rm_tmp()
-        cls.random_state = np.random.RandomState(42)
-        cls.view_weights = [0.5, 0.5]
-        cls.monoview_classifier_name = "decision_tree"
-        cls.monoview_classifier_config = {"max_depth":1, "criterion": "gini", "splitter": "best"}
-        cls.classifier = weighted_linear_early_fusion.WeightedLinearEarlyFusion(
-            random_state=cls.random_state, view_weights=cls.view_weights,
-            monoview_classifier_name=cls.monoview_classifier_name,
-            monoview_classifier_config=cls.monoview_classifier_config)
-        cls.dataset = test_dataset
-
-    @classmethod
-    def tearDownClass(cls):
-        rm_tmp()
-
-    def test_simple(self):
-        np.testing.assert_array_equal(self.view_weights, self.classifier.view_weights)
-
-    def test_fit(self):
-        self.assertRaises(AttributeError, getattr,
-                          self.classifier.monoview_classifier, "classes_")
-        self.classifier.fit(self.dataset, test_dataset.get_labels(), None, None)
-        np.testing.assert_array_equal(self.classifier.monoview_classifier.classes_,
-                                      np.array([0,1]))
-
-    def test_predict(self):
-        self.classifier.fit(self.dataset, test_dataset.get_labels(), None, None)
-        predicted_labels = self.classifier.predict(self.dataset, None, None)
-        np.testing.assert_array_equal(predicted_labels, test_dataset.get_labels())
-
-    def test_transform_data_to_monoview_simple(self):
-        example_indices, X = self.classifier.transform_data_to_monoview(self.dataset,
-                                                  None, None)
-        self.assertEqual(X.shape, (5,12))
-        np.testing.assert_array_equal(X, np.concatenate((self.dataset.get_v(0), self.dataset.get_v(1)), axis=1))
-        np.testing.assert_array_equal(example_indices, np.arange(5))
-
-    def test_transform_data_to_monoview_view_select(self):
-        example_indices, X = self.classifier.transform_data_to_monoview(
-            self.dataset,
-            None, np.array([0]))
-        self.assertEqual(X.shape, (5, 6))
-        np.testing.assert_array_equal(X, self.dataset.get_v(0))
-        np.testing.assert_array_equal(example_indices, np.arange(5))
-
-    def test_transform_data_to_monoview_example_view_select(self):
-        example_indices, X = self.classifier.transform_data_to_monoview(
-            self.dataset,
-            np.array([1,2,3]), np.array([0]))
-        self.assertEqual(X.shape, (3, 6))
-        np.testing.assert_array_equal(X, self.dataset.get_v(0)[np.array([1,2,3]), :])
-        np.testing.assert_array_equal(example_indices, np.array([1,2,3]))
-
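
The three `transform_data_to_monoview` tests pin the behaviour down: select the requested examples in each requested view, then concatenate the views along the feature axis, turning two 5x6 views into one 5x12 matrix (the view weights are held as an attribute and tested separately). A minimal standalone equivalent, assuming a `get_v(view_index)` accessor like the one used above:

```python
import numpy as np

def to_monoview(dataset, example_indices, view_indices):
    # Stack the selected views side by side into one monoview matrix
    return np.concatenate(
        [dataset.get_v(v)[example_indices, :] for v in view_indices],
        axis=1)
```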
diff --git a/multiview_platform/tests/test_result_analysis/__init__.py b/multiview_platform/tests/test_result_analysis/__init__.py
deleted file mode 100644
index e69de29bb2d1d6434b8b29ae775ad8c2e48c5391..0000000000000000000000000000000000000000
diff --git a/multiview_platform/tests/test_result_analysis/test_duration_analysis.py b/multiview_platform/tests/test_result_analysis/test_duration_analysis.py
deleted file mode 100644
index efe6c68d792fe1d638821ea70f649a342f41a664..0000000000000000000000000000000000000000
--- a/multiview_platform/tests/test_result_analysis/test_duration_analysis.py
+++ /dev/null
@@ -1,41 +0,0 @@
-import unittest
-import numpy as np
-import pandas as pd
-
-from multiview_platform.mono_multi_view_classifiers.result_analysis import duration_analysis
-
-class FakeClassifierResult:
-
-    def __init__(self, i=0):
-        self.i=i
-        if i == 0:
-            self.hps_duration = 10
-            self.fit_duration = 12
-            self.pred_duration = 15
-        else:
-            self.hps_duration = 1
-            self.fit_duration = 2
-            self.pred_duration = 5
-
-
-    def get_classifier_name(self):
-        if self.i == 0:
-            return 'test1'
-        else:
-            return 'test2'
-
-
-
-class Test_get_duration(unittest.TestCase):
-
-    def test_simple(self):
-        results = [FakeClassifierResult(), FakeClassifierResult(i=1)]
-        durs = duration_analysis.get_duration(results)
-        pd.testing.assert_frame_equal(durs,
-                                      pd.DataFrame(index=['test1', 'test2'],
-                                                   columns=['hps', 'fit', 'pred'],
-                                                   data=np.array([np.array([10,12,15]),
-                                                                  np.array([1,2,5])]),
-                                                   dtype=object))
-
-
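
The expected frame implies that `get_duration` just tabulates the three duration attributes, one row per classifier. A standalone re-sketch consistent with that assertion (not necessarily the platform's code):

```python
import pandas as pd

def get_duration(results):
    # One row per classifier, one column per pipeline stage
    return pd.DataFrame(
        {res.get_classifier_name(): [res.hps_duration,
                                     res.fit_duration,
                                     res.pred_duration]
         for res in results},
        index=["hps", "fit", "pred"], dtype=object).T
```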
diff --git a/multiview_platform/tests/test_result_analysis/test_error_analysis.py b/multiview_platform/tests/test_result_analysis/test_error_analysis.py
deleted file mode 100644
index 07ec87c2e4b8d96dc93db8d92e63d346ddcfcf2b..0000000000000000000000000000000000000000
--- a/multiview_platform/tests/test_result_analysis/test_error_analysis.py
+++ /dev/null
@@ -1,76 +0,0 @@
-import unittest
-import numpy as np
-
-from multiview_platform.mono_multi_view_classifiers.monoview.monoview_utils import MonoviewResult
-from multiview_platform.mono_multi_view_classifiers.multiview.multiview_utils import MultiviewResult
-
-from multiview_platform.mono_multi_view_classifiers.result_analysis.error_analysis import get_example_errors, gen_error_data, gen_error_data_glob
-
-
-class Test_get_example_errors(unittest.TestCase):
-
-    def test_simple(self):
-        ground_truth = np.array([0,1,0,1,0,1,0,1, -100])
-        results = [MultiviewResult("mv", "", {"accuracy_score": [0.7, 0.75],
-                                              "f1_score": [0.71, 0.76]},
-                                   np.array([0,0,0,0,1,1,1,1,1]),
-                                   0,0,0, {}),
-                   MonoviewResult(0,
-                                  "dt",
-                                  "1",
-                                  {"accuracy_score": [0.8, 0.85],
-                                   "f1_score": [0.81, 0.86]},
-                                  np.array([0, 0, 1, 1, 0, 0, 1, 1, 0]), "", "",
-                                  "", "", 0, 0, {})
-                   ]
-        example_errors = get_example_errors(ground_truth,
-                                            results)
-        self.assertIsInstance(example_errors, dict)
-        np.testing.assert_array_equal(example_errors["mv"],
-                                      np.array([1,0,1,0,0,1,0,1,-100]))
-        np.testing.assert_array_equal(example_errors["dt-1"],
-                                      np.array([1, 0, 0, 1, 1, 0, 0, 1,-100]))
-
-class Test_gen_error_data(unittest.TestCase):
-
-    def test_simple(self):
-        random_state = np.random.RandomState(42)
-        ada_data = random_state.randint(0,2,size=7)
-        mv_data = random_state.randint(0, 2, size=7)
-        example_errors = {"ada-1": ada_data,
-                          "mv": mv_data}
-        nb_classifiers, nb_examples, classifiers_names, \
-        data_2d, error_on_examples = gen_error_data(example_errors)
-        self.assertEqual(nb_classifiers, 2)
-        self.assertEqual(nb_examples, 7)
-        self.assertEqual(classifiers_names, ["ada-1", "mv"])
-        np.testing.assert_array_equal(data_2d, np.array([ada_data, mv_data]).transpose())
-        np.testing.assert_array_equal(error_on_examples, (ada_data+mv_data)/nb_classifiers)
-
-
-
-class Test_gen_error_data_glob(unittest.TestCase):
-
-    def test_simple(self):
-        random_state = np.random.RandomState(42)
-
-        ada_error_data_1 = random_state.randint(0,2,7)
-        ada_error_data_2 = random_state.randint(0, 2, 7)
-        ada_sum = ada_error_data_1+ada_error_data_2
-        mv_error_data_1 = random_state.randint(0, 2, 7)
-        mv_error_data_2 = random_state.randint(0, 2, 7)
-        mv_sum = mv_error_data_1+mv_error_data_2
-
-        combi_results = {"ada-1":ada_sum, "mv": mv_sum}
-
-        stats_iter = 2
-
-        nb_examples, nb_classifiers, \
-        data, error_on_examples, \
-        classifier_names = gen_error_data_glob(combi_results,
-                                               stats_iter)
-        self.assertEqual(nb_examples, 7)
-        self.assertEqual(nb_classifiers, 2)
-        np.testing.assert_array_equal(data, np.array([ada_sum, mv_sum]).transpose())
-        np.testing.assert_array_equal(error_on_examples, np.sum(np.array([ada_sum, mv_sum]), axis=0)/(nb_classifiers*stats_iter))
-        self.assertEqual(classifier_names, ["ada-1", "mv"])
\ No newline at end of file
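
Both tests above encode the same idea: stack the per-classifier error vectors into an (examples x classifiers) matrix and divide the per-example sum by the number of classifiers (times the number of statistical iterations in the global variant). A compact sketch covering both assertions, with hypothetical names:

```python
import numpy as np

def error_matrix(example_errors, stats_iter=1):
    # example_errors: dict mapping classifier name -> per-example error vector
    names = list(example_errors)
    data = np.array([example_errors[name] for name in names]).T
    error_on_examples = data.sum(axis=1) / (len(names) * stats_iter)
    return names, data, error_on_examples
```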
diff --git a/multiview_platform/tests/test_result_analysis/test_execution.py b/multiview_platform/tests/test_result_analysis/test_execution.py
deleted file mode 100644
index f42f818c48a2e774c23a51b75542a5b9b1cd76f9..0000000000000000000000000000000000000000
--- a/multiview_platform/tests/test_result_analysis/test_execution.py
+++ /dev/null
@@ -1,139 +0,0 @@
-import unittest
-import numpy as np
-import pandas as pd
-import os
-
-from multiview_platform.mono_multi_view_classifiers.monoview.monoview_utils import MonoviewResult
-from multiview_platform.mono_multi_view_classifiers.multiview.multiview_utils import MultiviewResult
-
-from multiview_platform.mono_multi_view_classifiers.result_analysis.execution import format_previous_results, get_arguments, analyze_iterations
-from multiview_platform.tests.utils import rm_tmp, tmp_path, test_dataset
-
-
-class FakeClassifierResult:
-
-    def __init__(self, i=1):
-        self.classifier_name = 'test' + str(i)
-        self.full_labels_pred = np.array([0, 1, 1, 2, 1])
-        self.hps_duration = i
-        self.fit_duration = i
-        self.pred_duration = i
-
-    def get_classifier_name(self):
-        return self.classifier_name
-
-class Test_format_previous_results(unittest.TestCase):
-
-    def test_simple(self):
-        iter_results = {"metrics_scores":[], "example_errors":[], "feature_importances":[], "labels":[], "durations":[], "class_metrics_scores":[]}
-        random_state = np.random.RandomState(42)
-
-        # Gen metrics data
-        metrics_1_data = random_state.uniform(size=(2,2))
-        metrics_2_data = random_state.uniform(size=(2,2))
-        metric_1_df = pd.DataFrame(data=metrics_1_data, index=["train", "test"],
-                                   columns=["ada-1", "mv"])
-        metric_2_df = pd.DataFrame(data=metrics_2_data, index=["train", "test"],
-                                   columns=["ada-1", "mv"])
-        iter_results["metrics_scores"].append({"acc": metric_1_df})
-        iter_results["metrics_scores"].append({"acc": metric_2_df})
-
-        # Gen error data
-        ada_error_data_1 = random_state.randint(0,2,7)
-        ada_error_data_2 = random_state.randint(0, 2, 7)
-        ada_sum = ada_error_data_1+ada_error_data_2
-        mv_error_data_1 = random_state.randint(0, 2, 7)
-        mv_error_data_2 = random_state.randint(0, 2, 7)
-        mv_sum = mv_error_data_1+mv_error_data_2
-        iter_results["example_errors"].append({})
-        iter_results["example_errors"].append({})
-        iter_results["example_errors"][0]["ada-1"] = ada_error_data_1
-        iter_results["example_errors"][0]["mv"] = mv_error_data_1
-        iter_results["example_errors"][1]["ada-1"] = ada_error_data_2
-        iter_results["example_errors"][1]["mv"] = mv_error_data_2
-
-        iter_results["durations"].append(pd.DataFrame(index=["ada-1", "mv"],
-                                                         columns=["plif", "plaf"],
-                                                         data=np.zeros((2,2))))
-        iter_results["durations"].append(pd.DataFrame(index=["ada-1", "mv"],
-                                                         columns=["plif",
-                                                                  "plaf"],
-                                                         data=np.ones((2, 2))))
-
-        # Running the function
-        metric_analysis, class_met, error_analysis, \
-        feature_importances, feature_stds, \
-        labels, durations_mean, duration_std = format_previous_results(iter_results)
-        mean_df = pd.DataFrame(data=np.mean(np.array([metrics_1_data,
-                                                      metrics_2_data]),
-                                            axis=0),
-                               index=["train", "test"],
-                               columns=["ada-1", "mv"])
-        std_df = pd.DataFrame(data=np.std(np.array([metrics_1_data,
-                                                    metrics_2_data]),
-                                          axis=0),
-                              index=["train", "test"],
-                              columns=["ada-1", "mv"])
-
-        # Testing
-        np.testing.assert_array_equal(metric_analysis["acc"]["mean"].loc["train"],
-                                      mean_df.loc["train"])
-        np.testing.assert_array_equal(metric_analysis["acc"]["mean"].loc["test"],
-            mean_df.loc["test"])
-        np.testing.assert_array_equal(metric_analysis["acc"]["std"].loc["train"],
-            std_df.loc["train"])
-        np.testing.assert_array_equal(metric_analysis["acc"]["std"].loc["test"],
-            std_df.loc["test"])
-        np.testing.assert_array_equal(ada_sum, error_analysis["ada-1"])
-        np.testing.assert_array_equal(mv_sum, error_analysis["mv"])
-        self.assertEqual(durations_mean.at["ada-1", 'plif'], 0.5)
-
-class Test_get_arguments(unittest.TestCase):
-
-    def setUp(self):
-        self.benchmark_argument_dictionaries = [{"flag": "good_flag", "valid": True},
-                                                {"flag": "bad_flag", "valid": False}]
-
-    def test_benchmark_wanted(self):
-        argument_dict = get_arguments(self.benchmark_argument_dictionaries, "good_flag")
-        self.assertTrue(argument_dict["valid"])
-
-
-class Test_analyze_iterations(unittest.TestCase):
-
-    @classmethod
-    def setUpClass(cls):
-        os.mkdir(tmp_path)
-        cls.results = [[0, [FakeClassifierResult(), FakeClassifierResult(i=2)], []],
-                       [1, [FakeClassifierResult(), FakeClassifierResult(i=2)], []]]
-        cls.benchmark_argument_dictionaries = [
-            {"labels_dictionary": {0: "zero", 1: "one", 2: "two"}, "flag": 0,
-             "directory": tmp_path, "args": {"name": "test_dataset"}},
-            {"labels_dictionary": {0: "zero", 1: "one", 2: "two"}, "flag": 1,
-             "directory": tmp_path, "args": {"name": "test_dataset"}}]
-        cls.stats_iter = 2
-        cls.metrics = {}
-        cls.example_ids = ['ex1', 'ex5', 'ex4', 'ex3', 'ex2']
-        cls.labels = np.array([0,1,2,1,1])
-
-
-    @classmethod
-    def tearDownClass(cls):
-        rm_tmp()
-
-    def test_simple(self):
-        analysis = analyze_iterations(self.results,
-                                      self.benchmark_argument_dictionaries,
-                                      self.stats_iter,
-                                      self.metrics,
-                                      self.example_ids,
-                                      self.labels)
-        res, iter_res, tracebacks, labels_names = analysis
-        self.assertEqual(labels_names, ['zero', 'one', 'two'])
-
-        self.assertEqual(iter_res['class_metrics_scores'], [{}, {}])
-
-        pd.testing.assert_frame_equal(iter_res['durations'][0],
-                                      pd.DataFrame(index=['test1', 'test2'], columns=['hps', 'fit', 'pred'],
-                                                   data=np.array([1, 1, 1, 2, 2, 2]).reshape((2, 3)), dtype=object))
-        np.testing.assert_array_equal(iter_res['example_errors'][0]['test1'], np.array([1, 1, 0, 0, 1]))
-        self.assertEqual(iter_res["feature_importances"], [{},{}])
-        np.testing.assert_array_equal(iter_res['labels'], np.array([0, 1, 2, 1, 1]))
-        self.assertEqual(iter_res['metrics_scores'], [{},{}])
-
-
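
The mean/std checks above describe the aggregation `format_previous_results` performs: for each metric, the per-iteration score frames are averaged elementwise and their standard deviation kept. One way to get both with pandas, on hypothetical identically-indexed frames (`ddof=0` to match `np.std` as used in the test):

```python
import pandas as pd

def mean_and_std(frames):
    # frames: list of identically-indexed DataFrames, one per stats iteration
    stacked = pd.concat(frames)
    return (stacked.groupby(level=0).mean(),
            stacked.groupby(level=0).std(ddof=0))
```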
diff --git a/multiview_platform/tests/test_result_analysis/test_feature_importances.py b/multiview_platform/tests/test_result_analysis/test_feature_importances.py
deleted file mode 100644
index 2a69e88c2bcbb036611d4a3006a69ac144b22004..0000000000000000000000000000000000000000
--- a/multiview_platform/tests/test_result_analysis/test_feature_importances.py
+++ /dev/null
@@ -1,36 +0,0 @@
-import unittest
-import numpy as np
-import pandas as pd
-
-from multiview_platform.mono_multi_view_classifiers.result_analysis import feature_importances
-from multiview_platform.mono_multi_view_classifiers.monoview.monoview_utils import MonoviewResult
-
-class FakeClassifier:
-    def __init__(self, i=0):
-        self.feature_importances_ = [i, i+1]
-
-class FakeClassifierResult(MonoviewResult):
-
-    def __init__(self, i=0):
-        self.i=i
-        self.hps_duration = i*10
-        self.fit_duration = (i+2)*10
-        self.pred_duration = (i+5)*10
-        self.clf = FakeClassifier(i)
-        self.view_name = 'testview'+str(i)
-        self.classifier_name = "test"+str(i)
-
-    def get_classifier_name(self):
-        return self.classifier_name
-
-
-
-class Test_get_feature_importances(unittest.TestCase):
-
-    def test_simple(self):
-        results = [FakeClassifierResult(), FakeClassifierResult(i=1)]
-        feat_importance = feature_importances.get_feature_importances(results)
-        pd.testing.assert_frame_equal(feat_importance["testview1"],
-                                      pd.DataFrame(index=None,columns=['test1'],
-                                                   data=np.array([1,2]).reshape((2,1)),
-                                                   ))
\ No newline at end of file
diff --git a/multiview_platform/tests/test_result_analysis/test_metric_analysis.py b/multiview_platform/tests/test_result_analysis/test_metric_analysis.py
deleted file mode 100644
index a34f06a462784b1358b0af57c48df95da62fbd82..0000000000000000000000000000000000000000
--- a/multiview_platform/tests/test_result_analysis/test_metric_analysis.py
+++ /dev/null
@@ -1,180 +0,0 @@
-import unittest
-import numpy as np
-import pandas as pd
-import os
-
-from multiview_platform.mono_multi_view_classifiers.monoview.monoview_utils import MonoviewResult
-from multiview_platform.mono_multi_view_classifiers.multiview.multiview_utils import MultiviewResult
-
-from multiview_platform.mono_multi_view_classifiers.result_analysis.metric_analysis import get_metrics_scores, init_plot, get_fig_size, sort_by_test_score
-
-class Test_get_metrics_scores(unittest.TestCase):
-
-
-    def test_simple(self):
-        metrics = {"accuracy_score*":{},"f1_score":{}}
-        results = [MonoviewResult(0,
-                                  "ada",
-                                  "0",
-                                  {"accuracy_score*": [0.9, 0.95],
-                                   "f1_score": [0.91, 0.96]},
-                                  "", "", "", "", "", 0, 0, {})]
-        metrics_scores, class_met = get_metrics_scores(metrics,
-                                                       results, [])
-        self.assertIsInstance(metrics_scores, dict)
-        self.assertIsInstance(metrics_scores["accuracy_score*"], pd.DataFrame)
-        np.testing.assert_array_equal(np.array(metrics_scores["accuracy_score*"].loc["train"]), np.array([0.9]))
-        np.testing.assert_array_equal(
-            np.array(metrics_scores["accuracy_score*"].loc["test"]),
-            np.array([0.95]))
-        np.testing.assert_array_equal(
-            np.array(metrics_scores["f1_score"].loc["train"]),
-            np.array([0.91]))
-        np.testing.assert_array_equal(
-            np.array(metrics_scores["f1_score"].loc["test"]),
-            np.array([0.96]))
-        np.testing.assert_array_equal(np.array(metrics_scores["f1_score"].columns),
-                                      np.array(["ada-0"]))
-
-    def test_multiple_monoview_classifiers(self):
-        metrics = {"accuracy_score*":{},"f1_score":{}}
-        results = [MonoviewResult(view_index=0,
-                                  classifier_name="ada",
-                                  view_name="0",
-                                  metrics_scores={"accuracy_score*": [0.9, 0.95],
-                                   "f1_score": [0.91, 0.96]},
-                                  full_labels_pred="",
-                                  classifier_config="",
-                                  classifier="",
-                                  n_features="",
-                                  hps_duration=0,
-                                  fit_duration=0,
-                                  pred_duration=0,
-                                  class_metric_scores={}),
-                   MonoviewResult(view_index=0,
-                                  classifier_name="dt",
-                                  view_name="1",
-                                  metrics_scores={"accuracy_score*": [0.8, 0.85],
-                                   "f1_score": [0.81, 0.86]},
-                                  full_labels_pred="",
-                                  classifier_config="",
-                                  classifier="",
-                                  n_features="",
-                                  hps_duration=0,
-                                  fit_duration=0,
-                                  pred_duration=0,
-                                  class_metric_scores={})
-                   ]
-        metrics_scores, class_met = get_metrics_scores(metrics,
-                                                       results, [])
-        self.assertIsInstance(metrics_scores, dict)
-        self.assertIsInstance(metrics_scores["accuracy_score*"], pd.DataFrame)
-        np.testing.assert_array_equal(
-            np.array(metrics_scores["accuracy_score*"].loc["train"]),
-            np.array([0.9, 0.8]))
-        np.testing.assert_array_equal(
-            np.array(metrics_scores["accuracy_score*"].loc["test"]),
-            np.array([0.95, 0.85]))
-        np.testing.assert_array_equal(
-            np.array(metrics_scores["f1_score"].loc["train"]),
-            np.array([0.91, 0.81]))
-        np.testing.assert_array_equal(
-            np.array(metrics_scores["f1_score"].loc["test"]),
-            np.array([0.96, 0.86]))
-        np.testing.assert_array_equal(
-            np.array(metrics_scores["f1_score"].columns),
-            np.array(["ada-0", "dt-1"]))
-
-    def test_multiview_result(self):
-        metrics = {"accuracy_score*":{},"f1_score":{}}
-        results = [MultiviewResult("mv", "", {"accuracy_score*": [0.7, 0.75],
-                                   "f1_score": [0.71, 0.76]}, "",0,0,0, {}),
-                   MonoviewResult(view_index=0,
-                                  classifier_name="dt",
-                                  view_name="1",
-                                  metrics_scores={"accuracy_score*": [0.8, 0.85],
-                                                  "f1_score": [0.81, 0.86]},
-                                  full_labels_pred="",
-                                  classifier_config="",
-                                  classifier="",
-                                  n_features="",
-                                  hps_duration=0,
-                                  fit_duration=0,
-                                  pred_duration=0,
-                                  class_metric_scores={})
-                   ]
-        metrics_scores, class_met = get_metrics_scores(metrics,
-                                                       results, [])
-        self.assertIsInstance(metrics_scores, dict)
-        self.assertIsInstance(metrics_scores["accuracy_score*"], pd.DataFrame)
-        np.testing.assert_array_equal(
-            np.array(metrics_scores["accuracy_score*"].loc["train"]),
-            np.array([0.7, 0.8]))
-        np.testing.assert_array_equal(
-            np.array(metrics_scores["accuracy_score*"].loc["test"]),
-            np.array([0.75, 0.85]))
-        np.testing.assert_array_equal(
-            np.array(metrics_scores["f1_score"].loc["train"]),
-            np.array([0.71, 0.81]))
-        np.testing.assert_array_equal(
-            np.array(metrics_scores["f1_score"].loc["test"]),
-            np.array([0.76, 0.86]))
-        np.testing.assert_array_equal(
-            np.array(metrics_scores["f1_score"].columns),
-            np.array(["mv", "dt-1"]))
-
-
-class Test_init_plot(unittest.TestCase):
-
-    def test_simple(self):
-        results = []
-        metric_name = "acc"
-        data = np.random.RandomState(42).uniform(0,1,(2,2))
-        metric_dataframe = pd.DataFrame(index=["train", "test"],
-                                        columns=["dt-1", "mv"], data=data)
-        directory = "dir"
-        database_name = 'db'
-        labels_names = ['lb1', "lb2"]
-        class_met = metric_dataframe = pd.DataFrame(index=["train", "test"],
-                                        columns=["dt-1", "mv"], data=data)
-        train, test, classifier_names, \
-        file_name, nb_results, results, class_test = init_plot(results,
-                                                                   metric_name,
-                                                                   metric_dataframe,
-                                                                   directory,
-                                                                   database_name,
-                                                                   class_met)
-        self.assertEqual(file_name, os.path.join("dir", "db-acc"))
-        np.testing.assert_array_equal(train, data[0,:])
-        np.testing.assert_array_equal(test, data[1, :])
-        np.testing.assert_array_equal(classifier_names, np.array(["dt-1", "mv"]))
-        self.assertEqual(nb_results, 2)
-        self.assertEqual(results, [["dt-1", "acc", data[1,0], 0.0, data[1,0]],
-                                   ["mv", "acc", data[1,1], 0.0, data[1,1]]])
-
-
-class Test_small_func(unittest.TestCase):
-
-    def test_fig_size(self):
-        kw, width = get_fig_size(5)
-        self.assertEqual(kw, {"figsize":(15,5)})
-        self.assertEqual(width, 0.35)
-        kw, width = get_fig_size(100)
-        self.assertEqual(kw, {"figsize": (100, 100/3)})
-        self.assertEqual(width, 0.35)
-
-    def test_sort_by_test_scores(self):
-        train_scores = np.array([1,2,3,4])
-        test_scores = np.array([4, 3, 2, 1])
-        train_STDs = np.array([1, 2, 3, 4])
-        test_STDs = np.array([1, 2, 3, 4])
-        names = np.array(['1', '2', '3', '4'])
-        sorted_names, sorted_train_scores, \
-        sorted_test_scores, sorted_train_STDs, \
-        sorted_test_STDs = sort_by_test_score(train_scores, test_scores,
-                                              names, train_STDs, test_STDs)
-        np.testing.assert_array_equal(sorted_names, np.array(['4', '3', '2', '1']))
-        np.testing.assert_array_equal(sorted_test_scores, [1, 2, 3, 4])
-        np.testing.assert_array_equal(sorted_test_STDs, [4, 3, 2, 1])
-        np.testing.assert_array_equal(sorted_train_scores, [4, 3, 2, 1])
-        np.testing.assert_array_equal(sorted_train_STDs, [4, 3, 2, 1])
\ No newline at end of file
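
The expected arrays in `test_sort_by_test_scores` describe a single permutation: every array is reordered by ascending test score. That is one `argsort` applied across the board:

```python
import numpy as np

def sort_by_test_score(train_scores, test_scores, names, train_stds, test_stds):
    order = np.argsort(test_scores)  # ascending test score
    return (names[order], train_scores[order], test_scores[order],
            train_stds[order], test_stds[order])
```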
diff --git a/multiview_platform/tests/test_result_analysis/test_noise_analysis.py b/multiview_platform/tests/test_result_analysis/test_noise_analysis.py
deleted file mode 100644
index e69de29bb2d1d6434b8b29ae775ad8c2e48c5391..0000000000000000000000000000000000000000
diff --git a/multiview_platform/tests/test_result_analysis/test_tracebacks_analysis.py b/multiview_platform/tests/test_result_analysis/test_tracebacks_analysis.py
deleted file mode 100644
index 61296f85e01400c823ecf7bc384a8f2751d20f4a..0000000000000000000000000000000000000000
--- a/multiview_platform/tests/test_result_analysis/test_tracebacks_analysis.py
+++ /dev/null
@@ -1,47 +0,0 @@
-import unittest
-import numpy as np
-import pandas as pd
-import os
-
-from multiview_platform.mono_multi_view_classifiers.result_analysis import tracebacks_analysis
-from multiview_platform.tests.utils import tmp_path, rm_tmp
-
-class FakeClassifierResult:
-
-    def __init__(self, i=0):
-        self.i=i
-        if i == 0:
-            self.hps_duration = 10
-            self.fit_duration = 12
-            self.pred_duration = 15
-        else:
-            self.hps_duration = 1
-            self.fit_duration = 2
-            self.pred_duration = 5
-
-
-    def get_classifier_name(self):
-        if self.i == 0:
-            return 'test1'
-        else:
-            return 'test2'
-
-
-
-class Test_funcs(unittest.TestCase):
-
-    @classmethod
-    def setUpClass(cls):
-        os.mkdir(tmp_path)
-        cls.res_file = open(os.path.join(tmp_path,"tmp.txt"), "w")
-
-    @classmethod
-    def tearDownClass(cls):
-        rm_tmp()
-
-    def test_save_dict_to_text(self):
-        keys = tracebacks_analysis.save_dict_to_text({"a":"i", "b":"j"}, self.res_file)
-        self.res_file.close()
-        self.assertEqual(list(keys),["a", "b"])
-        with open(os.path.join(tmp_path,"tmp.txt"), 'r') as res_file:
-            self.assertEqual(res_file.read(), 'Failed algorithms : \n\ta,\n\tb.\n\n\na\n\ni\n\n\nb\n\nj\n\n\n')
diff --git a/multiview_platform/tests/test_utils/__init__.py b/multiview_platform/tests/test_utils/__init__.py
deleted file mode 100644
index e69de29bb2d1d6434b8b29ae775ad8c2e48c5391..0000000000000000000000000000000000000000
diff --git a/multiview_platform/tests/test_utils/test_GetMultiviewDB.py b/multiview_platform/tests/test_utils/test_GetMultiviewDB.py
deleted file mode 100644
index c1068e7be8527b3302b868a8463bcdab0a479c51..0000000000000000000000000000000000000000
--- a/multiview_platform/tests/test_utils/test_GetMultiviewDB.py
+++ /dev/null
@@ -1,166 +0,0 @@
-import os
-import unittest
-
-import h5py
-import numpy as np
-
-from multiview_platform.mono_multi_view_classifiers.utils import get_multiview_db
-from multiview_platform.tests.utils import rm_tmp, tmp_path
-
-
-class Test_get_classic_db_hdf5(unittest.TestCase):
-
-    def setUp(self):
-        rm_tmp()
-        os.mkdir(tmp_path)
-        self.rs = np.random.RandomState(42)
-        self.nb_view = 3
-        self.file_name = "test.hdf5"
-        self.nb_examples = 5
-        self.nb_class = 3
-        self.views = [self.rs.randint(0, 10, size=(self.nb_examples, 7))
-                      for _ in range(self.nb_view)]
-        self.labels = self.rs.randint(0, self.nb_class, self.nb_examples)
-        self.dataset_file = h5py.File(os.path.join(tmp_path, self.file_name), 'w')
-        self.view_names = ["ViewN" + str(index) for index in
-                           range(len(self.views))]
-        self.are_sparse = [False for _ in self.views]
-        for view_index, (view_name, view, is_sparse) in enumerate(
-                zip(self.view_names, self.views, self.are_sparse)):
-            view_dataset = self.dataset_file.create_dataset(
-                "View" + str(view_index),
-                view.shape,
-                data=view)
-            view_dataset.attrs["name"] = view_name
-            view_dataset.attrs["sparse"] = is_sparse
-        labels_dataset = self.dataset_file.create_dataset("Labels",
-                                                          shape=self.labels.shape,
-                                                          data=self.labels)
-        self.labels_names = [str(index) for index in np.unique(self.labels)]
-        labels_dataset.attrs["names"] = [label_name.encode()
-                                         for label_name in self.labels_names]
-        meta_data_grp = self.dataset_file.create_group("Metadata")
-        meta_data_grp.attrs["nbView"] = len(self.views)
-        meta_data_grp.attrs["nbClass"] = len(np.unique(self.labels))
-        meta_data_grp.attrs["datasetLength"] = len(self.labels)
-
-    def test_simple(self):
-        dataset , labels_dictionary, dataset_name = get_multiview_db.get_classic_db_hdf5(
-            ["ViewN2"], tmp_path, self.file_name.split(".")[0],
-            self.nb_class, ["0", "2"],
-            self.rs, path_for_new=tmp_path)
-        self.assertEqual(dataset.nb_view, 1)
-        self.assertEqual(labels_dictionary,
-                         {0: "0", 1: "2", 2:"1"})
-        self.assertEqual(dataset.get_nb_examples(), 5)
-        self.assertEqual(len(np.unique(dataset.get_labels())), 3)
-
-
-    def test_all_views_asked(self):
-        dataset, labels_dictionary, dataset_name = get_multiview_db.get_classic_db_hdf5(
-            None, tmp_path, self.file_name.split(".")[0],
-            self.nb_class, ["0", "2"],
-            self.rs, path_for_new=tmp_path)
-        self.assertEqual(dataset.nb_view, 3)
-        self.assertEqual(dataset.get_view_dict(), {'ViewN0': 0, 'ViewN1': 1, 'ViewN2': 2})
-
-    def test_asked_the_whole_dataset(self):
-        dataset, labels_dictionary, dataset_name = get_multiview_db.get_classic_db_hdf5(
-            ["ViewN2"], tmp_path, self.file_name.split(".")[0],
-            self.nb_class, ["0", "2"],
-            self.rs, path_for_new=tmp_path, full=True)
-        self.assertEqual(dataset.dataset, self.dataset_file)
-
-    def tearDown(self):
-        rm_tmp()
-
-
-class Test_get_classic_db_csv(unittest.TestCase):
-
-    def setUp(self):
-        rm_tmp()
-        os.mkdir(tmp_path)
-        self.pathF = tmp_path
-        self.NB_CLASS = 2
-        self.nameDB = "test_dataset"
-        self.askedLabelsNames = ["test_label_1", "test_label_3"]
-        self.random_state = np.random.RandomState(42)
-        self.views = ["test_view_1", "test_view_3"]
-        np.savetxt(self.pathF + self.nameDB + "-labels-names.csv",
-                   np.array(["test_label_0", "test_label_1",
-                             "test_label_2", "test_label_3"]), fmt="%s",
-                   delimiter=",")
-        np.savetxt(self.pathF + self.nameDB + "-labels.csv",
-                   self.random_state.randint(0, 4, 10), delimiter=",")
-        os.mkdir(self.pathF + "Views")
-        self.datas = []
-        for i in range(4):
-            data = self.random_state.randint(0, 100, (10, 20))
-            np.savetxt(self.pathF + "Views/test_view_" + str(i) + ".csv",
-                       data, delimiter=",")
-            self.datas.append(data)
-
-
-    def test_simple(self):
-        dataset, labels_dictionary, dataset_name = get_multiview_db.get_classic_db_csv(
-            self.views, self.pathF, self.nameDB,
-            self.NB_CLASS, self.askedLabelsNames,
-            self.random_state, delimiter=",", path_for_new=tmp_path)
-        self.assertEqual(dataset.nb_view, 2)
-        self.assertEqual(dataset.get_view_dict(), {'test_view_1': 0, 'test_view_3': 1})
-        self.assertEqual(labels_dictionary,
-                         {0: "test_label_1", 1: "test_label_3"})
-        self.assertEqual(dataset.get_nb_examples(), 3)
-        self.assertEqual(dataset.get_nb_class(), 2)
-
-
-    # tearDown is an instance-level hook, so no @classmethod here
-    def tearDown(self):
-        for i in range(4):
-            os.remove(
-                tmp_path+"Views/test_view_" + str(
-                    i) + ".csv")
-        os.rmdir(tmp_path+"Views")
-        os.remove(
-            tmp_path+"test_dataset-labels-names.csv")
-        os.remove(tmp_path+"test_dataset-labels.csv")
-        os.remove(tmp_path+"test_dataset.hdf5")
-        os.remove(
-            tmp_path+"test_dataset_temp_filter.hdf5")
-        os.rmdir(tmp_path)
-
-class Test_get_plausible_db_hdf5(unittest.TestCase):
-
-    @classmethod
-    def setUpClass(cls):
-        rm_tmp()
-        cls.path = tmp_path
-        cls.nb_class=3
-        cls.rs = np.random.RandomState(42)
-        cls.nb_view=3
-        cls.nb_examples = 5
-        cls.nb_features = 4
-
-    @classmethod
-    def tearDownClass(cls):
-        rm_tmp()
-
-    def test_simple(self):
-        dataset, labels_dict, name = get_multiview_db.get_plausible_db_hdf5(
-            "", self.path, "", nb_class=self.nb_class, random_state=self.rs,
-            nb_view=3, nb_examples=self.nb_examples,
-            nb_features=self.nb_features)
-        self.assertEqual(dataset.init_example_indices(), range(5))
-        self.assertEqual(dataset.get_nb_class(), self.nb_class)
-
-    def test_two_class(self):
-        dataset, labels_dict, name = get_multiview_db.get_plausible_db_hdf5(
-            "", self.path, "", nb_class=2, random_state=self.rs,
-            nb_view=3, nb_examples=self.nb_examples,
-            nb_features=self.nb_features)
-        self.assertEqual(dataset.init_example_indices(), range(5))
-        self.assertEqual(dataset.get_nb_class(), 2)
-
-
-if __name__ == '__main__':
-    unittest.main()
\ No newline at end of file
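
The setUp of `Test_get_classic_db_hdf5` doubles as documentation of the HDF5 layout the platform expects: one `View<i>` dataset per view carrying `name` and `sparse` attributes, a `Labels` dataset with a `names` attribute, and a `Metadata` group with `nbView`, `nbClass` and `datasetLength`. Condensed into a small writer under those same assumptions:

```python
import h5py
import numpy as np

def write_multiview_hdf5(path, views, view_names, labels, labels_names):
    with h5py.File(path, "w") as f:
        for i, (view, name) in enumerate(zip(views, view_names)):
            ds = f.create_dataset("View{}".format(i), data=view)
            ds.attrs["name"] = name
            ds.attrs["sparse"] = False
        labels_ds = f.create_dataset("Labels", data=labels)
        labels_ds.attrs["names"] = [n.encode() for n in labels_names]
        meta = f.create_group("Metadata")
        meta.attrs["nbView"] = len(views)
        meta.attrs["nbClass"] = len(np.unique(labels))
        meta.attrs["datasetLength"] = len(labels)
```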
diff --git a/multiview_platform/tests/test_utils/test_base.py b/multiview_platform/tests/test_utils/test_base.py
deleted file mode 100644
index 027da26f44111e18dae9eb1fe77c704b312fbfd7..0000000000000000000000000000000000000000
--- a/multiview_platform/tests/test_utils/test_base.py
+++ /dev/null
@@ -1,261 +0,0 @@
-import os
-import unittest
-import yaml
-import numpy as np
-from sklearn.tree import DecisionTreeClassifier
-from sklearn.model_selection import StratifiedKFold
-from sklearn.metrics import accuracy_score, f1_score
-
-from multiview_platform.tests.utils import rm_tmp, tmp_path
-from multiview_platform.mono_multi_view_classifiers.utils import base
-
-
-class FakeClassifier(base.BaseClassifier):
-    def __init__(self, no_params=False, accepts_mc=True):
-        if no_params:
-            self.param_names = []
-            self.classed_params = []
-        else:
-            self.param_names = ["test1", "test2"]
-            self.classed_params = ["test2"]
-            self.weird_strings = []
-        self.accepts_mc = accepts_mc
-
-    def get_params(self, deep=True):
-        return {"test1": 10,
-                             "test2": "test"}
-
-    def fit(self, X, y):
-        if np.unique(y).shape[0]>2 and not self.accepts_mc:
-            raise ValueError('Does not accept MC')
-        else:
-            return self
-
-
-class FakeDetector:
-    def __init__(self):
-        self.best_params_ = {"test1": 10,
-                             "test2": "test"}
-        self.cv_results_ = {"param_test1": [10],
-                             "param_test2": ["test"]}
-
-class FakeResultAnalyzer(base.ResultAnalyser):
-
-    def get_view_specific_info(self):
-        return "test"
-
-    def get_base_string(self):
-        return 'test2'
-
-class Test_ResultAnalyzer(unittest.TestCase):
-
-    @classmethod
-    def setUpClass(cls):
-        cls.rs = np.random.RandomState(42)
-        cls.classifier = FakeClassifier()
-        cls.n_examples = 50
-        cls.n_classes = 3
-        cls.train_length = 24
-        cls.train_indices = cls.rs.choice(np.arange(cls.n_examples),
-                                          size=cls.train_length,
-                                          replace=False)
-        cls.test_indices = np.array([i for i in range(cls.n_examples)
-                                     if i not in cls.train_indices])
-        cls.test_length = cls.test_indices.shape[0]
-        cls.classification_indices = [cls.train_indices, cls.test_indices]
-        cls.n_splits = 5
-        cls.k_folds = StratifiedKFold(n_splits=cls.n_splits)
-        cls.hps_method = "randomized_search"
-        cls.metrics_list = {"accuracy_score": {}, "f1_score*":{}}
-        cls.n_iter = 6
-        cls.class_label_names = ["class{}".format(ind+1)
-                                  for ind in range(cls.n_classes)]
-        cls.pred = cls.rs.randint(0, cls.n_classes,
-                                          size=cls.n_examples)
-        cls.directory = "fake_directory"
-        cls.base_file_name = "fake_file"
-        cls.labels = cls.rs.randint(0, cls.n_classes,
-                                           size=cls.n_examples)
-        cls.database_name = "test_database"
-        cls.nb_cores = 0.5
-        cls.duration = -4
-        cls.train_accuracy = accuracy_score(cls.labels[cls.train_indices],
-                                            cls.pred[cls.train_indices])
-        cls.test_accuracy = accuracy_score(cls.labels[cls.test_indices],
-                                            cls.pred[cls.test_indices])
-        cls.train_f1 = f1_score(cls.labels[cls.train_indices],
-                                cls.pred[cls.train_indices], average='micro')
-        cls.test_f1 = f1_score(cls.labels[cls.test_indices],
-                               cls.pred[cls.test_indices], average='micro')
-
-    def test_simple(self):
-        RA = base.ResultAnalyser(self.classifier, self.classification_indices,
-                                 self.k_folds, self.hps_method, self.metrics_list,
-                                 self.n_iter, self.class_label_names,
-                                 self.pred, self.directory,
-                                 self.base_file_name, self.labels,
-                                 self.database_name, self.nb_cores,
-                                 self.duration)
-
-    def test_get_metric_scores(self):
-        RA = base.ResultAnalyser(self.classifier, self.classification_indices,
-                                 self.k_folds, self.hps_method,
-                                 self.metrics_list,
-                                 self.n_iter, self.class_label_names,
-                                 self.pred,
-                                 self.directory, self.base_file_name,
-                                 self.labels, self.database_name,
-                                 self.nb_cores, self.duration)
-        cl_train, cl_test, train_score, test_score = RA.get_metric_score("accuracy_score", {})
-        np.testing.assert_array_equal(train_score, self.train_accuracy)
-        np.testing.assert_array_equal(test_score, self.test_accuracy)
-
-    def test_get_all_metrics_scores(self):
-        RA = base.ResultAnalyser(self.classifier, self.classification_indices,
-                                 self.k_folds, self.hps_method,
-                                 self.metrics_list,
-                                 self.n_iter, self.class_label_names,
-                                 self.pred,
-                                 self.directory, self.base_file_name,
-                                 self.labels, self.database_name,
-                                 self.nb_cores, self.duration)
-        RA.get_all_metrics_scores()
-        self.assertEqual(RA.metric_scores["accuracy_score"][0],
-                         self.train_accuracy)
-        self.assertEqual(RA.metric_scores["accuracy_score"][1],
-                         self.test_accuracy)
-        self.assertEqual(RA.metric_scores["f1_score*"][0],
-                         self.train_f1)
-        self.assertEqual(RA.metric_scores["f1_score*"][1],
-                         self.test_f1)
-
-    def test_print_metrics_scores(self):
-        RA = base.ResultAnalyser(self.classifier, self.classification_indices,
-                                 self.k_folds, self.hps_method,
-                                 self.metrics_list,
-                                 self.n_iter, self.class_label_names,
-                                 self.pred,
-                                 self.directory, self.base_file_name,
-                                 self.labels, self.database_name,
-                                 self.nb_cores, self.duration)
-        RA.get_all_metrics_scores()
-        string = RA.print_metric_score()
-        print(repr(string))
-        self.assertEqual(string, '\n\n\tFor Accuracy score using {}, (higher is better) : \n\t\t- Score on train : 0.25\n\t\t- Score on test : 0.2692307692307692\n\n\tFor F1 score using average: micro, {} (higher is better) : \n\t\t- Score on train : 0.25\n\t\t- Score on test : 0.2692307692307692\n\nTest set confusion matrix : \n\n╒════════╤══════════╤══════════╤══════════╕\n│        │   class1 │   class2 │   class3 │\n╞════════╪══════════╪══════════╪══════════╡\n│ class1 │        3 │        1 │        2 │\n├────────┼──────────┼──────────┼──────────┤\n│ class2 │        3 │        2 │        2 │\n├────────┼──────────┼──────────┼──────────┤\n│ class3 │        3 │        8 │        2 │\n╘════════╧══════════╧══════════╧══════════╛\n\n')
-
-    def test_get_db_config_string(self):
-        RA = FakeResultAnalyzer(self.classifier, self.classification_indices,
-                                 self.k_folds, self.hps_method,
-                                 self.metrics_list,
-                                 self.n_iter, self.class_label_names,
-                                 self.pred,
-                                 self.directory, self.base_file_name,
-                                 self.labels, self.database_name,
-                                 self.nb_cores, self.duration)
-        self.assertEqual(RA.get_db_config_string(), 'Database configuration : \n\t- Database name : test_database\ntest\t- Learning Rate : 0.48\n\t- Labels used : class1, class2, class3\n\t- Number of cross validation folds : 5\n\n')
-
-    def test_get_classifier_config_string(self):
-        RA = base.ResultAnalyser(self.classifier, self.classification_indices,
-                                 self.k_folds, self.hps_method,
-                                 self.metrics_list,
-                                 self.n_iter, self.class_label_names,
-                                 self.pred,
-                                 self.directory, self.base_file_name,
-                                 self.labels, self.database_name,
-                                 self.nb_cores, self.duration)
-        self.assertEqual(RA.get_classifier_config_string(), 'Classifier configuration : \n\t- FakeClassifier with test1 : 10, test2 : test\n\t- Executed on 0.5 core(s) \n\t- Got configuration using randomized search with 6  iterations \n')
-
-    def test_analyze(self):
-        RA = FakeResultAnalyzer(self.classifier, self.classification_indices,
-                                 self.k_folds, self.hps_method,
-                                 self.metrics_list,
-                                 self.n_iter, self.class_label_names,
-                                 self.pred,
-                                 self.directory, self.base_file_name,
-                                 self.labels, self.database_name,
-                                 self.nb_cores, self.duration)
-        str_analysis, img_analysis, metric_scores, class_metric_scores, conf_mat = RA.analyze()
-        print(repr(str_analysis))
-        self.assertEqual(str_analysis, 'test2Database configuration : \n\t- Database name : test_database\ntest\t- Learning Rate : 0.48\n\t- Labels used : class1, class2, class3\n\t- Number of cross validation folds : 5\n\nClassifier configuration : \n\t- FakeClassifier with test1 : 10, test2 : test\n\t- Executed on 0.5 core(s) \n\t- Got configuration using randomized search with 6  iterations \n\n\n\tFor Accuracy score using {}, (higher is better) : \n\t\t- Score on train : 0.25\n\t\t- Score on test : 0.2692307692307692\n\n\tFor F1 score using average: micro, {} (higher is better) : \n\t\t- Score on train : 0.25\n\t\t- Score on test : 0.2692307692307692\n\nTest set confusion matrix : \n\n╒════════╤══════════╤══════════╤══════════╕\n│        │   class1 │   class2 │   class3 │\n╞════════╪══════════╪══════════╪══════════╡\n│ class1 │        3 │        1 │        2 │\n├────────┼──────────┼──────────┼──────────┤\n│ class2 │        3 │        2 │        2 │\n├────────┼──────────┼──────────┼──────────┤\n│ class3 │        3 │        8 │        2 │\n╘════════╧══════════╧══════════╧══════════╛\n\n\n\n Classification took -1 day, 23:59:56\n\n Classifier Interpretation : \n')
-
-
-
-class Test_BaseClassifier(unittest.TestCase):
-
-    @classmethod
-    def setUpClass(cls):
-        cls.base_estimator = "DecisionTreeClassifier"
-        cls.base_estimator_config = {"max_depth":10,
-                                     "splitter": "best"}
-        cls.est = base.BaseClassifier()
-        cls.rs = np.random.RandomState(42)
-
-    def test_simple(self):
-        base_estim = self.est.get_base_estimator(self.base_estimator,
-                                            self.base_estimator_config)
-        self.assertTrue(isinstance(base_estim, DecisionTreeClassifier))
-        self.assertEqual(base_estim.max_depth, 10)
-        self.assertEqual(base_estim.splitter, "best")
-
-    def test_gen_best_params(self):
-        fake_class = FakeClassifier()
-        best_params = fake_class.gen_best_params(FakeDetector())
-        self.assertEqual(best_params, {"test1":10, "test2":"test"})
-
-    def test_gen_params_from_detector(self):
-        fake_class = FakeClassifier()
-        params = fake_class.gen_params_from_detector(FakeDetector())
-        self.assertEqual(params, [("test1",np.array([10])),
-                                  ("test2",np.array(["str"], dtype='<U3'))])
-        params = FakeClassifier(no_params=True).gen_params_from_detector(FakeDetector())
-        self.assertEqual(params, [()])
-
-    def test_params_to_string(self):
-        fake_class = FakeClassifier()
-        string = fake_class.params_to_string()
-        self.assertEqual(string, "test1 : 10, test2 : test")
-
-    def test_get_interpret(self):
-        fake_class = FakeClassifier()
-        self.assertEqual("", fake_class.get_interpretation("", "", "",))
-
-    def test_accepts_multiclass(self):
-        accepts = FakeClassifier().accepts_multi_class(self.rs)
-        self.assertEqual(accepts, True)
-        accepts = FakeClassifier(accepts_mc=False).accepts_multi_class(self.rs)
-        self.assertEqual(accepts, False)
-        self.assertRaises(ValueError, FakeClassifier().accepts_multi_class, self.rs, **{"n_samples":2})
-
-
-    def test_class(self):
-        base_estimator = DecisionTreeClassifier(max_depth=15, splitter="random")
-        base_estim = self.est.get_base_estimator(base_estimator,
-                                            self.base_estimator_config)
-        self.assertTrue(isinstance(base_estim, DecisionTreeClassifier))
-        self.assertEqual(base_estim.max_depth, 10)
-        self.assertEqual(base_estim.splitter, "best")
-
-    def test_wrong_args(self):
-        base_estimator_config = {"n_estimators": 10,
-                                 "splitter": "best"}
-        with self.assertRaises(TypeError):
-            base_estim = self.est.get_base_estimator(self.base_estimator,
-                                                     base_estimator_config)
-
-    def test_get_config(self):
-        conf = FakeClassifier(no_params=True).get_config()
-        self.assertEqual(conf, 'FakeClassifier with no config.')
-
-class Test_Functions(unittest.TestCase):
-
-    def test_get_names(self):
-        classed_list = ["test", 42]
-        np.testing.assert_array_equal(base.get_names(classed_list),
-                                      np.array(["str", "int"], dtype="<U3"))
-
-
-    def test_get_metric(self):
-        from multiview_platform.mono_multi_view_classifiers.metrics import accuracy_score
-        metrics_dict = {"accuracy_score*":{}}
-        self.assertEqual(base.get_metric(metrics_dict), (accuracy_score, {}))
-
diff --git a/multiview_platform/tests/test_utils/test_configuration.py b/multiview_platform/tests/test_utils/test_configuration.py
deleted file mode 100644
index dc1fed6ccc7288d0988f52f921ed23b8179dfb68..0000000000000000000000000000000000000000
--- a/multiview_platform/tests/test_utils/test_configuration.py
+++ /dev/null
@@ -1,63 +0,0 @@
-import os
-import unittest
-import yaml
-import numpy as np
-
-from multiview_platform.tests.utils import rm_tmp, tmp_path
-from multiview_platform.mono_multi_view_classifiers.utils import configuration
-
-
-class Test_get_the_args(unittest.TestCase):
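-    # get_the_args should load the YAML config file into a plain dict,
-    # preserving ints, float lists and booleans.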
-
-    @classmethod
-    def setUpClass(cls):
-        rm_tmp()
-        cls.path_to_config_file = tmp_path+"config_temp.yml"
-        path_file = os.path.dirname(os.path.abspath(__file__))
-        make_tmp_dir = os.path.join(path_file, "../tmp_tests")
-        os.mkdir(make_tmp_dir)
-        data = {"log": 10, "name":[12.5, 1e-06], "type":True}
-        with open(cls.path_to_config_file, "w") as config_file:
-            yaml.dump(data, config_file)
-
-    @classmethod
-    def tearDownClass(cls):
-        os.remove(tmp_path+"config_temp.yml")
-        os.rmdir(tmp_path)
-
-    def test_file_loading(self):
-        config_dict = configuration.get_the_args(self.path_to_config_file)
-        self.assertEqual(type(config_dict), dict)
-
-    def test_dict_format(self):
-        config_dict = configuration.get_the_args(self.path_to_config_file)
-        self.assertIn("log", config_dict)
-        self.assertIn("name", config_dict)
-
-    def test_arguments(self):
-        config_dict = configuration.get_the_args(self.path_to_config_file)
-        self.assertEqual(config_dict["log"], 10)
-        self.assertEqual(config_dict["name"], [12.5, 1e-06])
-        self.assertEqual(config_dict["type"], True)
-
-class Test_save_config(unittest.TestCase):
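-    # save_config must write a dict that yaml.safe_load can round-trip.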
-    @classmethod
-    def setUpClass(cls):
-        rm_tmp()
-        path_file = os.path.dirname(os.path.abspath(__file__))
-        make_tmp_dir = os.path.join(path_file, "../tmp_tests")
-        os.mkdir(make_tmp_dir)
-
-    def test_simple(self):
-        configuration.save_config(tmp_path, {"test":10})
-        with open(os.path.join(tmp_path,"config_file.yml" ), 'r') as stream:
-            yaml_config = yaml.safe_load(stream)
-        self.assertEqual(yaml_config,{"test":10} )
-
-    @classmethod
-    def tearDownClass(cls):
-        os.remove(os.path.join(tmp_path, "config_file.yml"))
-
-
-if __name__ == '__main__':
-    unittest.main()
\ No newline at end of file
diff --git a/multiview_platform/tests/test_utils/test_dataset.py b/multiview_platform/tests/test_utils/test_dataset.py
deleted file mode 100644
index 76644bcbddc227877a398e99c3f47f22d82cd22e..0000000000000000000000000000000000000000
--- a/multiview_platform/tests/test_utils/test_dataset.py
+++ /dev/null
@@ -1,423 +0,0 @@
-import unittest
-import h5py
-import numpy as np
-import os
-
-from multiview_platform.tests.utils import rm_tmp, tmp_path
-from multiview_platform.mono_multi_view_classifiers.utils import dataset
-
-
-class Test_Dataset(unittest.TestCase):
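-    # Builds one small HDF5 file (3 views, 5 examples, 7 attributes,
-    # 3 classes) for the whole class and checks the HDF5Dataset accessors.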
-
-    @classmethod
-    def setUpClass(cls):
-        rm_tmp()
-        os.mkdir(tmp_path)
-        cls.rs = np.random.RandomState(42)
-        cls.nb_view = 3
-        cls.file_name = "test.hdf5"
-        cls.nb_examples = 5
-        cls.nb_attr = 7
-        cls.nb_class = 3
-        cls.views = [cls.rs.randint(0, 10, size=(cls.nb_examples, cls.nb_attr))
-                     for _ in range(cls.nb_view)]
-        cls.labels = cls.rs.randint(0, cls.nb_class, cls.nb_examples)
-        cls.dataset_file = h5py.File(os.path.join(tmp_path, cls.file_name), "w")
-        cls.view_names = ["ViewN" + str(index) for index in range(len(cls.views))]
-        cls.are_sparse = [False for _ in cls.views]
-        for view_index, (view_name, view, is_sparse) in enumerate(
-                zip(cls.view_names, cls.views, cls.are_sparse)):
-            view_dataset = cls.dataset_file.create_dataset("View" + str(view_index),
-                                                           view.shape,
-                                                           data=view)
-            view_dataset.attrs["name"] = view_name
-            view_dataset.attrs["sparse"] = is_sparse
-        labels_dataset = cls.dataset_file.create_dataset("Labels",
-                                                         shape=cls.labels.shape,
-                                                         data=cls.labels)
-        cls.labels_names = [str(index) for index in np.unique(cls.labels)]
-        labels_dataset.attrs["names"] = [label_name.encode()
-                                         for label_name in cls.labels_names]
-        meta_data_grp = cls.dataset_file.create_group("Metadata")
-        meta_data_grp.attrs["nbView"] = len(cls.views)
-        meta_data_grp.attrs["nbClass"] = len(np.unique(cls.labels))
-        meta_data_grp.attrs["datasetLength"] = len(cls.labels)
-
-    @classmethod
-    def tearDownClass(cls):
-        cls.dataset_file.close()
-
-    def test_get_shape(self):
-        dataset_object = dataset.HDF5Dataset(views=self.views,
-                                             labels=self.labels,
-                                             are_sparse=self.are_sparse,
-                                             file_name="from_scratch" + self.file_name,
-                                             view_names=self.view_names,
-                                             path=tmp_path,
-                                             labels_names=self.labels_names)
-        shape = dataset_object.get_shape(0)
-        self.assertEqual(shape, (5,7))
-
-    def test_to_numpy_array(self):
-        dataset_object = dataset.HDF5Dataset(views=self.views,
-                                             labels=self.labels,
-                                             are_sparse=self.are_sparse,
-                                             file_name="from_scratch" + self.file_name,
-                                             view_names=self.view_names,
-                                             path=tmp_path,
-                                             labels_names=self.labels_names)
-        array, limits = dataset_object.to_numpy_array(view_indices=[0,1,2])
-
-        self.assertEqual(array.shape, (5, 21))
-
-    def test_filter(self):
-        """Had to create a new dataset to aviod playing with the class one"""
-        file_name = "test_filter.hdf5"
-        dataset_file_filter = h5py.File(os.path.join(tmp_path, file_name), "w")
-        for view_index, (view_name, view, is_sparse) in enumerate(
-                zip(self.view_names, self.views, self.are_sparse)):
-            view_dataset = dataset_file_filter.create_dataset(
-                "View" + str(view_index),
-                view.shape,
-                data=view)
-            view_dataset.attrs["name"] = view_name
-            view_dataset.attrs["sparse"] = is_sparse
-        labels_dataset = dataset_file_filter.create_dataset("Labels",
-                                                         shape=self.labels.shape,
-                                                         data=self.labels)
-        labels_dataset.attrs["names"] = [label_name.encode()
-                                         for label_name in self.labels_names]
-        meta_data_grp = dataset_file_filter.create_group("Metadata")
-        meta_data_grp.attrs["nbView"] = len(self.views)
-        meta_data_grp.attrs["nbClass"] = len(np.unique(self.labels))
-        meta_data_grp.attrs["datasetLength"] = len(self.labels)
-        dataset_object = dataset.HDF5Dataset(hdf5_file=dataset_file_filter)
-        dataset_object.filter(np.array([0, 1, 0]), ["0", "1"], [1, 2, 3],
-                              ["ViewN0"], tmp_path)
-        self.assertEqual(dataset_object.nb_view, 1)
-        np.testing.assert_array_equal(dataset_object.get_labels(), [0, 1, 0])
-        dataset_object.dataset.close()
-        os.remove(os.path.join(tmp_path, "test_filter_temp_filter.hdf5"))
-        os.remove(os.path.join(tmp_path, "test_filter.hdf5"))
-
-    def test_for_hdf5_file(self):
-        dataset_object = dataset.HDF5Dataset(hdf5_file=self.dataset_file)
-
-    def test_from_scratch(self):
-        dataset_object = dataset.HDF5Dataset(views=self.views,
-                                             labels=self.labels,
-                                             are_sparse=self.are_sparse,
-                                             file_name="from_scratch"+self.file_name,
-                                             view_names=self.view_names,
-                                             path=tmp_path,
-                                             labels_names=self.labels_names)
-        nb_class = dataset_object.get_nb_class()
-        self.assertEqual(nb_class, self.nb_class)
-        example_indices = dataset_object.init_example_indices()
-        self.assertEqual(example_indices, range(self.nb_examples))
-        view = dataset_object.get_v(0)
-        np.testing.assert_array_equal(view, self.views[0])
-
-    def test_init_example_indices(self):
-        example_indices = dataset.HDF5Dataset(
-            hdf5_file=self.dataset_file).init_example_indices()
-        self.assertEqual(example_indices, range(self.nb_examples))
-        example_indices = dataset.HDF5Dataset(
-            hdf5_file=self.dataset_file).init_example_indices([0, 1, 2])
-        self.assertEqual(example_indices, [0,1,2])
-
-    def test_get_v(self):
-        view = dataset.HDF5Dataset(hdf5_file=self.dataset_file).get_v(0)
-        np.testing.assert_array_equal(view, self.views[0])
-        view = dataset.HDF5Dataset(hdf5_file=self.dataset_file).get_v(1, [0,1,2])
-        np.testing.assert_array_equal(view, self.views[1][[0,1,2,], :])
-
-    def test_get_nb_class(self):
-        nb_class = dataset.HDF5Dataset(hdf5_file=self.dataset_file).get_nb_class()
-        self.assertEqual(nb_class, self.nb_class)
-        nb_class = dataset.HDF5Dataset(hdf5_file=self.dataset_file).get_nb_class([0])
-        self.assertEqual(nb_class, 1)
-
-    def test_get_view_dict(self):
-        dataset_object = dataset.HDF5Dataset(views=self.views,
-                                         labels=self.labels,
-                                         are_sparse=self.are_sparse,
-                                         file_name="from_scratch" + self.file_name,
-                                         view_names=self.view_names,
-                                         path=tmp_path,
-                                         labels_names=self.labels_names)
-        self.assertEqual(dataset_object.get_view_dict(), {"ViewN0":0,
-                                                          "ViewN1": 1,
-                                                          "ViewN2": 2,})
-
-    def test_get_label_names(self):
-        dataset_object = dataset.HDF5Dataset(hdf5_file=self.dataset_file)
-        raw_label_names = dataset_object.get_label_names(decode=False)
-        decoded_label_names = dataset_object.get_label_names()
-        restricted_label_names = dataset_object.get_label_names(example_indices=[3,4])
-        self.assertEqual(raw_label_names, [b'0', b'1', b'2'])
-        self.assertEqual(decoded_label_names, ['0', '1', '2'])
-        self.assertEqual(restricted_label_names, ['2'])
-
-    def test_get_nb_examples(self):
-        dataset_object = dataset.HDF5Dataset(hdf5_file=self.dataset_file)
-        nb_examples = dataset_object.get_nb_examples()
-        self.assertEqual(nb_examples, self.nb_examples)
-
-    def test_get_labels(self):
-        dataset_object = dataset.HDF5Dataset(hdf5_file=self.dataset_file)
-        labels = dataset_object.get_labels()
-        np.testing.assert_array_equal(labels, self.labels)
-        labels = dataset_object.get_labels([1,2,0])
-        np.testing.assert_array_equal(labels, self.labels[[1,2,0]])
-
-    def test_copy_view(self):
-        dataset_object = dataset.HDF5Dataset(hdf5_file=self.dataset_file)
-        new_dataset = h5py.File(os.path.join(tmp_path, "test_copy.hdf5"), "w")
-        dataset_object.copy_view(target_dataset=new_dataset,
-                                 source_view_name="ViewN0",
-                                 target_view_index=1)
-        self.assertIn("View1", list(new_dataset.keys()))
-        np.testing.assert_array_equal(dataset_object.get_v(0), new_dataset["View1"][()])
-        self.assertEqual(new_dataset["View1"].attrs["name"], "ViewN0")
-        new_dataset.close()
-        os.remove(os.path.join(tmp_path, "test_copy.hdf5"))
-
-    def test_get_name(self):
-        dataset_object = dataset.HDF5Dataset(hdf5_file=self.dataset_file)
-        self.assertEqual("test", dataset_object.get_name())
-
-    def test_select_labels(self):
-        dataset_object = dataset.HDF5Dataset(hdf5_file=self.dataset_file)
-        labels, label_names, indices = dataset_object.select_labels(["0", "2"])
-        np.testing.assert_array_equal(np.unique(labels), np.array([0,1]))
-        self.assertEqual(label_names, ["0","2"])
-
-    def test_check_selected_label_names(self):
-        dataset_object = dataset.HDF5Dataset(hdf5_file=self.dataset_file)
-        names = dataset_object.check_selected_label_names(nb_labels=2, random_state=self.rs)
-        self.assertEqual(names, ["1", "0"])
-        names = dataset_object.check_selected_label_names(selected_label_names=['0', '2'],
-                                                          random_state=self.rs)
-        self.assertEqual(names, ["0", "2"])
-
-    def test_select_views_and_labels(self):
-        file_name = "test_filter.hdf5"
-        dataset_file_select = h5py.File(os.path.join(tmp_path, file_name), "w")
-        for view_index, (view_name, view, is_sparse) in enumerate(
-                zip(self.view_names, self.views, self.are_sparse)):
-            view_dataset = dataset_file_select.create_dataset(
-                "View" + str(view_index),
-                view.shape,
-                data=view)
-            view_dataset.attrs["name"] = view_name
-            view_dataset.attrs["sparse"] = is_sparse
-        labels_dataset = dataset_file_select.create_dataset("Labels",
-                                                            shape=self.labels.shape,
-                                                            data=self.labels)
-        labels_dataset.attrs["names"] = [label_name.encode()
-                                         for label_name in self.labels_names]
-        meta_data_grp = dataset_file_select.create_group("Metadata")
-        meta_data_grp.attrs["nbView"] = len(self.views)
-        meta_data_grp.attrs["nbClass"] = len(np.unique(self.labels))
-        meta_data_grp.attrs["datasetLength"] = len(self.labels)
-        dataset_object = dataset.HDF5Dataset(hdf5_file=dataset_file_select)
-        names = dataset_object.select_views_and_labels(nb_labels=2, view_names=["ViewN0"], random_state=self.rs, path_for_new=tmp_path)
-        self.assertEqual(names, {0: '2', 1: '1'})
-        self.assertEqual(dataset_object.nb_view, 1)
-        dataset_object.dataset.close()
-        os.remove(os.path.join(tmp_path, "test_filter_temp_filter.hdf5"))
-        os.remove(os.path.join(tmp_path, "test_filter.hdf5"))
-
-    def test_add_gaussian_noise(self):
-        file_name = "test_noise.hdf5"
-        dataset_file_select = h5py.File(os.path.join(tmp_path, file_name), "w")
-        limits = np.zeros((self.nb_attr, 2))
-        limits[:, 1] += 100
-        meta_data_grp = dataset_file_select.create_group("Metadata")
-        for view_index, (view_name, view, is_sparse) in enumerate(
-                zip(self.view_names, self.views, self.are_sparse)):
-            view_dataset = dataset_file_select.create_dataset(
-                "View" + str(view_index),
-                view.shape,
-                data=view)
-            view_dataset.attrs["name"] = view_name
-            view_dataset.attrs["sparse"] = is_sparse
-            meta_data_grp.create_dataset("View"+str(view_index)+"_limits", data= limits)
-        labels_dataset = dataset_file_select.create_dataset("Labels",
-                                                            shape=self.labels.shape,
-                                                            data=self.labels)
-        labels_dataset.attrs["names"] = [label_name.encode()
-                                         for label_name in self.labels_names]
-        meta_data_grp.attrs["nbView"] = len(self.views)
-        meta_data_grp.attrs["nbClass"] = len(np.unique(self.labels))
-        meta_data_grp.attrs["datasetLength"] = len(self.labels)
-        dataset_object = dataset.HDF5Dataset(hdf5_file=dataset_file_select)
-        dataset_object.add_gaussian_noise(self.rs, tmp_path)
-        dataset_object.dataset.close()
-        os.remove(os.path.join(tmp_path, "test_noise_noised.hdf5"))
-        os.remove(os.path.join(tmp_path, "test_noise.hdf5"))
-
-class TestRAMDataset(unittest.TestCase):
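-    # Same synthetic views and labels as Test_Dataset, but held in memory by
-    # RAMDataset, so no HDF5 file is created.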
-
-    @classmethod
-    def setUpClass(cls):
-        cls.rs = np.random.RandomState(42)
-        cls.nb_view = 3
-        cls.file_name = "test.hdf5"
-        cls.nb_examples = 5
-        cls.nb_attr = 7
-        cls.nb_class = 3
-        cls.views = [cls.rs.randint(0, 10, size=(cls.nb_examples, cls.nb_attr))
-                     for _ in range(cls.nb_view)]
-        cls.labels = cls.rs.randint(0, cls.nb_class, cls.nb_examples)
-        cls.view_names = ["ViewN" + str(index) for index in
-                          range(len(cls.views))]
-        cls.are_sparse = [False for _ in cls.views]
-        cls.labels_names = [str(index) for index in np.unique(cls.labels)]
-
-    def test_get_view_name(self):
-        dataset_object = dataset.RAMDataset(views=self.views,
-                                             labels=self.labels,
-                                             are_sparse=self.are_sparse,
-                                             view_names=self.view_names,
-                                             labels_names=self.labels_names)
-        self.assertEqual(dataset_object.get_view_name(0),
-                         "ViewN0")
-
-    def test_init_attrs(self):
-        dataset_object = dataset.RAMDataset(views=self.views,
-                                            labels=self.labels,
-                                            are_sparse=self.are_sparse,
-                                            view_names=self.view_names,
-                                            labels_names=self.labels_names)
-
-        dataset_object.init_attrs()
-        self.assertEqual(dataset_object.nb_view, 3)
-
-    def test_get_label_names(self):
-        dataset_object = dataset.RAMDataset(views=self.views,
-                                             labels=self.labels,
-                                             are_sparse=self.are_sparse,
-                                             view_names=self.view_names,
-                                             labels_names=self.labels_names)
-        shape = dataset_object.get_label_names()
-        self.assertEqual(shape, ['0'.encode('utf-8'),
-                                 '1'.encode('utf-8'),
-                                 '2'.encode('utf-8')])
-        shape = dataset_object.get_label_names(decode=False)
-        self.assertEqual(shape, ['0'.encode('utf-8'),
-                                 '1'.encode('utf-8'),
-                                 '2'.encode('utf-8')])
-
-    def test_get_v(self):
-        dataset_object = dataset.RAMDataset(views=self.views,
-                                            labels=self.labels,
-                                            are_sparse=self.are_sparse,
-                                            view_names=self.view_names,
-                                            labels_names=self.labels_names)
-        data = dataset_object.get_v(0, 1)
-        np.testing.assert_array_equal(data, np.array([6, 7, 4, 3, 7, 7, 2]))
-        data = dataset_object.get_v(0, None)
-        np.testing.assert_array_equal(data, np.array([[6, 3, 7, 4, 6, 9, 2],
-                                                     [6, 7, 4, 3, 7, 7, 2],
-                                                     [5, 4, 1, 7, 5, 1, 4],
-                                                     [0, 9, 5, 8, 0, 9, 2],
-                                                     [6, 3, 8, 2, 4, 2, 6]]))
-
-    def test_filter(self):
-        dataset_object = dataset.RAMDataset(views=self.views,
-                                            labels=self.labels,
-                                            are_sparse=self.are_sparse,
-                                            view_names=self.view_names,
-                                            labels_names=self.labels_names)
-        dataset_object.filter("", "", np.array([1,2]), ["ViewN0", "ViewN1"],
-                              path=None)
-        self.assertEqual(dataset_object.nb_view, 2)
-        self.assertEqual(dataset_object.labels.shape, (2,1))
-
-    def test_get_view_dict(self):
-        dataset_object = dataset.RAMDataset(views=self.views,
-                                            labels=self.labels,
-                                            are_sparse=self.are_sparse,
-                                            view_names=self.view_names,
-                                            labels_names=self.labels_names)
-        d = dataset_object.get_view_dict()
-        self.assertEqual(d, {'ViewN0': 0, 'ViewN1': 1, 'ViewN2': 2})
-
-    def test_get_name(self):
-        dataset_object = dataset.RAMDataset(views=self.views,
-                                            labels=self.labels,
-                                            are_sparse=self.are_sparse,
-                                            view_names=self.view_names,
-                                            labels_names=self.labels_names)
-        n = dataset_object.get_name()
-        self.assertEqual(n, None)
-
-class Test_Functions(unittest.TestCase):
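-    # Module-level helpers: datasets_already_exist, init_multiple_datasets
-    # (which writes indexed copies such as test00.hdf5, presumably one per
-    # core) and delete_HDF5.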
-    @classmethod
-    def setUpClass(cls):
-        rm_tmp()
-        os.mkdir(tmp_path)
-        cls.rs = np.random.RandomState(42)
-        cls.nb_view = 3
-        cls.file_name = "test0.hdf5"
-        cls.nb_examples = 5
-        cls.nb_attr = 7
-        cls.nb_class = 3
-        cls.views = [cls.rs.randint(0, 10, size=(cls.nb_examples, cls.nb_attr))
-                     for _ in range(cls.nb_view)]
-        cls.labels = cls.rs.randint(0, cls.nb_class, cls.nb_examples)
-        cls.dataset_file = h5py.File(os.path.join(tmp_path, cls.file_name), "w")
-        cls.view_names = ["ViewN" + str(index) for index in
-                          range(len(cls.views))]
-        cls.are_sparse = [False for _ in cls.views]
-        for view_index, (view_name, view, is_sparse) in enumerate(
-                zip(cls.view_names, cls.views, cls.are_sparse)):
-            view_dataset = cls.dataset_file.create_dataset(
-                "View" + str(view_index),
-                view.shape,
-                data=view)
-            view_dataset.attrs["name"] = view_name
-            view_dataset.attrs["sparse"] = is_sparse
-        labels_dataset = cls.dataset_file.create_dataset("Labels",
-                                                         shape=cls.labels.shape,
-                                                         data=cls.labels)
-        cls.labels_names = [str(index) for index in np.unique(cls.labels)]
-        labels_dataset.attrs["names"] = [label_name.encode()
-                                         for label_name in cls.labels_names]
-        meta_data_grp = cls.dataset_file.create_group("Metadata")
-        meta_data_grp.attrs["nbView"] = len(cls.views)
-        meta_data_grp.attrs["nbClass"] = len(np.unique(cls.labels))
-        meta_data_grp.attrs["datasetLength"] = len(cls.labels)
-
-    @classmethod
-    def tearDownClass(cls):
-        cls.dataset_file.close()
-        rm_tmp()
-
-    def test_datasets_already_exist(self):
-        self.assertEqual(True, dataset.datasets_already_exist(tmp_path, "test", 1))
-
-    def test_init_multiple_datasets(self):
-        dataset.init_multiple_datasets(tmp_path, "test0", 2)
-        self.assertTrue(os.path.isfile(os.path.join(tmp_path,'test00.hdf5')))
-        dataset.delete_HDF5([{"args":{"pathf":tmp_path, "name":"test0"}}],
-                            2, dataset.HDF5Dataset(hdf5_file=self.dataset_file))
-        self.assertFalse(os.path.isfile(os.path.join(tmp_path,'test00.hdf5')))
-
-
-if __name__ == '__main__':
-    unittest.main()
\ No newline at end of file
diff --git a/multiview_platform/tests/test_utils/test_execution.py b/multiview_platform/tests/test_utils/test_execution.py
deleted file mode 100644
index 1e97963e0bb42fbcb6a3a8cd9d74108aa5048ca6..0000000000000000000000000000000000000000
--- a/multiview_platform/tests/test_utils/test_execution.py
+++ /dev/null
@@ -1,361 +0,0 @@
-import os
-import unittest
-
-import numpy as np
-
-from multiview_platform.tests.utils import rm_tmp, tmp_path, test_dataset
-
-from multiview_platform.mono_multi_view_classifiers.utils import execution
-
-
-class Test_parseTheArgs(unittest.TestCase):
-
-    def setUp(self):
-        self.args = []
-
-    def test_empty_args(self):
-        args = execution.parse_the_args([])
-
-class Test_init_log_file(unittest.TestCase):
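-    # init_log_file should create a timestamped result directory named
-    # "started_*" ("debug_started_*" in debug mode) under the dataset folder.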
-
-    @classmethod
-    def setUpClass(cls):
-        os.mkdir(tmp_path)
-
-    @classmethod
-    def tearDownClass(cls):
-        rm_tmp()
-
-    def test_simple(self):
-        res_dir = execution.init_log_file(name="test_dataset",
-                                          views=["V1", "V2", "V3"],
-                                          cl_type="",
-                                          log=True,
-                                          debug=False,
-                                          label="No",
-                                          result_directory=tmp_path,
-                                          args={})
-        self.assertTrue(res_dir.startswith(os.path.join(os.path.dirname(os.path.dirname(os.path.realpath(__file__))),"tmp_tests", "test_dataset", "started" )))
-
-    def test_no_log(self):
-        res_dir = execution.init_log_file(name="test_dataset",
-                                          views=["V1", "V2", "V3"],
-                                          cl_type="",
-                                          log=False,
-                                          debug=False,
-                                          label="No1",
-                                          result_directory=tmp_path,
-                                          args={})
-        self.assertTrue(res_dir.startswith(os.path.join(
-            os.path.dirname(os.path.dirname(os.path.realpath(__file__))),
-            "tmp_tests", "test_dataset", "started")))
-
-    def test_debug(self):
-        res_dir = execution.init_log_file(name="test_dataset",
-                                          views=["V1", "V2", "V3"],
-                                          cl_type="",
-                                          log=True,
-                                          debug=True,
-                                          label="No",
-                                          result_directory=tmp_path,
-                                          args={})
-        self.assertTrue(res_dir.startswith(os.path.join(
-            os.path.dirname(os.path.dirname(os.path.realpath(__file__))),
-            "tmp_tests", "test_dataset", "debug_started")))
-
-class Test_gen_k_folds(unittest.TestCase):
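-    # gen_k_folds accepts a single RandomState or one per stats iteration and
-    # returns one k-fold splitter per iteration.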
-
-    @classmethod
-    def setUpClass(cls):
-        cls.random_state = np.random.RandomState(42)
-        cls.statsIter = 1
-
-    @classmethod
-    def tearDownClass(cls):
-        pass
-
-    def test_simple(self):
-        folds_list = execution.gen_k_folds(stats_iter=1,
-                                           nb_folds=4,
-                                           stats_iter_random_states=np.random.RandomState(42))
-        self.assertEqual(folds_list[0].n_splits, 4)
-        self.assertEqual(len(folds_list), 1)
-
-    def test_multiple_iters(self):
-        folds_list = execution.gen_k_folds(stats_iter=2,
-                                           nb_folds=4,
-                                           stats_iter_random_states=[np.random.RandomState(42), np.random.RandomState(43)])
-        self.assertEqual(folds_list[0].n_splits, 4)
-        self.assertEqual(len(folds_list), 2)
-
-    def test_list_rs(self):
-        folds_list = execution.gen_k_folds(stats_iter=1,
-                                           nb_folds=4,
-                                           stats_iter_random_states=[np.random.RandomState(42)])
-        self.assertEqual(folds_list[0].n_splits, 4)
-        self.assertEqual(len(folds_list), 1)
-
-
-class Test_init_views(unittest.TestCase):
-
-    @classmethod
-    def setUpClass(cls):
-        cls.random_state = np.random.RandomState(42)
-        cls.statsIter = 1
-
-    @classmethod
-    def tearDownClass(cls):
-        pass
-
-    def test_simple(self):
-        views, views_indices, all_views = execution.init_views(test_dataset, ["ViewN1", "ViewN2"])
-        self.assertEqual(views,  ["ViewN1", "ViewN2"])
-        self.assertEqual(views_indices, [1,2])
-        self.assertEqual(all_views, ["ViewN0", "ViewN1", "ViewN2"])
-
-        views, views_indices, all_views = execution.init_views(test_dataset,None)
-        self.assertEqual(views, ["ViewN0", "ViewN1", "ViewN2"])
-        self.assertEqual(views_indices, range(3))
-        self.assertEqual(all_views, ["ViewN0", "ViewN1", "ViewN2"])
-
-
-class Test_find_dataset_names(unittest.TestCase):
-
-    @classmethod
-    def setUpClass(cls):
-        os.mkdir(tmp_path)
-        with open(os.path.join(tmp_path, "test.txt"), "w") as file_stream:
-            file_stream.write("test")
-        with open(os.path.join(tmp_path, "test1.txt"), "w") as file_stream:
-            file_stream.write("test")
-
-    @classmethod
-    def tearDownClass(cls):
-        rm_tmp()
-
-    def test_simple(self):
-        path, names = execution.find_dataset_names(tmp_path, ".txt", ["test"])
-        self.assertEqual(path, tmp_path)
-        self.assertEqual(names, ["test"])
-        path, names = execution.find_dataset_names(tmp_path, ".txt", ["test", 'test1'])
-        self.assertEqual(path, tmp_path)
-        self.assertIn("test1", names)
-        path, names = execution.find_dataset_names("examples/data", ".hdf5", ["all"])
-        self.assertIn("doc_summit", names)
-        self.assertRaises(ValueError, execution.find_dataset_names, tmp_path+"test", ".txt",
-                                                   ["test"])
-        self.assertRaises(ValueError, execution.find_dataset_names, tmp_path, ".txt", ["ah"])
-
-
-class Test_initStatsIterRandomStates(unittest.TestCase):
-
-    @classmethod
-    def setUpClass(cls):
-        cls.random_state = np.random.RandomState(42)
-        cls.statsIter = 1
-
-    def test_one_statiter(cls):
-        cls.state = cls.random_state.get_state()[1]
-        statsIterRandomStates = execution.init_stats_iter_random_states(
-            cls.statsIter, cls.random_state)
-        np.testing.assert_array_equal(statsIterRandomStates[0].get_state()[1],
-                                      cls.state)
-
-    def test_multiple_iter(cls):
-        cls.statsIter = 3
-        statsIterRandomStates = execution.init_stats_iter_random_states(
-            cls.statsIter, cls.random_state)
-        cls.assertAlmostEqual(len(statsIterRandomStates), 3)
-        cls.assertNotEqual(statsIterRandomStates[0].randint(5000),
-                           statsIterRandomStates[1].randint(5000))
-        cls.assertNotEqual(statsIterRandomStates[0].randint(5000),
-                           statsIterRandomStates[2].randint(5000))
-        cls.assertNotEqual(statsIterRandomStates[2].randint(5000),
-                           statsIterRandomStates[1].randint(5000))
-
-
-class Test_getDatabaseFunction(unittest.TestCase):
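-    # get_database_function picks the loader from the file extension
-    # (.csv or .hdf5), with a dedicated loader for the "plausible" dataset.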
-
-    @classmethod
-    def setUpClass(cls):
-        cls.name = "zrtTap"
-        cls.type = ".csv"
-
-    def test_simple(cls):
-        getDB = execution.get_database_function(cls.name, cls.type)
-        from multiview_platform.mono_multi_view_classifiers.utils.get_multiview_db import \
-            get_classic_db_csv
-        cls.assertEqual(getDB, get_classic_db_csv)
-
-    def test_hdf5(cls):
-        cls.type = ".hdf5"
-        getDB = execution.get_database_function(cls.name, cls.type)
-        from multiview_platform.mono_multi_view_classifiers.utils.get_multiview_db import \
-            get_classic_db_hdf5
-        cls.assertEqual(getDB, get_classic_db_hdf5)
-
-    def test_plausible_hdf5(cls):
-        cls.name = "plausible"
-        cls.type = ".hdf5"
-        getDB = execution.get_database_function(cls.name, cls.type)
-        from multiview_platform.mono_multi_view_classifiers.utils.get_multiview_db import \
-            get_plausible_db_hdf5
-        cls.assertEqual(getDB, get_plausible_db_hdf5)
-
-
-class Test_initRandomState(unittest.TestCase):
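-    # init_random_state accepts either a seed given as a string or the path
-    # to a pickled RandomState, and pickles the state it returns.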
-
-    def setUp(self):
-        rm_tmp()
-        os.mkdir(tmp_path)
-
-    def tearDown(self):
-        os.rmdir(tmp_path)
-
-    def test_random_state_42(self):
-        randomState_42 = np.random.RandomState(42)
-        randomState = execution.init_random_state("42",
-                                                tmp_path)
-        os.remove(tmp_path+"random_state.pickle")
-        np.testing.assert_array_equal(randomState.beta(1, 100, 100),
-                                      randomState_42.beta(1, 100, 100))
-
-    def test_random_state_pickle(self):
-        randomState_to_pickle = execution.init_random_state(None,
-                                                          tmp_path)
-        pickled_randomState = execution.init_random_state(
-            tmp_path+"random_state.pickle",
-            tmp_path)
-        os.remove(tmp_path+"random_state.pickle")
-
-        np.testing.assert_array_equal(randomState_to_pickle.beta(1, 100, 100),
-                                      pickled_randomState.beta(1, 100, 100))
-
-
-class FakeArg():
-
-    def __init__(self):
-        self.name = "zrtTap"
-        self.CL_type = ["fromage", "jambon"]
-        self.views = ["view1", "view2"]
-        self.log = True
-
-
-# Impossible to test as the main directory is not the same for the exec and the test
-# class Test_initLogFile(unittest.TestCase):
-#
-#     @classmethod
-#     def setUpClass(cls):
-#         cls.fakeArgs = FakeArg()
-#         cls.timestr = time.strftime("%Y_%m_%d-%H_%M")
-#
-#     def test_initLogFile(cls):
-#         cls.timestr = time.strftime("%Y_%m_%d-%H_%M")
-#         execution.initLogFile(cls.fakeArgs)
-#         cls.assertIn("zrtTap", os.listdir("mutliview_platform/results"), "Database directory not created")
-#         cls.assertIn("started_"+cls.timestr, os.listdir("mutliview_platform/results/zrtTap"),"experimentation dir not created")
-#         cls.assertIn(cls.timestr + "-" + ''.join(cls.fakeArgs.CL_type) + "-" + "_".join(
-#         cls.fakeArgs.views) + "-" + cls.fakeArgs.name + "-LOG.log", os.listdir("mutliview_platform/results/zrtTap/"+"started_"+cls.timestr), "logfile was not created")
-#
-#     @classmethod
-#     def tearDownClass(cls):
-#         shutil.rmtree("multiview_platform/results/zrtTap")
-#         pass
-
-
-class Test_genSplits(unittest.TestCase):
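-    # gen_splits should return one stratified (train, test) index pair per
-    # random state, honouring the split ratio and keeping every class
-    # represented on both sides.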
-
-    def setUp(self):
-        self.statsIter = 3
-        self.statsIterRandomStates = [np.random.RandomState(42 + i + 1) for i in
-                                      range(self.statsIter)]
-        self.random_state = np.random.RandomState(42)
-        self.X_indices = self.random_state.randint(0, 500, 50)
-        self.labels = np.zeros(500)
-        self.labels[self.X_indices[:10]] = 1
-        self.labels[self.X_indices[11:30]] = 2  # To test multiclass
-        self.splitRatio = 0.2
-
-    def test_simple(self):
-        splits = execution.gen_splits(self.labels, self.splitRatio,
-                                     self.statsIterRandomStates)
-        self.assertEqual(len(splits), 3)
-        self.assertEqual(len(splits[1]), 2)
-        self.assertEqual(type(splits[1][0]), np.ndarray)
-        self.assertAlmostEqual(len(splits[1][0]), 0.8 * 500)
-        self.assertAlmostEqual(len(splits[1][1]), 0.2 * 500)
-        self.assertGreater(len(np.where(self.labels[splits[1][0]] == 0)[0]), 0)
-        self.assertGreater(len(np.where(self.labels[splits[1][0]] == 1)[0]), 0)
-        self.assertGreater(len(np.where(self.labels[splits[1][0]] == 2)[0]), 0)
-        self.assertGreater(len(np.where(self.labels[splits[1][1]] == 0)[0]), 0)
-        self.assertGreater(len(np.where(self.labels[splits[1][1]] == 1)[0]), 0)
-        self.assertGreater(len(np.where(self.labels[splits[1][1]] == 2)[0]), 0)
-
-    def test_genSplits_no_iter(self):
-        splits = execution.gen_splits(self.labels, self.splitRatio,
-                                     self.statsIterRandomStates)
-        self.assertEqual(len(splits), 3)
-        self.assertEqual(len(splits[0]), 2)
-        self.assertEqual(type(splits[0][0]), np.ndarray)
-        self.assertAlmostEqual(len(splits[0][0]), 0.8 * 500)
-        self.assertAlmostEqual(len(splits[0][1]), 0.2 * 500)
-        self.assertGreater(len(np.where(self.labels[splits[0][0]] == 0)[0]), 0)
-        self.assertGreater(len(np.where(self.labels[splits[0][0]] == 1)[0]), 0)
-        self.assertGreater(len(np.where(self.labels[splits[0][0]] == 2)[0]), 0)
-        self.assertGreater(len(np.where(self.labels[splits[0][1]] == 0)[0]), 0)
-        self.assertGreater(len(np.where(self.labels[splits[0][1]] == 1)[0]), 0)
-        self.assertGreater(len(np.where(self.labels[splits[0][1]] == 2)[0]), 0)
-
-
-class Test_genKFolds(unittest.TestCase):
-
-    def setUp(self):
-        self.statsIter = 2
-        self.nbFolds = 5
-        self.statsIterRandomStates = [np.random.RandomState(42),
-                                      np.random.RandomState(94)]
-
-    def test_genKFolds_iter(self):
-        pass
-
-
-class Test_genDirecortiesNames(unittest.TestCase):
-
-    @classmethod
-    def setUpClass(cls):
-        cls.directory = tmp_path
-        cls.stats_iter = 5
-
-    def test_simple_ovo(cls):
-        directories = execution.gen_direcorties_names(cls.directory,
-                                                    cls.stats_iter)
-        cls.assertEqual(len(directories), 5)
-        cls.assertEqual(directories[0], os.path.join(tmp_path, "iter_1"))
-        cls.assertEqual(directories[-1], os.path.join(tmp_path, "iter_5"))
-
-    def test_ovo_no_iter(cls):
-        cls.stats_iter = 1
-        directories = execution.gen_direcorties_names(cls.directory,
-                                                    cls.stats_iter)
-        cls.assertEqual(len(directories), 1)
-        cls.assertEqual(directories[0], tmp_path)
-
-
-class Test_genArgumentDictionaries(unittest.TestCase):
-
-    @classmethod
-    def setUpClass(cls):
-        cls.labelsDictionary = {0: "yes", 1: "No", 2: "Maybe"}
-        cls.direcories = ["Res/iter_1", "Res/iter_2"]
-        cls.multiclassLabels = [np.array([0, 1, -100, 1, 0]),
-                                np.array([1, 0, -100, 1, 0]),
-                                np.array([0, 1, -100, 0, 1])]
-        cls.labelsCombinations = [[0, 1], [0, 2], [1, 2]]
-        cls.indicesMulticlass = [[[[], []], [[], []], [[], []]], [[], [], []]]
-
-if __name__ == '__main__':
-    unittest.main()
\ No newline at end of file
diff --git a/multiview_platform/tests/test_utils/test_hyper_parameter_search.py b/multiview_platform/tests/test_utils/test_hyper_parameter_search.py
deleted file mode 100644
index 41287784af397b9db1246c513d257bf8c8716407..0000000000000000000000000000000000000000
--- a/multiview_platform/tests/test_utils/test_hyper_parameter_search.py
+++ /dev/null
@@ -1,217 +0,0 @@
-import os
-import unittest
-
-import h5py
-import numpy as np
-from sklearn.model_selection import StratifiedKFold
-from sklearn.metrics import accuracy_score, make_scorer
-from multiview_platform.tests.utils import rm_tmp, tmp_path, test_dataset
-from sklearn.base import BaseEstimator
-import sys
-
-
-from multiview_platform.mono_multi_view_classifiers.utils.dataset import HDF5Dataset
-from multiview_platform.mono_multi_view_classifiers.utils import hyper_parameter_search
-from multiview_platform.mono_multi_view_classifiers.multiview_classifiers import weighted_linear_early_fusion
-
-
-class FakeEstim(BaseEstimator):
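-    # Minimal sklearn-compatible stub for the monoview search tests: it
-    # ignores its data and always predicts zeros.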
-    def __init__(self, param1=None, param2=None, random_state=None):
-        self.param1 = param1
-        self.param2 = param2
-
-    def fit(self, X, y,):
-        return self
-
-    def accepts_multi_class(self, rs):
-        return True
-
-    def predict(self, X):
-        return np.zeros(X.shape[0])
-
-class FakeEstimMV(BaseEstimator):
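-    # Multiview stub: with param1 == "return exact" it echoes the labels it
-    # was fitted on, so the search has one configuration with a perfect score.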
-    def __init__(self, param1=None, param2=None):
-        self.param1 = param1
-        self.param2 = param2
-
-    def fit(self, X, y,train_indices=None, view_indices=None):
-        self.y = y
-        return self
-
-    def predict(self, X, example_indices=None, view_indices=None):
-        if self.param1=="return exact":
-            return self.y[example_indices]
-        else:
-            return np.zeros(example_indices.shape[0])
-
-
-class Test_Random(unittest.TestCase):
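-    # Covers the Random hyper-parameter search wrapper: sampled parameters in
-    # cv_results_, multiview fitting, and the equivalent_draws mode, which
-    # scales n_iter by the number of views.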
-
-    @classmethod
-    def setUpClass(cls):
-        n_splits=2
-        cls.estimator = FakeEstim()
-        cls.param_distributions = {"param1":[10,100], "param2":[11, 101]}
-        cls.n_iter = 4
-        cls.refit = True
-        cls.n_jobs = 1
-        cls.scoring = make_scorer(accuracy_score, )
-        cls.cv = StratifiedKFold(n_splits=n_splits, )
-        cls.random_state = np.random.RandomState(42)
-        cls.learning_indices = np.array([0,1,2, 3, 4,])
-        cls.view_indices = None
-        cls.framework = "monoview"
-        cls.equivalent_draws = False
-        cls.X = cls.random_state.randint(0,100, (10,11))
-        cls.y = cls.random_state.randint(0,2, 10)
-
-    def test_simple(self):
-        hyper_parameter_search.Random(
-            self.estimator, self.param_distributions, n_iter=self.n_iter,
-            refit=self.refit, n_jobs=self.n_jobs, scoring=self.scoring, cv=self.cv,
-            random_state=self.random_state,
-            learning_indices=self.learning_indices, view_indices=self.view_indices,
-            framework=self.framework,
-            equivalent_draws=self.equivalent_draws
-        )
-
-    def test_fit(self):
-        RSCV = hyper_parameter_search.Random(
-            self.estimator, self.param_distributions, n_iter=self.n_iter,
-            refit=self.refit, n_jobs=self.n_jobs, scoring=self.scoring,
-            cv=self.cv,
-            random_state=self.random_state,
-            learning_indices=self.learning_indices,
-            view_indices=self.view_indices,
-            framework=self.framework,
-            equivalent_draws=self.equivalent_draws
-        )
-        RSCV.fit(self.X, self.y, )
-        tested_param1 = np.ma.masked_array(data=[10,10,100,100],
-                     mask=[False, False, False, False])
-        np.testing.assert_array_equal(RSCV.cv_results_['param_param1'],
-                                      tested_param1)
-
-    def test_fit_multiview(self):
-        RSCV = hyper_parameter_search.Random(
-            FakeEstimMV(), self.param_distributions, n_iter=self.n_iter,
-            refit=self.refit, n_jobs=self.n_jobs, scoring=self.scoring,
-            cv=self.cv,
-            random_state=self.random_state,
-            learning_indices=self.learning_indices,
-            view_indices=self.view_indices,
-            framework="multiview",
-            equivalent_draws=self.equivalent_draws
-        )
-        RSCV.fit(test_dataset, self.y, )
-        self.assertEqual(RSCV.n_iter, self.n_iter)
-
-    def test_fit_multiview_equiv(self):
-        self.n_iter=1
-        RSCV = hyper_parameter_search.Random(
-            FakeEstimMV(), self.param_distributions, n_iter=self.n_iter,
-            refit=self.refit, n_jobs=self.n_jobs, scoring=self.scoring,
-            cv=self.cv,
-            random_state=self.random_state,
-            learning_indices=self.learning_indices,
-            view_indices=self.view_indices,
-            framework="multiview",
-            equivalent_draws=True
-        )
-        RSCV.fit(test_dataset, self.y, )
-        self.assertEqual(RSCV.n_iter, self.n_iter*test_dataset.nb_view)
-
-    def test_gets_good_params(self):
-        self.param_distributions["param1"].append('return exact')
-        self.n_iter=6
-        RSCV = hyper_parameter_search.Random(
-            FakeEstimMV(), self.param_distributions, n_iter=self.n_iter,
-            refit=self.refit, n_jobs=self.n_jobs, scoring=self.scoring,
-            cv=self.cv,
-            random_state=self.random_state,
-            learning_indices=self.learning_indices,
-            view_indices=self.view_indices,
-            framework="multiview",
-            equivalent_draws=False
-        )
-        RSCV.fit(test_dataset, self.y, )
-        self.assertEqual(RSCV.best_params_["param1"], "return exact")
-
-
-class Test_Grid(unittest.TestCase):
-
-    @classmethod
-    def setUpClass(cls):
-        cls.parameter_grid = {"param1":[5,6], "param2":[7,8]}
-        cls.estimator = FakeEstim()
-
-    def test_simple(self):
-        grid = hyper_parameter_search.Grid(self.estimator,
-                                           param_grid=self.parameter_grid)
-
-    def test_get_candidate_params(self):
-        grid = hyper_parameter_search.Grid(self.estimator,
-                                           param_grid=self.parameter_grid)
-        grid.get_candidate_params(None)
-        self.assertEqual(grid.candidate_params, [{"param1": 5, "param2": 7},
-                                                 {"param1": 5, "param2": 8},
-                                                 {"param1": 6, "param2": 7},
-                                                 {"param1": 6, "param2": 8}])
-
-
-# if __name__ == '__main__':
-#     # unittest.main()
-#     suite = unittest.TestLoader().loadTestsFromTestCase(Test_randomized_search)
-#     unittest.TextTestRunner(verbosity=2).run(suite)
-# class Test_randomized_search(unittest.TestCase):
-#
-#     @classmethod
-#     def setUpClass(cls):
-#         rm_tmp()
-#         cls.random_state = np.random.RandomState(42)
-#         cls.view_weights = [0.5, 0.5]
-#         os.mkdir(tmp_path)
-#         cls.dataset_file = h5py.File(
-#             tmp_path+"test_file.hdf5", "w")
-#         cls.labels = cls.dataset_file.create_dataset("Labels",
-#                                                      data=np.array(
-#                                                          [0, 1, 0, 0, 1, 0, 1, 0, 0, 1, ]))
-#         cls.view0_data = cls.random_state.randint(1, 10, size=(10, 4))
-#         view0 = cls.dataset_file.create_dataset("View0",
-#                                                 data=cls.view0_data)
-#         view0.attrs["sparse"] = False
-#         view0.attrs["name"] = "ViewN0"
-#         cls.view1_data = cls.random_state.randint(1, 10, size=(10, 4))
-#         view1 = cls.dataset_file.create_dataset("View1",
-#                                                 data=cls.view1_data)
-#         view1.attrs["sparse"] = False
-#         view1.attrs["name"] = "ViewN1"
-#         metaDataGrp = cls.dataset_file.create_group("Metadata")
-#         metaDataGrp.attrs["nbView"] = 2
-#         metaDataGrp.attrs["nbClass"] = 2
-#         metaDataGrp.attrs["datasetLength"] = 10
-#         cls.monoview_classifier_name = "decision_tree"
-#         cls.monoview_classifier_config = {"max_depth": 1,
-#                                           "criterion": "gini",
-#                                           "splitter": "best"}
-#         cls.k_folds = StratifiedKFold(n_splits=3, random_state=cls.random_state,
-#                                       shuffle=True)
-#         cls.learning_indices = np.array([1,2,3,4, 5,6,7,8,9])
-#         cls.dataset = HDF5Dataset(hdf5_file=cls.dataset_file)
-#
-#     @classmethod
-#     def tearDownClass(cls):
-#         cls.dataset_file.close()
-#         rm_tmp()
-#
-#
-#     def test_simple(self):
-#         best_params, _, params, scores = hyper_parameter_search.randomized_search(
-#             self.dataset, self.labels[()], "multiview", self.random_state, tmp_path,
-#             weighted_linear_early_fusion, "WeightedLinearEarlyFusion", self.k_folds,
-#         1, ["accuracy_score", None], 2, {}, learning_indices=self.learning_indices)
-#         self.assertIsInstance(best_params, dict)
diff --git a/multiview_platform/tests/test_utils/test_multiclass.py b/multiview_platform/tests/test_utils/test_multiclass.py
deleted file mode 100644
index 178308ad4da87818e2ff388e3c84aa44ea06fd24..0000000000000000000000000000000000000000
--- a/multiview_platform/tests/test_utils/test_multiclass.py
+++ /dev/null
@@ -1,164 +0,0 @@
-import unittest
-
-import numpy as np
-from sklearn.base import BaseEstimator
-
-from multiview_platform.mono_multi_view_classifiers.utils.multiclass import get_mc_estim, \
-OVRWrapper, OVOWrapper, MultiviewOVOWrapper, MultiviewOVRWrapper
-
-class FakeMCEstim(BaseEstimator):
-
-    def __init__(self):
-        self.short_name="short_name"
-
-    def accepts_multi_class(self, random_state):
-        return False
-
-class FakeEstimNative(FakeMCEstim):
-
-    def accepts_multi_class(self, random_state):
-        return True
-
-
-class FakeNonProbaEstim(FakeMCEstim):
-    pass
-
-
-class FakeProbaEstim(FakeMCEstim):
-
-    def predict_proba(self):
-        pass
-
-
-class Test_get_mc_estim(unittest.TestCase):
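-    # get_mc_estim should return the estimator unchanged for binary labels or
-    # native multiclass support, and otherwise wrap it: OVO without
-    # predict_proba, OVR with it, using the Multiview* wrappers when asked.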
-
-    @classmethod
-    def setUpClass(cls):
-        cls.random_state = np.random.RandomState(42)
-        cls.y = cls.random_state.randint(0, 3, 10)
-
-    def test_biclass(self):
-        y = self.random_state.randint(0,2,10)
-        estimator="Test"
-        returned_estimator = get_mc_estim(estimator, self.random_state, y=y)
-        self.assertEqual(returned_estimator, estimator)
-
-    def test_multiclass_native(self):
-        estimator = FakeEstimNative()
-        returned_estimator = get_mc_estim(estimator, self.random_state, y=self.y)
-        self.assertIsInstance(returned_estimator, FakeEstimNative)
-
-    def test_multiclass_ovo(self):
-        estimator = FakeNonProbaEstim()
-        returned_estimator = get_mc_estim(estimator, self.random_state, y=self.y)
-        self.assertIsInstance(returned_estimator, OVOWrapper)
-
-    def test_multiclass_ovr(self):
-        estimator = FakeProbaEstim()
-        returned_estimator = get_mc_estim(estimator, self.random_state, y=self.y)
-        self.assertIsInstance(returned_estimator, OVRWrapper)
-
-    def test_multiclass_ovo_multiview(self):
-        estimator = FakeNonProbaEstim()
-        returned_estimator = get_mc_estim(estimator, self.random_state,
-                                          multiview=True, y=self.y, )
-        self.assertIsInstance(returned_estimator, MultiviewOVOWrapper)
-
-    def test_multiclass_ovr_multiview(self):
-        estimator = FakeProbaEstim()
-        returned_estimator = get_mc_estim(estimator, self.random_state,
-                                          multiview=True, y=self.y,)
-        self.assertIsInstance(returned_estimator, MultiviewOVRWrapper)
-
-class FakeMVClassifier(BaseEstimator):
-
-    def __init__(self, short_name="None"):
-        self.short_name = short_name
-
-    def fit(self, X, y, train_indices=None, view_indices=None):
-        self.n_classes = np.unique(y[train_indices]).shape[0]
-        self.views_indices = view_indices
-
-    def predict(self, X, example_indices=None, view_indices=None):
-        self.example_indices = example_indices
-        self.views_indices = view_indices
-        return np.zeros((example_indices.shape[0]))
-
-class FakeMVClassifierProb(FakeMVClassifier):
-
-    def predict_proba(self, X, example_indices=None, view_indices=None):
-        self.example_indices = example_indices
-        self.views_indices = view_indices
-        return np.zeros((example_indices.shape[0], 2))
-
-class Test_MultiviewOVRWrapper_fit(unittest.TestCase):
-
-    @classmethod
-    def setUpClass(cls):
-        cls.random_state = np.random.RandomState(42)
-        cls.X = "dataset"
-        cls.n_classes=3
-        cls.y = cls.random_state.randint(0,cls.n_classes,50)
-        cls.train_indices = np.arange(25)
-        cls.example_indices = np.arange(25)+25
-        cls.view_indices="None"
-        cls.wrapper = MultiviewOVRWrapper(FakeMVClassifierProb(), )
-
-    def test_fit(self):
-        fitted = self.wrapper.fit(self.X, self.y, train_indices=self.train_indices,
-                                  view_indices=self.view_indices)
-        for estimator in fitted.estimators_:
-            self.assertEqual(estimator.n_classes,2)
-            self.assertEqual(estimator.views_indices, "None")
-
-    def test_predict(self):
-        fitted = self.wrapper.fit(self.X, self.y, train_indices=self.train_indices,
-                                  view_indices=self.view_indices)
-        pred = fitted.predict(self.X, example_indices=self.example_indices,
-                       view_indices=self.view_indices)
-        for estimator in fitted.estimators_:
-            np.testing.assert_array_equal(estimator.example_indices,
-                                          self.example_indices)
-
-
-class FakeDset:
-
-    def __init__(self, n_examples):
-        self.n_examples = n_examples
-
-    def get_nb_examples(self):
-        return self.n_examples
-
-class Test_MultiviewOVOWrapper_fit(unittest.TestCase):
-
-    @classmethod
-    def setUpClass(cls):
-        cls.random_state = np.random.RandomState(42)
-        cls.n_examples=50
-        cls.X = FakeDset(n_examples=cls.n_examples)
-        cls.n_classes=3
-        cls.y = cls.random_state.randint(0,cls.n_classes,cls.n_examples)
-        cls.train_indices = np.arange(int(cls.n_examples/2))
-        cls.example_indices = np.arange(int(cls.n_examples/2))+int(cls.n_examples/2)
-        cls.view_indices="None"
-        cls.wrapper = MultiviewOVOWrapper(FakeMVClassifier(), )
-
-    def test_fit(self):
-        fitted = self.wrapper.fit(self.X, self.y, train_indices=self.train_indices,
-                                  view_indices=self.view_indices)
-        for estimator in fitted.estimators_:
-            self.assertEqual(estimator.n_classes,2)
-            self.assertEqual(estimator.views_indices, "None")
-
-    def test_predict(self):
-        fitted = self.wrapper.fit(self.X, self.y, train_indices=self.train_indices,
-                                  view_indices=self.view_indices)
-        pred = fitted.predict(self.X, example_indices=self.example_indices,
-                       view_indices=self.view_indices)
-        for estimator in fitted.estimators_:
-            np.testing.assert_array_equal(estimator.example_indices,
-                                          self.example_indices)
-
-
-if __name__ == '__main__':
-    unittest.main()
\ No newline at end of file
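For context: the tests removed above exercised the one-vs-rest and one-vs-one multiview wrappers on a 3-class problem, which is why every fitted sub-estimator is checked for `n_classes == 2`. One invariant the tests leave implicit is the number of binary sub-estimators each decomposition builds; a minimal, purely illustrative sketch (the helper name is not from the repository):

```python
# OVR builds one binary estimator per class; OVO builds one per unordered
# class pair. For the 3-class fixture above, both counts happen to equal 3.
def expected_estimator_counts(n_classes):
    """(n_ovr, n_ovo) sub-estimators for an n_classes problem."""
    return n_classes, n_classes * (n_classes - 1) // 2

assert expected_estimator_counts(3) == (3, 3)
assert expected_estimator_counts(4) == (4, 6)
```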
diff --git a/multiview_platform/tests/utils.py b/multiview_platform/tests/utils.py
deleted file mode 100644
index 9a3f04cb0aecb9ba34e7f5318d7f7bab4c81478d..0000000000000000000000000000000000000000
--- a/multiview_platform/tests/utils.py
+++ /dev/null
@@ -1,53 +0,0 @@
-import os
-import numpy as np
-import h5py
-
-from ..mono_multi_view_classifiers.utils.dataset import HDF5Dataset
-
-
-tmp_path = os.path.join(os.path.dirname(os.path.abspath(__file__)), "tmp_tests/")
-# TODO Convert to ram dataset
-test_dataset = HDF5Dataset(hdf5_file=h5py.File(os.path.join(os.path.dirname(os.path.abspath(__file__)), "test_database.hdf5"), "r"))
-
-def rm_tmp(path=tmp_path):
-    try:
-        for file_name in os.listdir(path):
-            if os.path.isdir(os.path.join(path, file_name)):
-                rm_tmp(os.path.join(path, file_name))
-            else:
-                os.remove(os.path.join(path, file_name))
-        os.rmdir(path)
-    except:
-        pass
-
-
-def gen_test_dataset(random_state=np.random.RandomState(42)):
-    dataset_file = h5py.File("test_database.hdf5",  "w")
-    view_names = ["ViewN0", "ViewN1", "ViewN2"]
-    views = [random_state.randint(0,100,(5,6))
-             for _ in range(len(view_names))]
-    labels = random_state.randint(0,2, 5)
-    label_names = ["yes", "no"]
-    for view_index, (view_name, view) in enumerate(
-            zip(view_names, views)):
-        view_dataset = dataset_file.create_dataset("View" + str(view_index),
-                                                   view.shape,
-                                                   data=view)
-        view_dataset.attrs["name"] = view_name
-        view_dataset.attrs["sparse"] = False
-    labels_dataset = dataset_file.create_dataset("Labels",
-                                                 shape=labels.shape,
-                                                 data=labels)
-    labels_dataset.attrs["names"] = [label_name.encode()
-                                     if not isinstance(label_name, bytes)
-                                     else label_name
-                                     for label_name in label_names]
-    meta_data_grp = dataset_file.create_group("Metadata")
-    meta_data_grp.attrs["nbView"] = len(views)
-    meta_data_grp.attrs["nbClass"] = len(np.unique(labels))
-    meta_data_grp.attrs["datasetLength"] = len(labels)
-    dataset_file.close()
-
-
-if __name__ == "__main__":
-    gen_test_dataset()
diff --git a/setup.cfg b/setup.cfg
index 43c4b4ed043e81147260b3d5e1bdec505d7f19c9..5241fde359a3844ec560340808b7dbf99b16b170 100644
--- a/setup.cfg
+++ b/setup.cfg
@@ -1,14 +1,14 @@
 [tool:pytest]
-testpaths = multiview_platform
+testpaths = summit
 addopts = --cov-report=html
           --verbose
-          --cov=multiview_platform
+          --cov=summit
           --cov-report=term-missing
 ;          --cov-config setup.cfg
           --cache-clear
 
 [coverage:run]
-source = multiview_platform
+source = summit
 include = */mono_multi_view_classifiers/*
 omit = */tests/*
        */examples/*
diff --git a/setup.py b/setup.py
index 8c9bfe2f38516d714047e561c22eae08e0e2be5c..967438205fcd6b7cdea3b937533608447560d915 100644
--- a/setup.py
+++ b/setup.py
@@ -85,7 +85,7 @@ def setup_package():
     # The syntax is "command-name-to-create = package.module:function".
     entry_points={
         'console_scripts': [
-            'exec_multiview = multiview_platform.execute:exec',
+            'exec_multiview = summit.execute:exec',
         ],
     },
 
@@ -96,7 +96,7 @@ def setup_package():
     # There are still a ton of other possible parameters, but these
     # cover 90% of the needs
     # ext_modules=cythonize(
-    #     "multiview_platform/mono_multi_view_classifiers/monoview/additions/_custom_criterion.pyx"),
+    #     "summit/multiview_platform/monoview/additions/_custom_criterion.pyx"),
 )
 
 if __name__ == "__main__":
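If the commented-out Cython extension were ever re-enabled under the new layout, it would presumably look like the sketch below (an assumption: it requires Cython to be installed and the `.pyx` file to exist at the path shown in the comment above):

```python
from setuptools import setup
from Cython.Build import cythonize

setup(
    # Compiles the custom splitting criterion to a C extension.
    ext_modules=cythonize(
        "summit/multiview_platform/monoview/additions/_custom_criterion.pyx"),
)
```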