diff --git a/doc/index.rst b/doc/index.rst
index 8d2715a2f9ad8b6f5493666ef01a24d4ed86054c..be671efbd27f71e860005411f247ede66c350ef9 100644
--- a/doc/index.rst
+++ b/doc/index.rst
@@ -21,10 +21,10 @@ Documentation
 
 
 
-   reference/api
    tutorial/install_devel
    tutorial/auto_examples/index
    tutorial/times
+   reference/api
    tutorial/credits
 
 
diff --git a/doc/tutorial/auto_examples/auto_examples_jupyter.zip b/doc/tutorial/auto_examples/auto_examples_jupyter.zip
index a30ce0bbec3aa32afbeda1d6943444a8219a4ad9..b64aaf69af22de1387700bc0320bd337f4a21f66 100644
Binary files a/doc/tutorial/auto_examples/auto_examples_jupyter.zip and b/doc/tutorial/auto_examples/auto_examples_jupyter.zip differ
diff --git a/doc/tutorial/auto_examples/auto_examples_python.zip b/doc/tutorial/auto_examples/auto_examples_python.zip
index 455de39223fbe7205085e61edfda8b45f4bf4635..e9ec57bc9be6504979608c6aff072b19896c5c6f 100644
Binary files a/doc/tutorial/auto_examples/auto_examples_python.zip and b/doc/tutorial/auto_examples/auto_examples_python.zip differ
diff --git a/doc/tutorial/auto_examples/combo/sg_execution_times.rst b/doc/tutorial/auto_examples/combo/sg_execution_times.rst
index 70b3c32a0a0c84ad2aee34a5093e2e355779e190..58958daabc197a267c3fa7eb224ef8ebbc65c577 100644
--- a/doc/tutorial/auto_examples/combo/sg_execution_times.rst
+++ b/doc/tutorial/auto_examples/combo/sg_execution_times.rst
@@ -1,14 +1,14 @@
 
 :orphan:
 
-.. _sphx_glr_tutorial_auto_examples_cumbo_sg_execution_times:
+.. _sphx_glr_tutorial_auto_examples_combo_sg_execution_times:
 
 Computation times
 =================
-**00:01.102** total execution time for **tutorial_auto_examples_cumbo** files:
+**00:03.474** total execution time for **tutorial_auto_examples_combo** files:
 
 +--------------------------------------------------------------------------------------------------------------------+-----------+--------+
-| :ref:`sphx_glr_tutorial_auto_examples_cumbo_plot_cumbo_2_views_2_classes.py` (``plot_cumbo_2_views_2_classes.py``) | 00:00.603 | 0.0 MB |
+| :ref:`sphx_glr_tutorial_auto_examples_combo_plot_combo_2_views_2_classes.py` (``plot_combo_2_views_2_classes.py``) | 00:02.387 | 0.0 MB |
 +--------------------------------------------------------------------------------------------------------------------+-----------+--------+
-| :ref:`sphx_glr_tutorial_auto_examples_cumbo_plot_cumbo_3_views_3_classes.py` (``plot_cumbo_3_views_3_classes.py``) | 00:00.499 | 0.0 MB |
+| :ref:`sphx_glr_tutorial_auto_examples_combo_plot_combo_3_views_3_classes.py` (``plot_combo_3_views_3_classes.py``) | 00:01.088 | 0.0 MB |
 +--------------------------------------------------------------------------------------------------------------------+-----------+--------+
diff --git a/doc/tutorial/auto_examples/index.rst b/doc/tutorial/auto_examples/index.rst
index b2d29ff53e20f07f3a7d6efe81c7f9f99c6e23e5..ae4af5f17c90c7d2913059f96e26fd508d90f9c8 100644
--- a/doc/tutorial/auto_examples/index.rst
+++ b/doc/tutorial/auto_examples/index.rst
@@ -19,7 +19,7 @@ Multimodal Examples
 
 
 
-.. _sphx_glr_tutorial_auto_examples_cumbo:
+.. _sphx_glr_tutorial_auto_examples_combo:
 
 
 MuCuMBo Examples
@@ -37,9 +37,9 @@ cooperation between views for classification.
 
 .. only:: html
 
- .. figure:: /tutorial/auto_examples/cumbo/images/thumb/sphx_glr_plot_cumbo_2_views_2_classes_thumb.png
+ .. figure:: /tutorial/auto_examples/combo/images/thumb/sphx_glr_plot_combo_2_views_2_classes_thumb.png
 
-     :ref:`sphx_glr_tutorial_auto_examples_cumbo_plot_cumbo_2_views_2_classes.py`
+     :ref:`sphx_glr_tutorial_auto_examples_combo_plot_combo_2_views_2_classes.py`
 
 .. raw:: html
 
@@ -49,7 +49,7 @@ cooperation between views for classification.
 .. toctree::
    :hidden:
 
-   /tutorial/auto_examples/cumbo/plot_cumbo_2_views_2_classes
+   /tutorial/auto_examples/combo/plot_combo_2_views_2_classes
 
 .. raw:: html
 
@@ -57,9 +57,9 @@ cooperation between views for classification.
 
 .. only:: html
 
- .. figure:: /tutorial/auto_examples/cumbo/images/thumb/sphx_glr_plot_cumbo_3_views_3_classes_thumb.png
+ .. figure:: /tutorial/auto_examples/combo/images/thumb/sphx_glr_plot_combo_3_views_3_classes_thumb.png
 
-     :ref:`sphx_glr_tutorial_auto_examples_cumbo_plot_cumbo_3_views_3_classes.py`
+     :ref:`sphx_glr_tutorial_auto_examples_combo_plot_combo_3_views_3_classes.py`
 
 .. raw:: html
 
@@ -69,7 +69,7 @@ cooperation between views for classification.
 .. toctree::
    :hidden:
 
-   /tutorial/auto_examples/cumbo/plot_cumbo_3_views_3_classes
+   /tutorial/auto_examples/combo/plot_combo_3_views_3_classes
 .. raw:: html
 
     <div class="sphx-glr-clear"></div>
@@ -242,13 +242,13 @@ The following toy examples illustrate how the multimodal as usecase on digit  da
 
 .. raw:: html
 
-    <div class="sphx-glr-thumbcontainer" tooltip="multi class digit from sklearn, multivue  - vue 0 digit data (color of sklearn)  - vue 1 gradia...">
+    <div class="sphx-glr-thumbcontainer" tooltip="Use Case MKL on digit">
 
 .. only:: html
 
- .. figure:: /tutorial/auto_examples/usecase/images/thumb/sphx_glr_plot_usecase_exampleMuCuBo_thumb.png
+ .. figure:: /tutorial/auto_examples/usecase/images/thumb/sphx_glr_plot_usecase_exampleMKL_thumb.png
 
-     :ref:`sphx_glr_tutorial_auto_examples_usecase_plot_usecase_exampleMuCuBo.py`
+     :ref:`sphx_glr_tutorial_auto_examples_usecase_plot_usecase_exampleMKL.py`
 
 .. raw:: html
 
@@ -258,17 +258,17 @@ The following toy examples illustrate how the multimodal as usecase on digit  da
 .. toctree::
    :hidden:
 
-   /tutorial/auto_examples/usecase/plot_usecase_exampleMuCuBo
+   /tutorial/auto_examples/usecase/plot_usecase_exampleMKL
 
 .. raw:: html
 
-    <div class="sphx-glr-thumbcontainer" tooltip="Use Case MKL on digit">
+    <div class="sphx-glr-thumbcontainer" tooltip="multi class digit from sklearn, multivue  - vue 0 digit data (color of sklearn)  - vue 1 gradia...">
 
 .. only:: html
 
- .. figure:: /tutorial/auto_examples/usecase/images/thumb/sphx_glr_plot_usecase_exampleMKL_thumb.png
+ .. figure:: /tutorial/auto_examples/usecase/images/thumb/sphx_glr_plot_usecase_exampleMuComBo_thumb.png
 
-     :ref:`sphx_glr_tutorial_auto_examples_usecase_plot_usecase_exampleMKL.py`
+     :ref:`sphx_glr_tutorial_auto_examples_usecase_plot_usecase_exampleMuComBo.py`
 
 .. raw:: html
 
@@ -278,7 +278,7 @@ The following toy examples illustrate how the multimodal as usecase on digit  da
 .. toctree::
    :hidden:
 
-   /tutorial/auto_examples/usecase/plot_usecase_exampleMKL
+   /tutorial/auto_examples/usecase/plot_usecase_exampleMuComBo
 .. raw:: html
 
     <div class="sphx-glr-clear"></div>
@@ -291,15 +291,15 @@ The following toy examples illustrate how the multimodal as usecase on digit  da
     :class: sphx-glr-footer-gallery
 
 
-  .. container:: sphx-glr-download
+  .. container:: sphx-glr-download sphx-glr-download-python
 
-    :download:`Download all examples in Python source code: auto_examples_python.zip <//home/dominique/projets/ANR-Lives/scikit-multimodallearn/doc/tutorial/auto_examples/auto_examples_python.zip>`
+    :download:`Download all examples in Python source code: auto_examples_python.zip <//home/baptiste/Documents/Gitwork/scikit-multimodallearn/doc/tutorial/auto_examples/auto_examples_python.zip>`
 
 
 
-  .. container:: sphx-glr-download
+  .. container:: sphx-glr-download sphx-glr-download-jupyter
 
-    :download:`Download all examples in Jupyter notebooks: auto_examples_jupyter.zip <//home/dominique/projets/ANR-Lives/scikit-multimodallearn/doc/tutorial/auto_examples/auto_examples_jupyter.zip>`
+    :download:`Download all examples in Jupyter notebooks: auto_examples_jupyter.zip <//home/baptiste/Documents/Gitwork/scikit-multimodallearn/doc/tutorial/auto_examples/auto_examples_jupyter.zip>`
 
 
 .. only:: html
diff --git a/doc/tutorial/auto_examples/mumbo/sg_execution_times.rst b/doc/tutorial/auto_examples/mumbo/sg_execution_times.rst
index 7265f516e33b881ff71bdfa435fb9fb80cfb9afe..17e4f34a273540723f5af4bcd2680621fadb6a35 100644
--- a/doc/tutorial/auto_examples/mumbo/sg_execution_times.rst
+++ b/doc/tutorial/auto_examples/mumbo/sg_execution_times.rst
@@ -3,8 +3,8 @@
 
 .. _sphx_glr_tutorial_auto_examples_mumbo_sg_execution_times:
 
-Computation times
-=================
+Mumbo computation times
+=======================
 **00:02.013** total execution time for **tutorial_auto_examples_mumbo** files:
 
 +--------------------------------------------------------------------------------------------------------------------+-----------+--------+
diff --git a/doc/tutorial/auto_examples/mvml/sg_execution_times.rst b/doc/tutorial/auto_examples/mvml/sg_execution_times.rst
index 8e1881205111d00d1af5490c16c12c1f031d0806..800c8d508d8954c0e6cab212e6e27fde6beb8e89 100644
--- a/doc/tutorial/auto_examples/mvml/sg_execution_times.rst
+++ b/doc/tutorial/auto_examples/mvml/sg_execution_times.rst
@@ -3,8 +3,8 @@
 
 .. _sphx_glr_tutorial_auto_examples_mvml_sg_execution_times:
 
-Computation times
-=================
+MVML computation times
+======================
 **00:03.630** total execution time for **tutorial_auto_examples_mvml** files:
 
 +-------------------------------------------------------------------------------+-----------+--------+
diff --git a/doc/tutorial/auto_examples/usecase/images/sphx_glr_plot_usecase_exampleMKL_001.png b/doc/tutorial/auto_examples/usecase/images/sphx_glr_plot_usecase_exampleMKL_001.png
index 7a4af0bbb0335a7e7faf2cd04239b98efc2f6c4d..8ee9e6ece17bf2aefb5559b92514037d31cbff64 100644
Binary files a/doc/tutorial/auto_examples/usecase/images/sphx_glr_plot_usecase_exampleMKL_001.png and b/doc/tutorial/auto_examples/usecase/images/sphx_glr_plot_usecase_exampleMKL_001.png differ
diff --git a/doc/tutorial/auto_examples/usecase/images/thumb/sphx_glr_plot_usecase_exampleMKL_thumb.png b/doc/tutorial/auto_examples/usecase/images/thumb/sphx_glr_plot_usecase_exampleMKL_thumb.png
index 585e9a3c73ec9b68bd0ee01c4862094392a1f44e..8f9b339fae34f852b721c8667d187741747a04bd 100644
Binary files a/doc/tutorial/auto_examples/usecase/images/thumb/sphx_glr_plot_usecase_exampleMKL_thumb.png and b/doc/tutorial/auto_examples/usecase/images/thumb/sphx_glr_plot_usecase_exampleMKL_thumb.png differ
diff --git a/doc/tutorial/auto_examples/usecase/plot_usecase_exampleMKL.ipynb b/doc/tutorial/auto_examples/usecase/plot_usecase_exampleMKL.ipynb
index e3fb15092c88f3346176a62b80f03ca65e485569..6c3070aa9dd0d14d42682f93740923630e0123ec 100644
--- a/doc/tutorial/auto_examples/usecase/plot_usecase_exampleMKL.ipynb
+++ b/doc/tutorial/auto_examples/usecase/plot_usecase_exampleMKL.ipynb
@@ -26,7 +26,7 @@
       },
       "outputs": [],
       "source": [
-        "import numpy as np\nimport matplotlib.pyplot as plt\nfrom sklearn.multiclass import OneVsOneClassifier\nfrom sklearn.model_selection import train_test_split\nfrom sklearn.tree import DecisionTreeClassifier\nfrom multimodal.datasets.base import load_dict, save_dict\nfrom multimodal.tests.data.get_dataset_path import get_dataset_path\nfrom multimodal.datasets.data_sample import MultiModalArray\nfrom multimodal.kernels.mvml import MVML\nfrom multimodal.kernels.lpMKL import MKL\n\nimport numpy as np\nimport matplotlib.pyplot as plt\nimport matplotlib._color_data as mcd\n\n\ndef plot_subplot(X, Y, Y_pred, vue, subplot, title):\n    cn = mcd.CSS4_COLORS\n    classes = np.unique(Y)\n    n_classes = len(np.unique(Y))\n    axs = plt.subplot(subplot[0],subplot[1],subplot[2])\n    axs.set_title(title)\n    #plt.scatter(X._extract_view(vue), X._extract_view(vue), s=40, c='gray',\n    #            edgecolors=(0, 0, 0))\n    for index, k in zip(range(n_classes), cn.keys()):\n         Y_class, = np.where(Y==classes[index])\n         Y_class_pred = np.intersect1d(np.where(Y_pred==classes[index])[0], np.where(Y_pred==Y)[0])\n         plt.scatter(X._extract_view(vue)[Y_class],\n                     X._extract_view(vue)[Y_class],\n                     s=40, c=cn[k], edgecolors='blue', linewidths=2, label=\"class real class: \"+str(index)) #\n         plt.scatter(X._extract_view(vue)[Y_class_pred],\n                     X._extract_view(vue)[Y_class_pred],\n                     s=160, edgecolors='orange', linewidths=2, label=\"class prediction: \"+str(index))\n\n\nif __name__ == '__main__':\n    # file = get_dataset_path(\"digit_histogram.npy\")\n    file = get_dataset_path(\"digit_col_grad.npy\")\n    y = np.load(get_dataset_path(\"digit_y.npy\"))\n    base_estimator = DecisionTreeClassifier(max_depth=4)\n    dic_digit = load_dict(file)\n    XX =MultiModalArray(dic_digit)\n    X_train, X_test, y_train, y_test = train_test_split(XX, y)\n\n    est4 = 
OneVsOneClassifier(MKL(lmbda=0.1, nystrom_param=0.2)).fit(X_train, y_train)\n    y_pred4 = est4.predict(X_test)\n    y_pred44 = est4.predict(X_train)\n    print(\"result of MKL on digit with oneversone\")\n    result4 = np.mean(y_pred4.ravel() == y_test.ravel()) * 100\n    print(result4)\n\n    fig = plt.figure(figsize=(12., 11.))\n    fig.suptitle(\"MKL : result\" + str(result4), fontsize=16)\n    plot_subplot(X_train, y_train, y_pred44  ,0, (4, 1, 1), \"train vue 0 color\" )\n    plot_subplot(X_test, y_test,y_pred4 , 0, (4, 1, 2), \"test vue 0 color\" )\n    plot_subplot(X_test, y_test, y_pred4,1, (4, 1, 3), \"test vue 1 gradiant 0\" )\n    plot_subplot(X_test, y_test,y_pred4, 2, (4, 1, 4), \"test vue 2 gradiant 1\" )\n    # plt.legend()\n    plt.show()"
+        "import numpy as np\nimport matplotlib.pyplot as plt\nfrom sklearn.multiclass import OneVsOneClassifier\nfrom sklearn.model_selection import train_test_split\nfrom sklearn.tree import DecisionTreeClassifier\nfrom multimodal.datasets.base import load_dict, save_dict\nfrom multimodal.tests.data.get_dataset_path import get_dataset_path\nfrom multimodal.datasets.data_sample import MultiModalArray\nfrom multimodal.kernels.lpMKL import MKL\n\nimport numpy as np\nimport matplotlib.pyplot as plt\nimport matplotlib._color_data as mcd\n\n\ndef plot_subplot(X, Y, Y_pred, vue, subplot, title):\n    cn = mcd.CSS4_COLORS\n    classes = np.unique(Y)\n    n_classes = len(np.unique(Y))\n    axs = plt.subplot(subplot[0],subplot[1],subplot[2])\n    axs.set_title(title)\n    #plt.scatter(X._extract_view(vue), X._extract_view(vue), s=40, c='gray',\n    #            edgecolors=(0, 0, 0))\n    for index, k in zip(range(n_classes), cn.keys()):\n         Y_class, = np.where(Y==classes[index])\n         Y_class_pred = np.intersect1d(np.where(Y_pred==classes[index])[0], np.where(Y_pred==Y)[0])\n         plt.scatter(X._extract_view(vue)[Y_class],\n                     X._extract_view(vue)[Y_class],\n                     s=40, c=cn[k], edgecolors='blue', linewidths=2, label=\"class real class: \"+str(index)) #\n         plt.scatter(X._extract_view(vue)[Y_class_pred],\n                     X._extract_view(vue)[Y_class_pred],\n                     s=160, edgecolors='orange', linewidths=2, label=\"class prediction: \"+str(index))\n\n\nif __name__ == '__main__':\n    # file = get_dataset_path(\"digit_histogram.npy\")\n    file = get_dataset_path(\"digit_col_grad.npy\")\n    y = np.load(get_dataset_path(\"digit_y.npy\"))\n    dic_digit = load_dict(file)\n    XX =MultiModalArray(dic_digit)\n    X_train, X_test, y_train, y_test = train_test_split(XX, y)\n\n    est4 = OneVsOneClassifier(MKL(lmbda=0.1, nystrom_param=0.2)).fit(X_train, y_train)\n    y_pred4 = est4.predict(X_test)\n    y_pred44 
= est4.predict(X_train)\n    print(\"result of MKL on digit with oneversone\")\n    result4 = np.mean(y_pred4.ravel() == y_test.ravel()) * 100\n    print(result4)\n\n    fig = plt.figure(figsize=(12., 11.))\n    fig.suptitle(\"MKL : result\" + str(result4), fontsize=16)\n    plot_subplot(X_train, y_train, y_pred44  ,0, (4, 1, 1), \"train vue 0 color\" )\n    plot_subplot(X_test, y_test,y_pred4 , 0, (4, 1, 2), \"test vue 0 color\" )\n    plot_subplot(X_test, y_test, y_pred4,1, (4, 1, 3), \"test vue 1 gradiant 0\" )\n    plot_subplot(X_test, y_test,y_pred4, 2, (4, 1, 4), \"test vue 2 gradiant 1\" )\n    # plt.legend()\n    plt.show()"
       ]
     }
   ],
diff --git a/doc/tutorial/auto_examples/usecase/plot_usecase_exampleMKL.py b/doc/tutorial/auto_examples/usecase/plot_usecase_exampleMKL.py
index 27d6fbf446dd4ebbcc065836401d8cc387770b1e..600c566af889e1b113cbc290184995e024ceaa3a 100644
--- a/doc/tutorial/auto_examples/usecase/plot_usecase_exampleMKL.py
+++ b/doc/tutorial/auto_examples/usecase/plot_usecase_exampleMKL.py
@@ -19,7 +19,6 @@ from sklearn.tree import DecisionTreeClassifier
 from multimodal.datasets.base import load_dict, save_dict
 from multimodal.tests.data.get_dataset_path import get_dataset_path
 from multimodal.datasets.data_sample import MultiModalArray
-from multimodal.kernels.mvml import MVML
 from multimodal.kernels.lpMKL import MKL
 
 import numpy as np
@@ -50,7 +49,6 @@ if __name__ == '__main__':
     # file = get_dataset_path("digit_histogram.npy")
     file = get_dataset_path("digit_col_grad.npy")
     y = np.load(get_dataset_path("digit_y.npy"))
-    base_estimator = DecisionTreeClassifier(max_depth=4)
     dic_digit = load_dict(file)
     XX =MultiModalArray(dic_digit)
     X_train, X_test, y_train, y_test = train_test_split(XX, y)
diff --git a/doc/tutorial/auto_examples/usecase/plot_usecase_exampleMKL.py.md5 b/doc/tutorial/auto_examples/usecase/plot_usecase_exampleMKL.py.md5
index eac7aeeb35124c4b624df10115ac5d442cc77a2a..7514d3cd85f9c58ca8a2a5304f3f92bc6cf20907 100644
--- a/doc/tutorial/auto_examples/usecase/plot_usecase_exampleMKL.py.md5
+++ b/doc/tutorial/auto_examples/usecase/plot_usecase_exampleMKL.py.md5
@@ -1 +1 @@
-f7b5c3f0fd24e4628f03aa7019eea376
\ No newline at end of file
+3360d3ee5508f0e16023ee336767f17c
\ No newline at end of file
diff --git a/doc/tutorial/auto_examples/usecase/plot_usecase_exampleMKL.rst b/doc/tutorial/auto_examples/usecase/plot_usecase_exampleMKL.rst
index 8bde3b61aa649683c405f98ed2af1df39edcfef3..cdb03dbd7ecace5892f04896428a723fad3593b9 100644
--- a/doc/tutorial/auto_examples/usecase/plot_usecase_exampleMKL.rst
+++ b/doc/tutorial/auto_examples/usecase/plot_usecase_exampleMKL.rst
@@ -1,10 +1,12 @@
-.. note::
-    :class: sphx-glr-download-link-note
+.. only:: html
+
+    .. note::
+        :class: sphx-glr-download-link-note
 
-    Click :ref:`here <sphx_glr_download_tutorial_auto_examples_usecase_plot_usecase_exampleMKL.py>` to download the full example code
-.. rst-class:: sphx-glr-example-title
+        Click :ref:`here <sphx_glr_download_tutorial_auto_examples_usecase_plot_usecase_exampleMKL.py>`     to download the full example code
+    .. rst-class:: sphx-glr-example-title
 
-.. _sphx_glr_tutorial_auto_examples_usecase_plot_usecase_exampleMKL.py:
+    .. _sphx_glr_tutorial_auto_examples_usecase_plot_usecase_exampleMKL.py:
 
 
 =====================
@@ -30,8 +32,8 @@ multi class digit from sklearn, multivue
  .. code-block:: none
 
     result of MKL on digit with oneversone
-    96.88888888888889
-    /home/dominique/projets/ANR-Lives/scikit-multimodallearn/examples/usecase/plot_usecase_exampleMKL.py:72: UserWarning: Matplotlib is currently using agg, which is a non-GUI backend, so cannot show the figure.
+    97.77777777777777
+    /home/baptiste/Documents/Gitwork/scikit-multimodallearn/examples/usecase/plot_usecase_exampleMKL.py:70: UserWarning: Matplotlib is currently using agg, which is a non-GUI backend, so cannot show the figure.
       plt.show()
 
 
@@ -53,7 +55,6 @@ multi class digit from sklearn, multivue
     from multimodal.datasets.base import load_dict, save_dict
     from multimodal.tests.data.get_dataset_path import get_dataset_path
     from multimodal.datasets.data_sample import MultiModalArray
-    from multimodal.kernels.mvml import MVML
     from multimodal.kernels.lpMKL import MKL
 
     import numpy as np
@@ -84,7 +85,6 @@ multi class digit from sklearn, multivue
         # file = get_dataset_path("digit_histogram.npy")
         file = get_dataset_path("digit_col_grad.npy")
         y = np.load(get_dataset_path("digit_y.npy"))
-        base_estimator = DecisionTreeClassifier(max_depth=4)
         dic_digit = load_dict(file)
         XX =MultiModalArray(dic_digit)
         X_train, X_test, y_train, y_test = train_test_split(XX, y)
@@ -109,7 +109,7 @@ multi class digit from sklearn, multivue
 
 .. rst-class:: sphx-glr-timing
 
-   **Total running time of the script:** ( 0 minutes  20.457 seconds)
+   **Total running time of the script:** ( 1 minutes  59.263 seconds)
 
 
 .. _sphx_glr_download_tutorial_auto_examples_usecase_plot_usecase_exampleMKL.py:
@@ -122,13 +122,13 @@ multi class digit from sklearn, multivue
 
 
 
-  .. container:: sphx-glr-download
+  .. container:: sphx-glr-download sphx-glr-download-python
 
      :download:`Download Python source code: plot_usecase_exampleMKL.py <plot_usecase_exampleMKL.py>`
 
 
 
-  .. container:: sphx-glr-download
+  .. container:: sphx-glr-download sphx-glr-download-jupyter
 
      :download:`Download Jupyter notebook: plot_usecase_exampleMKL.ipynb <plot_usecase_exampleMKL.ipynb>`
 
diff --git a/doc/tutorial/auto_examples/usecase/plot_usecase_exampleMKL_codeobj.pickle b/doc/tutorial/auto_examples/usecase/plot_usecase_exampleMKL_codeobj.pickle
index a96580e139407f646205c7b6906300470ac0e7b7..a00d9158d9484b7cc06bd5b10955741cefaf0e5d 100644
Binary files a/doc/tutorial/auto_examples/usecase/plot_usecase_exampleMKL_codeobj.pickle and b/doc/tutorial/auto_examples/usecase/plot_usecase_exampleMKL_codeobj.pickle differ
diff --git a/doc/tutorial/auto_examples/usecase/sg_execution_times.rst b/doc/tutorial/auto_examples/usecase/sg_execution_times.rst
index 1150b05363bcb88291d08c5cc1244c9ea66b14d4..f8c5f4ee2c40c7b18a942f9d2fbac6050f5a308d 100644
--- a/doc/tutorial/auto_examples/usecase/sg_execution_times.rst
+++ b/doc/tutorial/auto_examples/usecase/sg_execution_times.rst
@@ -5,16 +5,16 @@
 
 Computation times
 =================
-**01:55.487** total execution time for **tutorial_auto_examples_usecase** files:
+**02:26.402** total execution time for **tutorial_auto_examples_usecase** files:
 
-+------------------------------------------------------------------------------------------------------------------+-----------+--------+
-| :ref:`sphx_glr_tutorial_auto_examples_usecase_plot_usecase_exampleMVML.py` (``plot_usecase_exampleMVML.py``)     | 01:14.485 | 0.0 MB |
-+------------------------------------------------------------------------------------------------------------------+-----------+--------+
-| :ref:`sphx_glr_tutorial_auto_examples_usecase_plot_usecase_exampleMKL.py` (``plot_usecase_exampleMKL.py``)       | 00:20.457 | 0.0 MB |
-+------------------------------------------------------------------------------------------------------------------+-----------+--------+
-| :ref:`sphx_glr_tutorial_auto_examples_usecase_plot_usecase_exampleMuCuBo.py` (``plot_usecase_exampleMuCuBo.py``) | 00:14.171 | 0.0 MB |
-+------------------------------------------------------------------------------------------------------------------+-----------+--------+
-| :ref:`sphx_glr_tutorial_auto_examples_usecase_plot_usecase_exampleMumBo.py` (``plot_usecase_exampleMumBo.py``)   | 00:06.374 | 0.0 MB |
-+------------------------------------------------------------------------------------------------------------------+-----------+--------+
-| :ref:`sphx_glr_tutorial_auto_examples_usecase_usecase_function.py` (``usecase_function.py``)                     | 00:00.000 | 0.0 MB |
-+------------------------------------------------------------------------------------------------------------------+-----------+--------+
++--------------------------------------------------------------------------------------------------------------------+-----------+--------+
+| :ref:`sphx_glr_tutorial_auto_examples_usecase_plot_usecase_exampleMKL.py` (``plot_usecase_exampleMKL.py``)         | 01:59.263 | 0.0 MB |
++--------------------------------------------------------------------------------------------------------------------+-----------+--------+
+| :ref:`sphx_glr_tutorial_auto_examples_usecase_plot_usecase_exampleMuComBo.py` (``plot_usecase_exampleMuComBo.py``) | 00:27.139 | 0.0 MB |
++--------------------------------------------------------------------------------------------------------------------+-----------+--------+
+| :ref:`sphx_glr_tutorial_auto_examples_usecase_plot_usecase_exampleMVML.py` (``plot_usecase_exampleMVML.py``)       | 00:00.000 | 0.0 MB |
++--------------------------------------------------------------------------------------------------------------------+-----------+--------+
+| :ref:`sphx_glr_tutorial_auto_examples_usecase_plot_usecase_exampleMumBo.py` (``plot_usecase_exampleMumBo.py``)     | 00:00.000 | 0.0 MB |
++--------------------------------------------------------------------------------------------------------------------+-----------+--------+
+| :ref:`sphx_glr_tutorial_auto_examples_usecase_usecase_function.py` (``usecase_function.py``)                       | 00:00.000 | 0.0 MB |
++--------------------------------------------------------------------------------------------------------------------+-----------+--------+
diff --git a/doc/tutorial/auto_examples/usecase/usecase_function.rst b/doc/tutorial/auto_examples/usecase/usecase_function.rst
index 41f67ddd4465c6217a093f1e73cb22b5a7a7ae54..54decb5e38103b7ae928825cc00da56373d588ed 100644
--- a/doc/tutorial/auto_examples/usecase/usecase_function.rst
+++ b/doc/tutorial/auto_examples/usecase/usecase_function.rst
@@ -1,10 +1,12 @@
-.. note::
-    :class: sphx-glr-download-link-note
+.. only:: html
+
+    .. note::
+        :class: sphx-glr-download-link-note
 
-    Click :ref:`here <sphx_glr_download_tutorial_auto_examples_usecase_usecase_function.py>` to download the full example code
-.. rst-class:: sphx-glr-example-title
+        Click :ref:`here <sphx_glr_download_tutorial_auto_examples_usecase_usecase_function.py>`     to download the full example code
+    .. rst-class:: sphx-glr-example-title
 
-.. _sphx_glr_tutorial_auto_examples_usecase_usecase_function.py:
+    .. _sphx_glr_tutorial_auto_examples_usecase_usecase_function.py:
 
 
 ========================
@@ -60,13 +62,13 @@ Function plot_subplot
 
 
 
-  .. container:: sphx-glr-download
+  .. container:: sphx-glr-download sphx-glr-download-python
 
      :download:`Download Python source code: usecase_function.py <usecase_function.py>`
 
 
 
-  .. container:: sphx-glr-download
+  .. container:: sphx-glr-download sphx-glr-download-jupyter
 
      :download:`Download Jupyter notebook: usecase_function.ipynb <usecase_function.ipynb>`
 
diff --git a/doc/tutorial/auto_examples/usecase/usecase_function_codeobj.pickle b/doc/tutorial/auto_examples/usecase/usecase_function_codeobj.pickle
index ad823d70d0d8c15936fac37922405dedd5018905..506fb835d02b15cbc7629446f1d78ffd762d0ee5 100644
Binary files a/doc/tutorial/auto_examples/usecase/usecase_function_codeobj.pickle and b/doc/tutorial/auto_examples/usecase/usecase_function_codeobj.pickle differ
diff --git a/doc/tutorial/estimator_template.rst b/doc/tutorial/estimator_template.rst
new file mode 100644
index 0000000000000000000000000000000000000000..89a4cd9530ddd2194260927f9f68321180d2da1f
--- /dev/null
+++ b/doc/tutorial/estimator_template.rst
@@ -0,0 +1,129 @@
+
+.. _estim-template:
+
+Estimator template
+==================
+
+To add a multimodal estimator based on the groundwork of scikit-multimodallearn,
+please feel free to use the following template, while complying with the
+`Developer's Guide <http://scikit-learn.org/stable/developers>`_ of the
+scikit-learn project to ensure full compatibility.
+
+
+
+.. code-block:: default
+
+    import numpy as np
+    from sklearn.base import ClassifierMixin, BaseEstimator
+    from sklearn.utils import check_X_y
+    from sklearn.utils.multiclass import check_classification_targets
+    from sklearn.utils.validation import check_is_fitted
+    from multimodal.boosting.boost import UBoosting
+
+
+    class NewMultiModalEstimator(BaseEstimator, ClassifierMixin, UBoosting):
+        r"""
+        Your documentation
+        """
+
+        def __init__(self, your_attributes=None, ):
+            self.your_attributes = your_attributes
+
+        def fit(self, X, y, views_ind=None):
+            """Build a multimodal classifier from the training set (X, y).
+
+            Parameters
+            ----------
+            X : dict dictionary with all views
+                or
+                `MultiModalData` ,  `MultiModalArray`, `MultiModalSparseArray`
+                or
+                {array-like, sparse matrix}, shape = (n_samples, n_features)
+                Training multi-view input samples.
+                Sparse matrix can be CSC, CSR, COO, DOK, or LIL.
+                COO, DOK and LIL are converted to CSR.
+
+            y : array-like, shape = (n_samples,)
+                Target values (class labels).
+
+            views_ind : array-like (default=[0, n_features//2, n_features])
+                Parameter specifying how to extract the data views from X:
+
+                - If views_ind is a 1-D array of sorted integers, the entries
+                  indicate the limits of the slices used to extract the views,
+                  where view ``n`` is given by
+                  ``X[:, views_ind[n]:views_ind[n+1]]``.
+
+                  With this convention each view is therefore a view (in the NumPy
+                  sense) of X and no copy of the data is done.
+
+                - If views_ind is an array of arrays of integers, then each array
+                  of integers ``views_ind[n]`` specifies the indices of the view
+                  ``n``, which is then given by ``X[:, views_ind[n]]``.
+
+                  With this convention each view creates therefore a partial copy
+                  of the data in X. This convention is thus more flexible but less
+                  efficient than the previous one.
+
+            Returns
+            -------
+            self : object
+                Returns self.
+
+            Raises
+            ------
+            ValueError  estimator must support sample_weight
+
+            ValueError where `X` and `view_ind` are not compatible
+            """
+
+            # _global_X_transform processes the multimodal dataset to transform the
+            # in the MultiModalArray format.
+            self.X_ = self._global_X_transform(X, views_ind=views_ind)
+
+            # Ensure proper format for views_ind and return number of views.
+            views_ind_, n_views = self.X_._validate_views_ind(self.X_.views_ind,
+                                                              self.X_.shape[1])
+
+            # According to scikit learn guidelines.
+            check_X_y(self.X_, y)
+            if not isinstance(y, np.ndarray):
+                y = np.asarray(y)
+            check_classification_targets(y)
+            self._validate_estimator()
+
+            return self
+
+
+        def predict(self, X):
+            """Predict classes for X.
+
+            Parameters
+            ----------
+            X : {array-like, sparse matrix}, shape = (n_samples, n_features)
+                Multi-view input samples.
+                Sparse matrix can be CSC, CSR, COO, DOK, or LIL.
+                COO, DOK and LIL are converted to CSR.
+
+            Returns
+            -------
+            y : numpy.ndarray, shape = (n_samples,)
+                Predicted classes.
+
+            Raises
+            ------
+            ValueError   'X' input matrix must have the same total number of features
+                         of 'X' fit data
+            """
+            # According to scikit learn guidelines
+            check_is_fitted(self, ("your_attributes"))
+
+            # _global_X_transform processes the multimodal dataset to transform the
+            # in the MultiModalArray format.
+            X = self._global_X_transform(X, views_ind=self.X_.views_ind)
+
+            # Ensure that X is in the proper format.
+            X = self._validate_X_predict(X)
+
+            # Returning fake multi-class labels
+            return np.random.randint(0, 5, size=X.shape[0])
\ No newline at end of file
diff --git a/doc/tutorial/install_devel.rst b/doc/tutorial/install_devel.rst
index 215d921e6ff5e0ae6976c11137006e613fc37d49..2df1a317954b99b33df6da0151a0aecc41d76471 100644
--- a/doc/tutorial/install_devel.rst
+++ b/doc/tutorial/install_devel.rst
@@ -38,7 +38,8 @@ The development of scikit-multimodallearn follows the guidelines provided by the
 scikit-learn community.
 
 Refer to the `Developer's Guide <http://scikit-learn.org/stable/developers>`_
-of the scikit-learn project for more details.
+of the scikit-learn project for general details. Expanding the library can be
+done by following the template provided in :ref:`estim-template`.
 
 Source code
 -----------
diff --git a/doc/tutorial/times.rst b/doc/tutorial/times.rst
index 1c70dd62db09d7041fcadc22cab036e5c071f29e..23955f196336cdb7fb08010c03ef312517cebafe 100644
--- a/doc/tutorial/times.rst
+++ b/doc/tutorial/times.rst
@@ -3,7 +3,7 @@
 Computation times
 =================
 
-total execution time for **tutorial_auto_examples** files:
+Total execution time for **tutorial_auto_examples** files:
 
 
 .. toctree::
diff --git a/multimodal/__init__.py b/multimodal/__init__.py
index e851e58bf4c7b631d2a67f501bb74c7a2b0a8217..6c8e6b979c5f58121ac7ee2d9e024749da3a8ce1 100644
--- a/multimodal/__init__.py
+++ b/multimodal/__init__.py
@@ -1 +1 @@
-__version__ = "0.0.dev0"
+__version__ = "0.0.0"
diff --git a/multimodal/boosting/boost.py b/multimodal/boosting/boost.py
index bab7c806721c891bec828f7c25ff7864a734d366..bf1f743604e526c765791d7de1164d04b3f5ca61 100644
--- a/multimodal/boosting/boost.py
+++ b/multimodal/boosting/boost.py
@@ -110,17 +110,10 @@ class UBoosting(metaclass=ABCMeta):
         return X
 
     def _global_X_transform(self, X, views_ind=None):
-        X_ = None
         if isinstance(X, MultiModalData):
             X_ = X
         elif isinstance(X, sp.spmatrix):
             X_ = MultiModalSparseArray(X, views_ind)
         else:
             X_ = MultiModalArray(X, views_ind)
-        # if not isinstance(X_, MultiModalData):
-        #     try:
-        #         X_ = np.asarray(X)
-        #         X_ = MultiModalArray(X_)
-        #     except Exception as e:
-        #         raise TypeError('Reshape your data')
         return X_
diff --git a/multimodal/boosting/combo.py b/multimodal/boosting/combo.py
index 5c0771936d7ce7abb6157f9b0c46d714edf04d9c..bad1f32ce2c786bc7a95b6631ce6f3b16af0ac9d 100644
--- a/multimodal/boosting/combo.py
+++ b/multimodal/boosting/combo.py
@@ -53,7 +53,7 @@ from sklearn.metrics import accuracy_score
 from sklearn.tree import DecisionTreeClassifier
 from sklearn.tree._tree import DTYPE
 from sklearn.tree import BaseDecisionTree
-from sklearn.utils import check_array, check_X_y, check_random_state
+from sklearn.utils import check_X_y, check_random_state
 from sklearn.utils.multiclass import check_classification_targets
 from sklearn.utils.validation import check_is_fitted, has_fit_parameter
 from cvxopt import solvers, matrix, spdiag, exp, spmatrix, mul, div
diff --git a/multimodal/datasets/data_sample.py b/multimodal/datasets/data_sample.py
index a4b5f9c310c9cb0ce314e682074f17b5da62b3f1..b65fe0ee134b17a5e00709bf6bce2bbe23c55f3b 100644
--- a/multimodal/datasets/data_sample.py
+++ b/multimodal/datasets/data_sample.py
@@ -133,7 +133,6 @@ class MultiModalData(metaclass=ABCMeta):
 
     def _validate_views_ind(self, views_ind, n_features):
         """Ensure proper format for views_ind and return number of views."""
-        # views_ind = np.array(views_ind)
         if np.issubdtype(views_ind.dtype, np.integer) and views_ind.ndim == 1:
             if len(views_ind) > 2 and np.any(views_ind[:-1] >= views_ind[1:]):
                 raise ValueError("Values in views_ind must be sorted.")
@@ -180,7 +179,6 @@ class MultiModalSparseInfo():
         new_data = np.ndarray([])
         n_views = data.size
         thekeys = None
-        # views_ind_self =  None
         view_mode = 'slices'
 
         if (sp.issparse(data)) and data.ndim > 1:
@@ -196,7 +194,6 @@ class MultiModalSparseInfo():
                     views_ind = np.array([0, data.shape[1]])
 
             new_data = data
-            # views_ind_self = views_ind
         views_ind, n_views, view_mode = self._first_validate_views_ind(views_ind,
                                                                       data.shape[1])
         if views_ind.ndim == 1 and view_mode.startswith("slicing"):
@@ -365,8 +362,6 @@ class MultiModalArray(np.ndarray, MultiModalData):
                 views_ind.append(dat_values.shape[1] + views_ind[index])
                 index += 1
             thekeys = data.keys()
-            # if new_data.ndim < 2 :
-            #     raise ValueError('Reshape your data')
             if new_data.ndim > 1 and (new_data.shape == (1, 1) or new_data.shape == ()):
                 raise ValueError('Reshape your data')
         elif isinstance(data, np.ndarray) and views_ind is None and data.ndim == 1:
@@ -421,21 +416,11 @@ class MultiModalArray(np.ndarray, MultiModalData):
                 raise ValueError('Reshape your data')
             if  new_data.ndim > 1 and (new_data.shape == (1, 1) or new_data.shape == ()):
                 raise ValueError('Reshape your data')
-            # if views_ind.ndim < 2 and new_data.ndim < 2 and views_ind[-1] > new_data.shape[1]:
-            #     raise ValueError('Reshape your data')
-
-            # views_ind_self = views_ind
-        # if new_data.shape[1] < 1:
-        #     msg = ("%d feature\(s\) \\(shape=\%s\) while a minimum of \\d* "
-        #            "is required.") % (new_data.shape[1], str(new_data.shape))
-        #     # "%d feature\(s\) \(shape=\(%d, %d\)\) while a minimum of \d* is required." % (new_data.shape[1], new_data.shape[0], new_data.shape[1])
-        #     raise ValueError(msg)
+
         views_ind, n_views, view_mode = cls._first_validate_views_ind(views_ind,
                                                                       new_data.shape[1])
         if views_ind.ndim == 1 and view_mode.startswith("slices"):
             shapes_int = [in2 - in1 for in1, in2 in zip(views_ind, views_ind[1:])]
-        # obj =   ma.MaskedArray.__new(new_data)   # new_data.view()  a.MaskedArray(new_data, mask=new_data.mask).view(cls)
-        # bj = super(Metriclearn_array, cls).__new__(cls, new_data.data, new_data.mask)
 
         if hasattr(new_data, "mask"):  # pragma: no cover
             obj = ma.masked_array(new_data.data, new_data.mask).view(cls)
@@ -448,7 +433,6 @@ class MultiModalArray(np.ndarray, MultiModalData):
         obj.views_ind = views_ind
         obj.shapes_int = shapes_int
         obj.n_views = n_views
-        # obj.keys = thekeys
         return obj
 
     @staticmethod
@@ -478,8 +462,7 @@ class MultiModalArray(np.ndarray, MultiModalData):
                   isinstance(dat_values, np.ndarray) or sp.issparse(dat_values):
                 new_data = dat_values
             else:
-                new_data = dat_values.view(np.ndarray) #  ma.masked_array(dat_values, mask=ma.nomask) dat_values.view(ma.MaskedArray) #(
-                # new_data.mask = ma.nomask
+                new_data = dat_values.view(np.ndarray)
         else:
             if isinstance(dat_values, np.ndarray):
                 new_data = np.hstack((new_data, dat_values))
@@ -488,15 +471,13 @@ class MultiModalArray(np.ndarray, MultiModalData):
             elif sp.issparse(dat_values):
                 new_data = sp.hstack((new_data, dat_values))
             else:
-                new_data = np.hstack((new_data,  dat_values.view(np.ndarray) ) ) #  ma.masked_array(dat_values, mask=ma.nomask
+                new_data = np.hstack((new_data,  dat_values.view(np.ndarray) ) )
         return new_data
 
     def __array_finalize__(self, obj):
         if obj is None: return
-        # super(MultiModalArray, self).__array_finalize__(obj)
         self.shapes_int = getattr(obj, 'shapes_int', None)
         self.n_views = getattr(obj, 'n_views', None)
-        # self.keys = getattr(obj, 'keys', None)
         self.views_ind = getattr(obj, 'views_ind', None)
         self.view_mode_ = getattr(obj, 'view_mode_', None)
 
@@ -537,22 +518,6 @@ class MultiModalArray(np.ndarray, MultiModalData):
         stop = int(np.sum(np.asarray(self.shapes_int[0: view+1])))
         return self[row, start:stop]
 
-    # def add_view(self, data):
-    #     if len(self.shape) > 0:
-    #         if data.shape[0] == self.data.shape[0]:
-    #             print(self.data.shape, data.shape)
-    #             new_data = np.hstack((self.data, data))
-    #             self.shapes_int.append(data.shape[1])
-    #             self.n_views +=1
-    #             print(new_data.shape)
-    #
-    #     else:
-    #         raise ValueError("New view can't initialazed")
-    #        # self.shapes_int= [data.shape[1]]
-    #        # self.data.reshape(data.shape[0],)
-    #        # np.insert(self.data, data, 0)
-    #        # self.n_views = 1
-
     def _todict(self):
         dico = {}
         for view in range(self.n_views):
@@ -560,8 +525,6 @@ class MultiModalArray(np.ndarray, MultiModalData):
         return dico
 
 
-
-
 class DataSample(dict):
     """
     A DataSample instance
@@ -596,7 +559,6 @@ class DataSample(dict):
 
     def __init__(self, data=None, **kwargs):
 
-
         # The dictionary that contains the sample
         super(DataSample, self).__init__(kwargs)
         self._data = None # Metriclearn_arrayMultiModalArray(np.zeros((0,0)))
diff --git a/multimodal/kernels/lpMKL.py b/multimodal/kernels/lpMKL.py
index 12f6901b6d2a3678034687c8b2b6f6a4418edbf7..7e2c7e276a88d66831fa56646ef8b60d2f0a9934 100644
--- a/multimodal/kernels/lpMKL.py
+++ b/multimodal/kernels/lpMKL.py
@@ -176,7 +176,6 @@ class MKL(BaseEstimator, ClassifierMixin, MKernel):
             self.regression_ = True
         else:
             raise ValueError("MKL algorithms is a binary classifier")
-                            # " or performs regression with float target")
         self.y_ = y
         n = self.K_.shape[0]
         self._calc_nystrom(self.K_, n)
@@ -233,7 +232,7 @@ class MKL(BaseEstimator, ClassifierMixin, MKernel):
             # alpha fixed -> calculate gammas
             weights_old = weights.copy()
 
-            # first the ||f_t||^2 todo wtf is the formula used here????
+            # first the ||f_t||^2 todo: what is the formula used here?
             ft2 = np.zeros(views)
             for v in range(0, views):
                 if self.nystrom_param < 1 and self.use_approx:
@@ -274,16 +273,9 @@ class MKL(BaseEstimator, ClassifierMixin, MKernel):
                 stuck = True
 
             max_diff = np.max([max_diff_gamma, diff_alpha])
-            # print([max_diff_gamma, diff_alpha])  # print if convergence is interesting
             C_old = C.copy()
             rounds = rounds + 1
-        # print("\nlearned the weights:")
-        # np.set_printoptions(precision=3, suppress=True)
-        # print(weights)
-        # print("")
 
-        # print if resulting convergence is of interest
-        # print("convergence of ", max_diff, " at step ", rounds, "/500")
         if stuck:
             return C_old, weights_old
         else:
diff --git a/multimodal/kernels/mkernel.py b/multimodal/kernels/mkernel.py
index 2b8d6d5f4ea94911f1122d0fb1e0c8ae963c8681..4424867767218f469bd6fea952b49f8032b83484 100644
--- a/multimodal/kernels/mkernel.py
+++ b/multimodal/kernels/mkernel.py
@@ -82,7 +82,6 @@ class MKernel(metaclass=ABCMeta):
         elif isinstance(self.kernel, list):
             ind = min(v, len(self.kernel) - 1)
             met = self.kernel[ind]
-        # Y,
         return pairwise_kernels(X, Y, metric=met,
                                 filter_params=True, **params)
 
@@ -113,7 +112,6 @@ class MKernel(metaclass=ABCMeta):
         """
         kernel_dict = {}
         flag_sparse = False
-        X_ = None
         y = None
         if Y is None:
             y = Y
@@ -124,22 +122,10 @@ class MKernel(metaclass=ABCMeta):
             X = X_
         if isinstance(X, MultiModalArray):
             X_ = X
-        # if not isinstance(X_, MultiModalArray):
-        #     try:
-        #         X_ = np.asarray(X)
-        #         X_ = MultiModalArray(X_, views_ind)
-        #     except Exception as e:
-        #         pass
-                # raise TypeError('Reshape your data')
         if isinstance(X_, MultiModalArray):
             for v in range(X.n_views):
                 if Y is not None:   y = Y.get_view(v) # y = self._global_check_pairwise(X, Y, v)
                 kernel_dict[v] = self._get_kernel(X_.get_view(v), y)
-
-        # if not isinstance(X_, MultiModalArray):
-        #     if sp.sparse.issparse(X):
-        #         raise TypeError("Nonsensical Error: no sparse data are allowed as input")
-        #     raise TypeError('Reshape your data')
         K_ = MultiModalArray(kernel_dict)
         return X_, K_
 
diff --git a/multimodal/kernels/mvml.py b/multimodal/kernels/mvml.py
index 8444ff3636f3cd0bc10b495f30cfabfe93da6646..92b5b3eadddb7edbe528bbb74cc023cfb0bc7984 100644
--- a/multimodal/kernels/mvml.py
+++ b/multimodal/kernels/mvml.py
@@ -166,7 +166,6 @@ class MVML(MKernel, BaseEstimator, ClassifierMixin, RegressorMixin):
         self.nystrom_param = nystrom_param
         self.lmbda = lmbda
         self.eta = eta
-        # self.regression_params = regression_params
         self.learn_A = learn_A
         self.learn_w = learn_w
         self.n_loops = n_loops
@@ -226,10 +225,6 @@ class MVML(MKernel, BaseEstimator, ClassifierMixin, RegressorMixin):
         self.regression_ = False
         self.X_, self.K_= self._global_kernel_transform(X, views_ind=views_ind)
         check_X_y(self.X_, y)
-        # if type_of_target(y) not in "binary":
-        #     raise ValueError("target should be binary")
-
-
 
         if type_of_target(y) in "binary":
             check_classification_targets(y)
@@ -244,7 +239,6 @@ class MVML(MKernel, BaseEstimator, ClassifierMixin, RegressorMixin):
                              " or performs regression with float target")
         self.y_ = y
 
-        # n = X[0].shape[0]
         n = self.K_.shape[0]
         self.n_approx = int(np.floor(self.nystrom_param * n))  # number of samples in approximation, equals n if no approx.
         if self.nystrom_param < 1:
@@ -257,7 +251,6 @@ class MVML(MKernel, BaseEstimator, ClassifierMixin, RegressorMixin):
         if self.warning_message:
             import logging
             logging.warning("warning appears during fit process" + str(self.warning_message))
-            # print("warning appears during fit process", self.warning_message)
         return self
 
     def _learn_mvml(self, learn_A=1, learn_w=0, n_loops=6):
@@ -335,11 +328,13 @@ class MVML(MKernel, BaseEstimator, ClassifierMixin, RegressorMixin):
 
             # first invert A
             try:
+                # Changed because of numerical instability
                 # A_inv = np.linalg.pinv(A + 1e-09 * np.eye(views * self.n_approx))
                 cond_A = np.linalg.cond(A + 1e-08 * np.eye(views * self.n_approx))
                 if cond_A < 10:
                     A_inv = spli.pinv(A + 1e-8 * np.eye(views * self.n_approx))
                 else:
+                    # Changed because of numerical instability
                     # A_inv = self._inverse_precond_LU(A + 1e-8 * np.eye(views * self.n_approx), pos="precond_A") # self._inverse_precond_jacobi(A + 1e-8 * np.eye(views * self.n_approx), pos="precond_A")
                     A_inv = self._inv_best_precond(A + 1e-8 * np.eye(views * self.n_approx), pos="precond_A")
             except spli.LinAlgError:  # pragma: no cover
@@ -355,7 +350,6 @@ class MVML(MKernel, BaseEstimator, ClassifierMixin, RegressorMixin):
             except ValueError:  # pragma: no cover
                 self.warning_message["ValueError"] = self.warning_message.get("ValueError", 0) + 1
                 return A_prev, g_prev, w_prev
-            # print("A_inv ",np.sum(A_inv))
             # then calculate g (block-sparse multiplications in loop) using A_inv
             for v in range(views):
                 for vv in range(views):
@@ -365,11 +359,13 @@ class MVML(MKernel, BaseEstimator, ClassifierMixin, RegressorMixin):
                                       vv * self.n_approx:(vv + 1) * self.n_approx]
                 g[v * self.n_approx:(v + 1) * self.n_approx, 0] = np.dot(w[v] * np.transpose(self.U_dict[v]), self.y_)
             try:
+                # Changed because of numerical instability
                 # minA_inv = np.min(np.absolute(A_inv)) , rcond=self.r_cond*minA_inv
                 # here A_inv isn't actually inverse of A (changed in above loop)
                 if np.linalg.cond(A_inv) < 10:
                    g = np.dot(spli.pinv(A_inv), g)
                 else:
+                    # Changed because of numerical instability
                     # g = np.dot(self._inverse_precond_LU(A_inv, pos="precond_A_1"), g)
                     g = np.dot(self._inv_best_precond(A_inv, pos="precond_A_1"), g)
             except spli.LinAlgError:  # pragma: no cover
@@ -413,9 +409,6 @@ class MVML(MKernel, BaseEstimator, ClassifierMixin, RegressorMixin):
         Pm, L, U = spli.lu(A)
         M = spli.inv(np.dot(L, U))
         Pre_lu = np.dot(M, A)
-        # print("cond a", np.linalg.cond(A))
-        # print("cond Pre_J", np.linalg.cond(Pre_J))
-        # print("cond Pre_lu", np.linalg.cond(Pre_lu))
         if np.linalg.cond(A) > np.linalg.cond(Pre_J) and np.linalg.cond(Pre_J) <= np.linalg.cond(Pre_lu):
             P_inv = spli.pinv(Pre_J)
             A_inv = np.dot(P_inv,  J_1)
@@ -430,7 +423,6 @@ class MVML(MKernel, BaseEstimator, ClassifierMixin, RegressorMixin):
 
     def _inverse_precond_jacobi(self, A, pos="precond_A"):  # pragma: no cover
         J_1 = np.diag(1.0/np.diag(A))
-        # J_1 = np.linalg.inv(J)
         P = np.dot(J_1, A)
         if np.linalg.cond(A) > np.linalg.cond(P):
             P_inv = spli.pinv(P)
@@ -532,7 +524,6 @@ class MVML(MKernel, BaseEstimator, ClassifierMixin, RegressorMixin):
 
         """
         views = len(self.U_dict)
-        # t = test_kernels[0].shape[0]
         t = test_kernels.shape[0]
         K = np.zeros((t, views * self.n_approx))
         for v in range(views):
@@ -580,7 +571,6 @@ class MVML(MKernel, BaseEstimator, ClassifierMixin, RegressorMixin):
         rounds = 0
 
         L = lmbda * np.linalg.norm(np.dot(g, g.T))
-        # print("L ", L)
 
         while not converged and rounds < 100:
             # no line search - this has worked well enough experimentally