From ddeba9f2e29cad9f45062fb24f83ab97ab7d7ed2 Mon Sep 17 00:00:00 2001
From: Dominique Benielli <dominique.benielli@lis-lab.fr>
Date: Fri, 24 Jan 2020 18:05:27 +0100
Subject: [PATCH] doc fix

Remove the generated sphinx-gallery outputs (example .py/.ipynb/.rst copies,
codeobj pickles and sg_execution_times) from doc/tutorial/auto_examples,
track execution times in a hand-written tutorial/times.rst instead, and
update doc/index.rst and the example README files accordingly.
---
 doc/index.rst                                 |   4 +-
 .../auto_examples/auto_examples_jupyter.zip   | Bin 93984 -> 32034 bytes
 .../auto_examples/auto_examples_python.zip    | Bin 76976 -> 26186 bytes
 .../cumbo/cumbo_plot_2_views_2_classes.ipynb  |  54 ----
 .../cumbo/cumbo_plot_2_views_2_classes.py     | 127 ----------
 .../cumbo/cumbo_plot_2_views_2_classes.rst    | 170 -------------
 ...umbo_plot_2_views_2_classes_codeobj.pickle | Bin 686 -> 0 bytes
 .../cumbo/cumbo_plot_3_views_3_classes.ipynb  |  54 ----
 .../cumbo/cumbo_plot_3_views_3_classes.py     | 121 ---------
 .../cumbo/cumbo_plot_3_views_3_classes.rst    | 164 -------------
 ...umbo_plot_3_views_3_classes_codeobj.pickle | Bin 686 -> 0 bytes
 .../cumbo_plot_2_views_2_classes.ipynb        |  54 ----
 .../cumbo_plot_2_views_2_classes.py           | 127 ----------
 .../cumbo_plot_2_views_2_classes.rst          | 170 -------------
 ...umbo_plot_2_views_2_classes_codeobj.pickle | Bin 686 -> 0 bytes
 .../cumbo_plot_3_views_3_classes.ipynb        |  54 ----
 .../cumbo_plot_3_views_3_classes.py           | 121 ---------
 .../cumbo_plot_3_views_3_classes.rst          | 164 -------------
 ...umbo_plot_3_views_3_classes_codeobj.pickle | Bin 686 -> 0 bytes
 doc/tutorial/auto_examples/index.rst          |  16 +-
 .../mumbo/mumbo_plot_2_views_2_classes.ipynb  |  54 ----
 .../mumbo/mumbo_plot_2_views_2_classes.py     | 127 ----------
 .../mumbo/mumbo_plot_2_views_2_classes.rst    | 170 -------------
 ...umbo_plot_2_views_2_classes_codeobj.pickle | Bin 682 -> 0 bytes
 .../mumbo/mumbo_plot_3_views_3_classes.ipynb  |  54 ----
 .../mumbo/mumbo_plot_3_views_3_classes.py     | 121 ---------
 .../mumbo/mumbo_plot_3_views_3_classes.rst    | 164 -------------
 ...umbo_plot_3_views_3_classes_codeobj.pickle | Bin 682 -> 0 bytes
 .../mumbo_plot_2_views_2_classes.ipynb        |  54 ----
 .../mumbo_plot_2_views_2_classes.py           | 127 ----------
 .../mumbo_plot_2_views_2_classes.rst          | 170 -------------
 ...umbo_plot_2_views_2_classes_codeobj.pickle | Bin 682 -> 0 bytes
 .../mumbo_plot_3_views_3_classes.ipynb        |  54 ----
 .../mumbo_plot_3_views_3_classes.py           | 121 ---------
 .../mumbo_plot_3_views_3_classes.rst          | 164 -------------
 ...umbo_plot_3_views_3_classes_codeobj.pickle | Bin 682 -> 0 bytes
 .../auto_examples/mvml/mvml_plot_.ipynb       |  54 ----
 doc/tutorial/auto_examples/mvml/mvml_plot_.py | 189 --------------
 .../auto_examples/mvml/mvml_plot_.rst         | 232 ------------------
 .../mvml/mvml_plot__codeobj.pickle            | Bin 1176 -> 0 bytes
 doc/tutorial/auto_examples/mvml_plot_.ipynb   |  54 ----
 doc/tutorial/auto_examples/mvml_plot_.py      | 189 --------------
 doc/tutorial/auto_examples/mvml_plot_.rst     | 232 ------------------
 .../auto_examples/mvml_plot__codeobj.pickle   | Bin 1176 -> 0 bytes
 .../auto_examples/sg_execution_times.rst      |  15 --
 doc/tutorial/times.rst                        |  16 ++
 examples/cumbo/README.txt                     |   4 -
 examples/mumbo/README.txt                     |   4 -
 examples/mvml/README.txt                      |   8 +-
 49 files changed, 22 insertions(+), 3755 deletions(-)
 delete mode 100644 doc/tutorial/auto_examples/cumbo/cumbo_plot_2_views_2_classes.ipynb
 delete mode 100644 doc/tutorial/auto_examples/cumbo/cumbo_plot_2_views_2_classes.py
 delete mode 100644 doc/tutorial/auto_examples/cumbo/cumbo_plot_2_views_2_classes.rst
 delete mode 100644 doc/tutorial/auto_examples/cumbo/cumbo_plot_2_views_2_classes_codeobj.pickle
 delete mode 100644 doc/tutorial/auto_examples/cumbo/cumbo_plot_3_views_3_classes.ipynb
 delete mode 100644 doc/tutorial/auto_examples/cumbo/cumbo_plot_3_views_3_classes.py
 delete mode 100644 doc/tutorial/auto_examples/cumbo/cumbo_plot_3_views_3_classes.rst
 delete mode 100644 doc/tutorial/auto_examples/cumbo/cumbo_plot_3_views_3_classes_codeobj.pickle
 delete mode 100644 doc/tutorial/auto_examples/cumbo_plot_2_views_2_classes.ipynb
 delete mode 100644 doc/tutorial/auto_examples/cumbo_plot_2_views_2_classes.py
 delete mode 100644 doc/tutorial/auto_examples/cumbo_plot_2_views_2_classes.rst
 delete mode 100644 doc/tutorial/auto_examples/cumbo_plot_2_views_2_classes_codeobj.pickle
 delete mode 100644 doc/tutorial/auto_examples/cumbo_plot_3_views_3_classes.ipynb
 delete mode 100644 doc/tutorial/auto_examples/cumbo_plot_3_views_3_classes.py
 delete mode 100644 doc/tutorial/auto_examples/cumbo_plot_3_views_3_classes.rst
 delete mode 100644 doc/tutorial/auto_examples/cumbo_plot_3_views_3_classes_codeobj.pickle
 delete mode 100644 doc/tutorial/auto_examples/mumbo/mumbo_plot_2_views_2_classes.ipynb
 delete mode 100644 doc/tutorial/auto_examples/mumbo/mumbo_plot_2_views_2_classes.py
 delete mode 100644 doc/tutorial/auto_examples/mumbo/mumbo_plot_2_views_2_classes.rst
 delete mode 100644 doc/tutorial/auto_examples/mumbo/mumbo_plot_2_views_2_classes_codeobj.pickle
 delete mode 100644 doc/tutorial/auto_examples/mumbo/mumbo_plot_3_views_3_classes.ipynb
 delete mode 100644 doc/tutorial/auto_examples/mumbo/mumbo_plot_3_views_3_classes.py
 delete mode 100644 doc/tutorial/auto_examples/mumbo/mumbo_plot_3_views_3_classes.rst
 delete mode 100644 doc/tutorial/auto_examples/mumbo/mumbo_plot_3_views_3_classes_codeobj.pickle
 delete mode 100644 doc/tutorial/auto_examples/mumbo_plot_2_views_2_classes.ipynb
 delete mode 100644 doc/tutorial/auto_examples/mumbo_plot_2_views_2_classes.py
 delete mode 100644 doc/tutorial/auto_examples/mumbo_plot_2_views_2_classes.rst
 delete mode 100644 doc/tutorial/auto_examples/mumbo_plot_2_views_2_classes_codeobj.pickle
 delete mode 100644 doc/tutorial/auto_examples/mumbo_plot_3_views_3_classes.ipynb
 delete mode 100644 doc/tutorial/auto_examples/mumbo_plot_3_views_3_classes.py
 delete mode 100644 doc/tutorial/auto_examples/mumbo_plot_3_views_3_classes.rst
 delete mode 100644 doc/tutorial/auto_examples/mumbo_plot_3_views_3_classes_codeobj.pickle
 delete mode 100644 doc/tutorial/auto_examples/mvml/mvml_plot_.ipynb
 delete mode 100644 doc/tutorial/auto_examples/mvml/mvml_plot_.py
 delete mode 100644 doc/tutorial/auto_examples/mvml/mvml_plot_.rst
 delete mode 100644 doc/tutorial/auto_examples/mvml/mvml_plot__codeobj.pickle
 delete mode 100644 doc/tutorial/auto_examples/mvml_plot_.ipynb
 delete mode 100644 doc/tutorial/auto_examples/mvml_plot_.py
 delete mode 100644 doc/tutorial/auto_examples/mvml_plot_.rst
 delete mode 100644 doc/tutorial/auto_examples/mvml_plot__codeobj.pickle
 delete mode 100644 doc/tutorial/auto_examples/sg_execution_times.rst
 create mode 100644 doc/tutorial/times.rst
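
Note: the files deleted below are sphinx-gallery build outputs, regenerated
from the scripts under examples/ at each documentation build. A minimal
sketch of the kind of doc/conf.py configuration that produces such a gallery
(the option names are standard sphinx-gallery keys; the values are
assumptions inferred from the layout in the diffstat above):

    extensions = ['sphinx_gallery.gen_gallery']

    sphinx_gallery_conf = {
        # hand-written example scripts (cumbo, mumbo, mvml) live here
        'examples_dirs': '../examples',
        # generated .py/.ipynb/.rst copies are written here at build time
        'gallery_dirs': 'tutorial/auto_examples',
    }

Removing these generated files from version control avoids the churn visible
in the deletions that follow.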

diff --git a/doc/index.rst b/doc/index.rst
index 9c08361..8d2715a 100644
--- a/doc/index.rst
+++ b/doc/index.rst
@@ -24,8 +24,8 @@ Documentation
    reference/api
    tutorial/install_devel
    tutorial/auto_examples/index
-   tutorial/auto_examples/sg_execution_times
-
+   tutorial/times
+   tutorial/credits
 
 
 Indices and tables
diff --git a/doc/tutorial/auto_examples/auto_examples_jupyter.zip b/doc/tutorial/auto_examples/auto_examples_jupyter.zip
index 414431be2fe9801b778c2f36079e5f7fbf1acf21..a0fbdaf0bca00c7f6941fd01e4cbfac684eadd05 100644
GIT binary patch
delta 151
zcmZ2*k9E;6MwS3?W|rv&IgC=9&HE>bZO-4rskhx+f-zfsJEu2er2KTV97Z`thRF?k
zM5brvFe*=f1mrU<6Pw()M{BxX9;3i?n_NaIrf9wC5xI<J)2;Ft<(SvIFielmV>F$9
v8OYO@ieX^LEz8Z(FUZL+i3hRb^)d@8^O6F*S=ktXfE5U5Gcqs~R)cr|H{&bM

delta 1455
zcmZ4Vi*dm{R*e8}W)=|!5Lnq@7T{#qB#|%9zyQKZK(XY~+@$>Yf}H%4$s21VHop<8
z6qf_3d;E817M~ac0|<lE<s#IruLh|LVw~(C!Ls?g{vi>$w^Pgleq=_mnn*J+fUp2a
zLs@PP*xdMy4i=1?>thSK6k_sX@{|>F6SGqlvQvxlQgakiGLuU(^YapmGE<9V@;3Wr
zax&I~)MVxrl$OL-DA+2br<TO0B$gx=1L=aqk_?R)C8%UPP_te^c210vCP+&fP;q`i
zYMusET1O#9sXPfPS&*5Wos+7UQkq+UkWT~YgCK<zTLl-OEy0Pold=;xmu9mtPu`g$
zHTg`g;N+!wVw1n-Dowtdr_COdS5TCiVlsJEj^<>6g5Q%Z3KwtwRj9|dxxDfZ<K$R9
z4shU2TTz&-3JhQnMhiShAVUMs+wG<(Sc73lfN}ybq(K-hRv@7r$+&qwqoAnDq1mWW
zr~!*YeJl|<d4m|sM!rR2OowMr<dNV)Qh{Wsl(WiVprOd|4KfrQ-}?As9jp!6r&zUN
z_;>S8muzumkd??OMiiQ2^hap2nS8ZIWV1z#tGpmaCY{_@EyfDVp(;AqGAA(bz+g$^
zdSHq}i8PfgY?%|G;jb=oR@KWasLV?W@MdHZVMdf0$dL?6oiMPZG2Ro!x{Wmu_n{|e
zgoYytC`HctY84}FNgAOcr5vgOHG!*4uC111lI)*6x7rMtyeHRJ%ZTh>ibd)AYA2@D
zJ(E9IhcHQ9ne15uR;^tlBa-(DYa&+xnbruDKg7U@nF)X%oLpZc$E+*CIC(*hsUfyx
rjBxu5eV`G*Vh|}&Pd;2@!2I8faq_nsIX->{eugb93=Cmej6h8Q0eaqv

diff --git a/doc/tutorial/auto_examples/auto_examples_python.zip b/doc/tutorial/auto_examples/auto_examples_python.zip
index ac5d7f5242bf3a39fc161b786c227078135bab41..e9eb1dd58513cedc034ae8595e7d768f4a1f4ed3 100644
GIT binary patch
delta 144
zcmdmRljYPI#?1nynL?Xy%q&ygT=hg=czY%v<2K3Z4>cL(rrT;Ta!%*aV$_-5uEi+F
z6fZQLQHxP{^52;v(-Slpd8Xe2iJnlM{!5F|c=}~+Am^(2<en$$5ZN>xMme1?UJMMm
kWw|-}1v&X8@gP>bUO{DmH!B+h5U>Ja4<iG^iWCqJ058NWlK=n!

delta 1342
zcmX?ghH=A97L5RJW)=|!5Lnq@7GUhZ@PwK$0|N-l1I3a{bCdGp3v%*HCf`pLoy@If
zv-zIT7a=*2+U>4>w@(Q&Fn};fZ7y8xM7_Yx?rJjxW#3LQ3rOjZJ+?rMfdPbhLCVT<
zbHFBUR`8z0Db1y<keisDs*s&pl$V;Lkdm2Pl9`{ESd^Jsyjd`mg)sxDATzI^v?RVl
z!B!zXwIn_zu_Un=NEalQWN0Ws<>G-F^$N0clr(|r(|{`T3sUnmpprTYO65slnS#vZ
z?3`4+l+xS+gk%~}50~=f)nQqiHN!tJO-_!GnrsrKGI@HW(&VZrEjF%#qSO?V$srM%
zlb1z*oP0QD%I4zOm#mZLrSPaB2cDJD{_;CQ3=ANQ8X(ZXbKV?nu^19~4dRmdGC*}8
zj2h4z^{j<d4$Vf53sqQL=;Mfyja>0UOowJq<dNV)sMx5t(Ol&)&?w}n0vQF4Dt*id
zV>&#0@<Ve8E`$t<%QxRQmlsw78HgOIKp%i3RexB;?B;J?`y_R+<q4p7!C*-vh(d%m
zFsKy48jwR96t^(2q%j$ot&qaDCsi>WTNXiR$O6U|JXquP3MvD<8JR?w5g8Ue%^|d!
z+Mw74a~yiYLul~xNAV5NaliyMnKM;JWO*V~1~v65PTrCt$0SfXQAU08?-U**P@+XP
z9+c8xU`b<sI~GHFQdK8&r%ExMn>o2!NPV(jsuq*^+Q}cyg(vr>Du7aB4A6R){ZOAE
zrP#?J=K<x{1LXrRVvz^>ck<d)Ii`XqK;u9r^B7`FxCr<4e}@D$A{oa|{+O!E9M8u%
WSustTkDq~`p`C?+L0E$ks0jdMA*hD{

diff --git a/doc/tutorial/auto_examples/cumbo/cumbo_plot_2_views_2_classes.ipynb b/doc/tutorial/auto_examples/cumbo/cumbo_plot_2_views_2_classes.ipynb
deleted file mode 100644
index 81b05f4..0000000
--- a/doc/tutorial/auto_examples/cumbo/cumbo_plot_2_views_2_classes.ipynb
+++ /dev/null
@@ -1,54 +0,0 @@
-{
-  "cells": [
-    {
-      "cell_type": "code",
-      "execution_count": null,
-      "metadata": {
-        "collapsed": false
-      },
-      "outputs": [],
-      "source": [
-        "%matplotlib inline"
-      ]
-    },
-    {
-      "cell_type": "markdown",
-      "metadata": {},
-      "source": [
-        "\n==================================\nMuCombo 2 views, 2 classes example\n==================================\n\nIn this toy example, we generate data from two classes, split between two\ntwo-dimensional views.\n\nFor each view, the data are generated so that half of the points of each class\nare well separated in the plane, while the other half of the points are not\nseparated and placed in the same area. We also insure that the points that are\nnot separated in one view are well separated in the other view.\n\nThus, in the figure representing the data, the points represented by crosses\n(x) are well separated in view 0 while they are not separated in view 1, while\nthe points represented by dots (.) are well separated in view 1 while they are\nnot separated in view 0. In this figure, the blue symbols represent points\nof class 0, while red symbols represent points of class 1.\n\nThe MuCuMBo algorithm take adavantage of the complementarity of the two views to\nrightly classify the points.\n\n"
-      ]
-    },
-    {
-      "cell_type": "code",
-      "execution_count": null,
-      "metadata": {
-        "collapsed": false
-      },
-      "outputs": [],
-      "source": [
-        "import numpy as np\nfrom multimodal.boosting.cumbo import MuCumboClassifier\nfrom matplotlib import pyplot as plt\n\n\ndef generate_data(n_samples, lim):\n    \"\"\"Generate random data in a rectangle\"\"\"\n    lim = np.array(lim)\n    n_features = lim.shape[0]\n    data = np.random.random((n_samples, n_features))\n    data = (lim[:, 1]-lim[:, 0]) * data + lim[:, 0]\n    return data\n\n\nseed = 12\nnp.random.seed(seed)\n\nn_samples = 100\n\nview_0 = np.concatenate((generate_data(n_samples, [[0., 1.], [0., 1.]]),\n                         generate_data(n_samples, [[1., 2.], [0., 1.]]),\n                         generate_data(n_samples, [[0., 1.], [0., 1.]]),\n                         generate_data(n_samples, [[0., 1.], [1., 2.]])))\n\nview_1 = np.concatenate((generate_data(n_samples, [[1., 2.], [0., 1.]]),\n                         generate_data(n_samples, [[0., 1.], [0., 1.]]),\n                         generate_data(n_samples, [[0., 1.], [1., 2.]]),\n                         generate_data(n_samples, [[0., 1.], [0., 1.]])))\n\nX = np.concatenate((view_0, view_1), axis=1)\n\ny = np.zeros(4*n_samples, dtype=np.int64)\ny[2*n_samples:] = 1\n\nviews_ind = np.array([0, 2, 4])\n\nn_estimators = 3\nclf = MuCumboClassifier(n_estimators=n_estimators)\nclf.fit(X, y, views_ind)\n\nprint('\\nAfter 3 iterations, the MuCuMBo classifier reaches exact '\n      'classification for the\\nlearning samples:')\nfor ind, score in enumerate(clf.staged_score(X, y)):\n    print('  - iteration {}, score: {}'.format(ind + 1, score))\n\n\nprint('\\nThe resulting MuCuMBo classifier uses three sub-classifiers that are '\n      'wheighted\\nusing the following weights:\\n'\n      '  estimator weights: {}'.format(clf.estimator_weights_alpha_))\n\n# print('\\nThe two first sub-classifiers use the data of view 0 to compute '\n#       'their\\nclassification results, while the third one uses the data of '\n#       'view 1:\\n'\n#       '  best views: {}'. 
format(clf.best_views_))\n\nprint('\\nThe first figure displays the data, splitting the representation '\n      'between the\\ntwo views.')\n\nfig = plt.figure(figsize=(10., 8.))\nfig.suptitle('Representation of the data', size=16)\nfor ind_view in range(2):\n    ax = plt.subplot(2, 1, ind_view + 1)\n    ax.set_title('View {}'.format(ind_view))\n    ind_feature = ind_view * 2\n    styles = ('.b', 'xb', '.r', 'xr')\n    labels = ('non-separated', 'separated')\n    for ind in range(4):\n        ind_class = ind // 2\n        label = labels[(ind + ind_view) % 2]\n        ax.plot(X[n_samples*ind:n_samples*(ind+1), ind_feature],\n                X[n_samples*ind:n_samples*(ind+1), ind_feature + 1],\n                styles[ind],\n                label='Class {} ({})'.format(ind_class, label))\n    ax.legend()\n\nprint('\\nThe second figure displays the classification results for the '\n      'sub-classifiers\\non the learning sample data.\\n')\n\nstyles = ('.b', '.r')\n# fig = plt.figure(figsize=(12., 7.))\n# fig.suptitle('Classification results on the learning data for the '\n#              'sub-classifiers', size=16)\n# for ind_estimator in range(n_estimators):\n#     best_view = clf.best_views_[ind_estimator]\n#     y_pred = clf.estimators_[ind_estimator].predict(\n#         X[:, 2*best_view:2*best_view+2])\n#     background_color = (1.0, 1.0, 0.9)\n#     for ind_view in range(2):\n#         ax = plt.subplot(2, 3, ind_estimator + 3*ind_view + 1)\n#         if ind_view == best_view:\n#             ax.set_facecolor(background_color)\n#         ax.set_title(\n#             'Sub-classifier {} - View {}'.format(ind_estimator, ind_view))\n#         ind_feature = ind_view * 2\n#         for ind_class in range(2):\n#             ind_samples = (y_pred == ind_class)\n#             ax.plot(X[ind_samples, ind_feature],\n#                     X[ind_samples, ind_feature + 1],\n#                     styles[ind_class],\n#                     label='Class {}'.format(ind_class))\n#         ax.legend(title='Predicted class:')\n\nplt.show()"
-      ]
-    }
-  ],
-  "metadata": {
-    "kernelspec": {
-      "display_name": "Python 3",
-      "language": "python",
-      "name": "python3"
-    },
-    "language_info": {
-      "codemirror_mode": {
-        "name": "ipython",
-        "version": 3
-      },
-      "file_extension": ".py",
-      "mimetype": "text/x-python",
-      "name": "python",
-      "nbconvert_exporter": "python",
-      "pygments_lexer": "ipython3",
-      "version": "3.6.8"
-    }
-  },
-  "nbformat": 4,
-  "nbformat_minor": 0
-}
\ No newline at end of file
diff --git a/doc/tutorial/auto_examples/cumbo/cumbo_plot_2_views_2_classes.py b/doc/tutorial/auto_examples/cumbo/cumbo_plot_2_views_2_classes.py
deleted file mode 100644
index c669f69..0000000
--- a/doc/tutorial/auto_examples/cumbo/cumbo_plot_2_views_2_classes.py
+++ /dev/null
@@ -1,127 +0,0 @@
-# -*- coding: utf-8 -*-
-"""
-==================================
-MuCombo 2 views, 2 classes example
-==================================
-
-In this toy example, we generate data from two classes, split between two
-two-dimensional views.
-
-For each view, the data are generated so that half of the points of each class
-are well separated in the plane, while the other half of the points are not
-separated and placed in the same area. We also ensure that the points that are
-not separated in one view are well separated in the other view.
-
-Thus, in the figure representing the data, the points represented by crosses
-(x) are well separated in view 0 while they are not separated in view 1, while
-the points represented by dots (.) are well separated in view 1 while they are
-not separated in view 0. In this figure, the blue symbols represent points
-of class 0, while red symbols represent points of class 1.
-
-The MuCuMBo algorithm takes advantage of the complementarity of the two views to
-correctly classify the points.
-"""
-
-import numpy as np
-from multimodal.boosting.cumbo import MuCumboClassifier
-from matplotlib import pyplot as plt
-
-
-def generate_data(n_samples, lim):
-    """Generate random data in a rectangle"""
-    lim = np.array(lim)
-    n_features = lim.shape[0]
-    data = np.random.random((n_samples, n_features))
-    data = (lim[:, 1]-lim[:, 0]) * data + lim[:, 0]
-    return data
-
-
-seed = 12
-np.random.seed(seed)
-
-n_samples = 100
-
-view_0 = np.concatenate((generate_data(n_samples, [[0., 1.], [0., 1.]]),
-                         generate_data(n_samples, [[1., 2.], [0., 1.]]),
-                         generate_data(n_samples, [[0., 1.], [0., 1.]]),
-                         generate_data(n_samples, [[0., 1.], [1., 2.]])))
-
-view_1 = np.concatenate((generate_data(n_samples, [[1., 2.], [0., 1.]]),
-                         generate_data(n_samples, [[0., 1.], [0., 1.]]),
-                         generate_data(n_samples, [[0., 1.], [1., 2.]]),
-                         generate_data(n_samples, [[0., 1.], [0., 1.]])))
-
-X = np.concatenate((view_0, view_1), axis=1)
-
-y = np.zeros(4*n_samples, dtype=np.int64)
-y[2*n_samples:] = 1
-
-views_ind = np.array([0, 2, 4])
-
-n_estimators = 3
-clf = MuCumboClassifier(n_estimators=n_estimators)
-clf.fit(X, y, views_ind)
-
-print('\nAfter 3 iterations, the MuCuMBo classifier reaches exact '
-      'classification for the\nlearning samples:')
-for ind, score in enumerate(clf.staged_score(X, y)):
-    print('  - iteration {}, score: {}'.format(ind + 1, score))
-
-
-print('\nThe resulting MuCuMBo classifier uses three sub-classifiers that are '
-      'weighted\nusing the following weights:\n'
-      '  estimator weights: {}'.format(clf.estimator_weights_alpha_))
-
-# print('\nThe first two sub-classifiers use the data of view 0 to compute '
-#       'their\nclassification results, while the third one uses the data of '
-#       'view 1:\n'
-#       '  best views: {}'. format(clf.best_views_))
-
-print('\nThe first figure displays the data, splitting the representation '
-      'between the\ntwo views.')
-
-fig = plt.figure(figsize=(10., 8.))
-fig.suptitle('Representation of the data', size=16)
-for ind_view in range(2):
-    ax = plt.subplot(2, 1, ind_view + 1)
-    ax.set_title('View {}'.format(ind_view))
-    ind_feature = ind_view * 2
-    styles = ('.b', 'xb', '.r', 'xr')
-    labels = ('non-separated', 'separated')
-    for ind in range(4):
-        ind_class = ind // 2
-        label = labels[(ind + ind_view) % 2]
-        ax.plot(X[n_samples*ind:n_samples*(ind+1), ind_feature],
-                X[n_samples*ind:n_samples*(ind+1), ind_feature + 1],
-                styles[ind],
-                label='Class {} ({})'.format(ind_class, label))
-    ax.legend()
-
-print('\nThe second figure displays the classification results for the '
-      'sub-classifiers\non the learning sample data.\n')
-
-styles = ('.b', '.r')
-# fig = plt.figure(figsize=(12., 7.))
-# fig.suptitle('Classification results on the learning data for the '
-#              'sub-classifiers', size=16)
-# for ind_estimator in range(n_estimators):
-#     best_view = clf.best_views_[ind_estimator]
-#     y_pred = clf.estimators_[ind_estimator].predict(
-#         X[:, 2*best_view:2*best_view+2])
-#     background_color = (1.0, 1.0, 0.9)
-#     for ind_view in range(2):
-#         ax = plt.subplot(2, 3, ind_estimator + 3*ind_view + 1)
-#         if ind_view == best_view:
-#             ax.set_facecolor(background_color)
-#         ax.set_title(
-#             'Sub-classifier {} - View {}'.format(ind_estimator, ind_view))
-#         ind_feature = ind_view * 2
-#         for ind_class in range(2):
-#             ind_samples = (y_pred == ind_class)
-#             ax.plot(X[ind_samples, ind_feature],
-#                     X[ind_samples, ind_feature + 1],
-#                     styles[ind_class],
-#                     label='Class {}'.format(ind_class))
-#         ax.legend(title='Predicted class:')
-
-plt.show()
diff --git a/doc/tutorial/auto_examples/cumbo/cumbo_plot_2_views_2_classes.rst b/doc/tutorial/auto_examples/cumbo/cumbo_plot_2_views_2_classes.rst
deleted file mode 100644
index e539862..0000000
--- a/doc/tutorial/auto_examples/cumbo/cumbo_plot_2_views_2_classes.rst
+++ /dev/null
@@ -1,170 +0,0 @@
-.. note::
-    :class: sphx-glr-download-link-note
-
-    Click :ref:`here <sphx_glr_download_tutorial_auto_examples_cumbo_cumbo_plot_2_views_2_classes.py>` to download the full example code
-.. rst-class:: sphx-glr-example-title
-
-.. _sphx_glr_tutorial_auto_examples_cumbo_cumbo_plot_2_views_2_classes.py:
-
-
-==================================
-MuCombo 2 views, 2 classes example
-==================================
-
-In this toy example, we generate data from two classes, split between two
-two-dimensional views.
-
-For each view, the data are generated so that half of the points of each class
-are well separated in the plane, while the other half of the points are not
-separated and placed in the same area. We also ensure that the points that are
-not separated in one view are well separated in the other view.
-
-Thus, in the figure representing the data, the points represented by crosses
-(x) are well separated in view 0 while they are not separated in view 1, while
-the points represented by dots (.) are well separated in view 1 while they are
-not separated in view 0. In this figure, the blue symbols represent points
-of class 0, while red symbols represent points of class 1.
-
-The MuCuMBo algorithm takes advantage of the complementarity of the two views to
-correctly classify the points.
-
-
-.. code-block:: default
-
-
-    import numpy as np
-    from multimodal.boosting.cumbo import MuCumboClassifier
-    from matplotlib import pyplot as plt
-
-
-    def generate_data(n_samples, lim):
-        """Generate random data in a rectangle"""
-        lim = np.array(lim)
-        n_features = lim.shape[0]
-        data = np.random.random((n_samples, n_features))
-        data = (lim[:, 1]-lim[:, 0]) * data + lim[:, 0]
-        return data
-
-
-    seed = 12
-    np.random.seed(seed)
-
-    n_samples = 100
-
-    view_0 = np.concatenate((generate_data(n_samples, [[0., 1.], [0., 1.]]),
-                             generate_data(n_samples, [[1., 2.], [0., 1.]]),
-                             generate_data(n_samples, [[0., 1.], [0., 1.]]),
-                             generate_data(n_samples, [[0., 1.], [1., 2.]])))
-
-    view_1 = np.concatenate((generate_data(n_samples, [[1., 2.], [0., 1.]]),
-                             generate_data(n_samples, [[0., 1.], [0., 1.]]),
-                             generate_data(n_samples, [[0., 1.], [1., 2.]]),
-                             generate_data(n_samples, [[0., 1.], [0., 1.]])))
-
-    X = np.concatenate((view_0, view_1), axis=1)
-
-    y = np.zeros(4*n_samples, dtype=np.int64)
-    y[2*n_samples:] = 1
-
-    views_ind = np.array([0, 2, 4])
-
-    n_estimators = 3
-    clf = MuCumboClassifier(n_estimators=n_estimators)
-    clf.fit(X, y, views_ind)
-
-    print('\nAfter 3 iterations, the MuCuMBo classifier reaches exact '
-          'classification for the\nlearning samples:')
-    for ind, score in enumerate(clf.staged_score(X, y)):
-        print('  - iteration {}, score: {}'.format(ind + 1, score))
-
-
-    print('\nThe resulting MuCuMBo classifier uses three sub-classifiers that are '
-          'weighted\nusing the following weights:\n'
-          '  estimator weights: {}'.format(clf.estimator_weights_alpha_))
-
-    # print('\nThe first two sub-classifiers use the data of view 0 to compute '
-    #       'their\nclassification results, while the third one uses the data of '
-    #       'view 1:\n'
-    #       '  best views: {}'. format(clf.best_views_))
-
-    print('\nThe first figure displays the data, splitting the representation '
-          'between the\ntwo views.')
-
-    fig = plt.figure(figsize=(10., 8.))
-    fig.suptitle('Representation of the data', size=16)
-    for ind_view in range(2):
-        ax = plt.subplot(2, 1, ind_view + 1)
-        ax.set_title('View {}'.format(ind_view))
-        ind_feature = ind_view * 2
-        styles = ('.b', 'xb', '.r', 'xr')
-        labels = ('non-separated', 'separated')
-        for ind in range(4):
-            ind_class = ind // 2
-            label = labels[(ind + ind_view) % 2]
-            ax.plot(X[n_samples*ind:n_samples*(ind+1), ind_feature],
-                    X[n_samples*ind:n_samples*(ind+1), ind_feature + 1],
-                    styles[ind],
-                    label='Class {} ({})'.format(ind_class, label))
-        ax.legend()
-
-    print('\nThe second figure displays the classification results for the '
-          'sub-classifiers\non the learning sample data.\n')
-
-    styles = ('.b', '.r')
-    # fig = plt.figure(figsize=(12., 7.))
-    # fig.suptitle('Classification results on the learning data for the '
-    #              'sub-classifiers', size=16)
-    # for ind_estimator in range(n_estimators):
-    #     best_view = clf.best_views_[ind_estimator]
-    #     y_pred = clf.estimators_[ind_estimator].predict(
-    #         X[:, 2*best_view:2*best_view+2])
-    #     background_color = (1.0, 1.0, 0.9)
-    #     for ind_view in range(2):
-    #         ax = plt.subplot(2, 3, ind_estimator + 3*ind_view + 1)
-    #         if ind_view == best_view:
-    #             ax.set_facecolor(background_color)
-    #         ax.set_title(
-    #             'Sub-classifier {} - View {}'.format(ind_estimator, ind_view))
-    #         ind_feature = ind_view * 2
-    #         for ind_class in range(2):
-    #             ind_samples = (y_pred == ind_class)
-    #             ax.plot(X[ind_samples, ind_feature],
-    #                     X[ind_samples, ind_feature + 1],
-    #                     styles[ind_class],
-    #                     label='Class {}'.format(ind_class))
-    #         ax.legend(title='Predicted class:')
-
-    plt.show()
-
-
-.. rst-class:: sphx-glr-timing
-
-   **Total running time of the script:** ( 0 minutes  0.000 seconds)
-
-
-.. _sphx_glr_download_tutorial_auto_examples_cumbo_cumbo_plot_2_views_2_classes.py:
-
-
-.. only :: html
-
- .. container:: sphx-glr-footer
-    :class: sphx-glr-footer-example
-
-
-
-  .. container:: sphx-glr-download
-
-     :download:`Download Python source code: cumbo_plot_2_views_2_classes.py <cumbo_plot_2_views_2_classes.py>`
-
-
-
-  .. container:: sphx-glr-download
-
-     :download:`Download Jupyter notebook: cumbo_plot_2_views_2_classes.ipynb <cumbo_plot_2_views_2_classes.ipynb>`
-
-
-.. only:: html
-
- .. rst-class:: sphx-glr-signature
-
-    `Gallery generated by Sphinx-Gallery <https://sphinx-gallery.github.io>`_
diff --git a/doc/tutorial/auto_examples/cumbo/cumbo_plot_2_views_2_classes_codeobj.pickle b/doc/tutorial/auto_examples/cumbo/cumbo_plot_2_views_2_classes_codeobj.pickle
deleted file mode 100644
index 390f2a42106ab46aa0b53783421621e8c579b93c..0000000000000000000000000000000000000000
GIT binary patch
literal 0
HcmV?d00001

literal 686
zcmb7B%}T^D5EkjywOtlhPu@K0B`5I()~g7<fTV496Ua|VGDro%2guF%1iq+A)2<W>
zvVp+Nm-)V#Z}ORa|D2xm$Cb6>oYa;nX}l|+bwf019$9Vum%n@Qw{9t0gtf+<{|H$o
zu^`?p65?{98sHI-AllBmRRUfORoZ%b4n3bq`*0umG<S=X(B;UX1zm$%hc>m6HMT^C
zI~0fU{-6ov65Qqqwje6C<=7auV?+<EnTNo=7QihF^I?uErHo~AkLoDkY2mJBUVR!%
zW7#Ny3?a?XAnW6xx$14BR1sySKyv?1v@)anluWGLfGb*V(T?pwkIwTU3zZvzZOfJA
ztU_%Uqu6N%1y@EL165y?_I);{$0;p)C92*ImScRJ#zpwkVR^%b(n$HZum2Ya=oedC
B2a*5)

diff --git a/doc/tutorial/auto_examples/cumbo/cumbo_plot_3_views_3_classes.ipynb b/doc/tutorial/auto_examples/cumbo/cumbo_plot_3_views_3_classes.ipynb
deleted file mode 100644
index a6981bb..0000000
--- a/doc/tutorial/auto_examples/cumbo/cumbo_plot_3_views_3_classes.ipynb
+++ /dev/null
@@ -1,54 +0,0 @@
-{
-  "cells": [
-    {
-      "cell_type": "code",
-      "execution_count": null,
-      "metadata": {
-        "collapsed": false
-      },
-      "outputs": [],
-      "source": [
-        "%matplotlib inline"
-      ]
-    },
-    {
-      "cell_type": "markdown",
-      "metadata": {},
-      "source": [
-        "\n==================================\nMuCumbo 3 views, 3 classes example\n==================================\n\nIn this toy example, we generate data from three classes, split between three\ntwo-dimensional views.\n\nFor each view, the data are generated so that the points for two classes are\nwell seperated, while the points for the third class are not seperated with\nthe two other classes. That means that, taken separately, none of the single\nviews allows for a good classification of the data.\n\nNevertheless, the MuCuMbo algorithm take adavantage of the complementarity of\nthe views to rightly classify the points.\n\n"
-      ]
-    },
-    {
-      "cell_type": "code",
-      "execution_count": null,
-      "metadata": {
-        "collapsed": false
-      },
-      "outputs": [],
-      "source": [
-        "import numpy as np\nfrom multimodal.boosting.cumbo import MuCumboClassifier\nfrom matplotlib import pyplot as plt\n\n\ndef generate_data(n_samples, lim):\n    \"\"\"Generate random data in a rectangle\"\"\"\n    lim = np.array(lim)\n    n_features = lim.shape[0]\n    data = np.random.random((n_samples, n_features))\n    data = (lim[:, 1]-lim[:, 0]) * data + lim[:, 0]\n    return data\n\n\nseed = 12\nnp.random.seed(seed)\n\nn_samples = 300\n\nview_0 = np.concatenate((generate_data(n_samples, [[0., 1.], [0., 1.]]),\n                         generate_data(n_samples, [[1., 2.], [0., 1.]]),\n                         generate_data(n_samples, [[0., 2.], [0., 1.]])))\n\nview_1 = np.concatenate((generate_data(n_samples, [[1., 2.], [0., 1.]]),\n                         generate_data(n_samples, [[0., 2.], [0., 1.]]),\n                         generate_data(n_samples, [[0., 1.], [0., 1.]])))\n\nview_2 = np.concatenate((generate_data(n_samples, [[0., 2.], [0., 1.]]),\n                         generate_data(n_samples, [[0., 1.], [0., 1.]]),\n                         generate_data(n_samples, [[1., 2.], [0., 1.]])))\n\nX = np.concatenate((view_0, view_1, view_2), axis=1)\n\ny = np.zeros(3*n_samples, dtype=np.int64)\ny[n_samples:2*n_samples] = 1\ny[2*n_samples:] = 2\n\nviews_ind = np.array([0, 2, 4, 6])\n\nn_estimators = 4\nclf = MuCumboClassifier(n_estimators=n_estimators)\nclf.fit(X, y, views_ind)\n\nprint('\\nAfter 4 iterations, the MuCuMBo classifier reaches exact '\n      'classification for the\\nlearning samples:')\nfor ind, score in enumerate(clf.staged_score(X, y)):\n    print('  - iteration {}, score: {}'.format(ind + 1, score))\n\nprint('\\nThe resulting MuCuMBo classifier uses four sub-classifiers that are '\n      'wheighted\\nusing the following weights:\\n'\n      '  estimator weights alpha: {}'.format(clf.estimator_weights_alpha_))\n\n# print('\\nThe first sub-classifier uses the data of view 0 to compute '\n#       'its classification\\nresults, the second and third sub-classifiers use '\n#       'the data of view 1, while the\\nfourth one uses the data of '\n#       'view 2:\\n'\n#       '  best views: {}'. 
format(clf.best_views_))\n\nprint('\\nThe first figure displays the data, splitting the representation '\n      'between the\\nthree views.')\n\nstyles = ('.b', '.r', '.g')\nfig = plt.figure(figsize=(12., 11.))\nfig.suptitle('Representation of the data', size=16)\nfor ind_view in range(3):\n    ax = plt.subplot(3, 1, ind_view + 1)\n    ax.set_title('View {}'.format(ind_view))\n    ind_feature = ind_view * 2\n    for ind_class in range(3):\n        ind_samples = (y == ind_class)\n        ax.plot(X[ind_samples, ind_feature],\n                X[ind_samples, ind_feature + 1],\n                styles[ind_class],\n                label='Class {}'.format(ind_class))\n    ax.legend(loc='upper left', framealpha=0.9)\n\nprint('\\nThe second figure displays the classification results for the '\n      'sub-classifiers\\non the learning sample data.\\n')\n\n# fig = plt.figure(figsize=(14., 11.))\n# fig.suptitle('Classification results on the learning data for the '\n#              'sub-classifiers', size=16)\n# for ind_estimator in range(n_estimators):\n#     best_view = clf.best_views_[ind_estimator]\n#     y_pred = clf.estimators_[ind_estimator].predict(\n#         X[:, 2*best_view:2*best_view+2])\n#     background_color = (1.0, 1.0, 0.9)\n#     for ind_view in range(3):\n#         ax = plt.subplot(3, 4, ind_estimator + 4*ind_view + 1)\n#         if ind_view == best_view:\n#             ax.set_facecolor(background_color)\n#         ax.set_title(\n#             'Sub-classifier {} - View {}'.format(ind_estimator, ind_view))\n#         ind_feature = ind_view * 2\n#         for ind_class in range(3):\n#             ind_samples = (y_pred == ind_class)\n#             ax.plot(X[ind_samples, ind_feature],\n#                     X[ind_samples, ind_feature + 1],\n#                     styles[ind_class],\n#                     label='Class {}'.format(ind_class))\n#         ax.legend(title='Predicted class:', loc='upper left', framealpha=0.9)\n\nplt.show()"
-      ]
-    }
-  ],
-  "metadata": {
-    "kernelspec": {
-      "display_name": "Python 3",
-      "language": "python",
-      "name": "python3"
-    },
-    "language_info": {
-      "codemirror_mode": {
-        "name": "ipython",
-        "version": 3
-      },
-      "file_extension": ".py",
-      "mimetype": "text/x-python",
-      "name": "python",
-      "nbconvert_exporter": "python",
-      "pygments_lexer": "ipython3",
-      "version": "3.6.8"
-    }
-  },
-  "nbformat": 4,
-  "nbformat_minor": 0
-}
\ No newline at end of file
diff --git a/doc/tutorial/auto_examples/cumbo/cumbo_plot_3_views_3_classes.py b/doc/tutorial/auto_examples/cumbo/cumbo_plot_3_views_3_classes.py
deleted file mode 100644
index 058b2dc..0000000
--- a/doc/tutorial/auto_examples/cumbo/cumbo_plot_3_views_3_classes.py
+++ /dev/null
@@ -1,121 +0,0 @@
-# -*- coding: utf-8 -*-
-"""
-==================================
-MuCumbo 3 views, 3 classes example
-==================================
-
-In this toy example, we generate data from three classes, split between three
-two-dimensional views.
-
-For each view, the data are generated so that the points for two classes are
-well seperated, while the points for the third class are not seperated with
-the two other classes. That means that, taken separately, none of the single
-views allows for a good classification of the data.
-
-Nevertheless, the MuCuMbo algorithm takes advantage of the complementarity of
-the views to correctly classify the points.
-"""
-
-import numpy as np
-from multimodal.boosting.cumbo import MuCumboClassifier
-from matplotlib import pyplot as plt
-
-
-def generate_data(n_samples, lim):
-    """Generate random data in a rectangle"""
-    lim = np.array(lim)
-    n_features = lim.shape[0]
-    data = np.random.random((n_samples, n_features))
-    data = (lim[:, 1]-lim[:, 0]) * data + lim[:, 0]
-    return data
-
-
-seed = 12
-np.random.seed(seed)
-
-n_samples = 300
-
-view_0 = np.concatenate((generate_data(n_samples, [[0., 1.], [0., 1.]]),
-                         generate_data(n_samples, [[1., 2.], [0., 1.]]),
-                         generate_data(n_samples, [[0., 2.], [0., 1.]])))
-
-view_1 = np.concatenate((generate_data(n_samples, [[1., 2.], [0., 1.]]),
-                         generate_data(n_samples, [[0., 2.], [0., 1.]]),
-                         generate_data(n_samples, [[0., 1.], [0., 1.]])))
-
-view_2 = np.concatenate((generate_data(n_samples, [[0., 2.], [0., 1.]]),
-                         generate_data(n_samples, [[0., 1.], [0., 1.]]),
-                         generate_data(n_samples, [[1., 2.], [0., 1.]])))
-
-X = np.concatenate((view_0, view_1, view_2), axis=1)
-
-y = np.zeros(3*n_samples, dtype=np.int64)
-y[n_samples:2*n_samples] = 1
-y[2*n_samples:] = 2
-
-views_ind = np.array([0, 2, 4, 6])
-
-n_estimators = 4
-clf = MuCumboClassifier(n_estimators=n_estimators)
-clf.fit(X, y, views_ind)
-
-print('\nAfter 4 iterations, the MuCuMBo classifier reaches exact '
-      'classification for the\nlearning samples:')
-for ind, score in enumerate(clf.staged_score(X, y)):
-    print('  - iteration {}, score: {}'.format(ind + 1, score))
-
-print('\nThe resulting MuCuMBo classifier uses four sub-classifiers that are '
-      'weighted\nusing the following weights:\n'
-      '  estimator weights alpha: {}'.format(clf.estimator_weights_alpha_))
-
-# print('\nThe first sub-classifier uses the data of view 0 to compute '
-#       'its classification\nresults, the second and third sub-classifiers use '
-#       'the data of view 1, while the\nfourth one uses the data of '
-#       'view 2:\n'
-#       '  best views: {}'. format(clf.best_views_))
-
-print('\nThe first figure displays the data, splitting the representation '
-      'between the\nthree views.')
-
-styles = ('.b', '.r', '.g')
-fig = plt.figure(figsize=(12., 11.))
-fig.suptitle('Representation of the data', size=16)
-for ind_view in range(3):
-    ax = plt.subplot(3, 1, ind_view + 1)
-    ax.set_title('View {}'.format(ind_view))
-    ind_feature = ind_view * 2
-    for ind_class in range(3):
-        ind_samples = (y == ind_class)
-        ax.plot(X[ind_samples, ind_feature],
-                X[ind_samples, ind_feature + 1],
-                styles[ind_class],
-                label='Class {}'.format(ind_class))
-    ax.legend(loc='upper left', framealpha=0.9)
-
-print('\nThe second figure displays the classification results for the '
-      'sub-classifiers\non the learning sample data.\n')
-
-# fig = plt.figure(figsize=(14., 11.))
-# fig.suptitle('Classification results on the learning data for the '
-#              'sub-classifiers', size=16)
-# for ind_estimator in range(n_estimators):
-#     best_view = clf.best_views_[ind_estimator]
-#     y_pred = clf.estimators_[ind_estimator].predict(
-#         X[:, 2*best_view:2*best_view+2])
-#     background_color = (1.0, 1.0, 0.9)
-#     for ind_view in range(3):
-#         ax = plt.subplot(3, 4, ind_estimator + 4*ind_view + 1)
-#         if ind_view == best_view:
-#             ax.set_facecolor(background_color)
-#         ax.set_title(
-#             'Sub-classifier {} - View {}'.format(ind_estimator, ind_view))
-#         ind_feature = ind_view * 2
-#         for ind_class in range(3):
-#             ind_samples = (y_pred == ind_class)
-#             ax.plot(X[ind_samples, ind_feature],
-#                     X[ind_samples, ind_feature + 1],
-#                     styles[ind_class],
-#                     label='Class {}'.format(ind_class))
-#         ax.legend(title='Predicted class:', loc='upper left', framealpha=0.9)
-
-plt.show()
diff --git a/doc/tutorial/auto_examples/cumbo/cumbo_plot_3_views_3_classes.rst b/doc/tutorial/auto_examples/cumbo/cumbo_plot_3_views_3_classes.rst
deleted file mode 100644
index aa7cfab..0000000
--- a/doc/tutorial/auto_examples/cumbo/cumbo_plot_3_views_3_classes.rst
+++ /dev/null
@@ -1,164 +0,0 @@
-.. note::
-    :class: sphx-glr-download-link-note
-
-    Click :ref:`here <sphx_glr_download_tutorial_auto_examples_cumbo_cumbo_plot_3_views_3_classes.py>` to download the full example code
-.. rst-class:: sphx-glr-example-title
-
-.. _sphx_glr_tutorial_auto_examples_cumbo_cumbo_plot_3_views_3_classes.py:
-
-
-==================================
-MuCumbo 3 views, 3 classes example
-==================================
-
-In this toy example, we generate data from three classes, split between three
-two-dimensional views.
-
-For each view, the data are generated so that the points for two classes are
-well seperated, while the points for the third class are not seperated with
-the two other classes. That means that, taken separately, none of the single
-views allows for a good classification of the data.
-
-Nevertheless, the MuCuMbo algorithm takes advantage of the complementarity of
-the views to correctly classify the points.
-
-
-.. code-block:: default
-
-
-    import numpy as np
-    from multimodal.boosting.cumbo import MuCumboClassifier
-    from matplotlib import pyplot as plt
-
-
-    def generate_data(n_samples, lim):
-        """Generate random data in a rectangle"""
-        lim = np.array(lim)
-        n_features = lim.shape[0]
-        data = np.random.random((n_samples, n_features))
-        data = (lim[:, 1]-lim[:, 0]) * data + lim[:, 0]
-        return data
-
-
-    seed = 12
-    np.random.seed(seed)
-
-    n_samples = 300
-
-    view_0 = np.concatenate((generate_data(n_samples, [[0., 1.], [0., 1.]]),
-                             generate_data(n_samples, [[1., 2.], [0., 1.]]),
-                             generate_data(n_samples, [[0., 2.], [0., 1.]])))
-
-    view_1 = np.concatenate((generate_data(n_samples, [[1., 2.], [0., 1.]]),
-                             generate_data(n_samples, [[0., 2.], [0., 1.]]),
-                             generate_data(n_samples, [[0., 1.], [0., 1.]])))
-
-    view_2 = np.concatenate((generate_data(n_samples, [[0., 2.], [0., 1.]]),
-                             generate_data(n_samples, [[0., 1.], [0., 1.]]),
-                             generate_data(n_samples, [[1., 2.], [0., 1.]])))
-
-    X = np.concatenate((view_0, view_1, view_2), axis=1)
-
-    y = np.zeros(3*n_samples, dtype=np.int64)
-    y[n_samples:2*n_samples] = 1
-    y[2*n_samples:] = 2
-
-    views_ind = np.array([0, 2, 4, 6])
-
-    n_estimators = 4
-    clf = MuCumboClassifier(n_estimators=n_estimators)
-    clf.fit(X, y, views_ind)
-
-    print('\nAfter 4 iterations, the MuCuMBo classifier reaches exact '
-          'classification for the\nlearning samples:')
-    for ind, score in enumerate(clf.staged_score(X, y)):
-        print('  - iteration {}, score: {}'.format(ind + 1, score))
-
-    print('\nThe resulting MuCuMBo classifier uses four sub-classifiers that are '
-          'weighted\nusing the following weights:\n'
-          '  estimator weights alpha: {}'.format(clf.estimator_weights_alpha_))
-
-    # print('\nThe first sub-classifier uses the data of view 0 to compute '
-    #       'its classification\nresults, the second and third sub-classifiers use '
-    #       'the data of view 1, while the\nfourth one uses the data of '
-    #       'view 2:\n'
-    #       '  best views: {}'. format(clf.best_views_))
-
-    print('\nThe first figure displays the data, splitting the representation '
-          'between the\nthree views.')
-
-    styles = ('.b', '.r', '.g')
-    fig = plt.figure(figsize=(12., 11.))
-    fig.suptitle('Representation of the data', size=16)
-    for ind_view in range(3):
-        ax = plt.subplot(3, 1, ind_view + 1)
-        ax.set_title('View {}'.format(ind_view))
-        ind_feature = ind_view * 2
-        for ind_class in range(3):
-            ind_samples = (y == ind_class)
-            ax.plot(X[ind_samples, ind_feature],
-                    X[ind_samples, ind_feature + 1],
-                    styles[ind_class],
-                    label='Class {}'.format(ind_class))
-        ax.legend(loc='upper left', framealpha=0.9)
-
-    print('\nThe second figure displays the classification results for the '
-          'sub-classifiers\non the learning sample data.\n')
-
-    # fig = plt.figure(figsize=(14., 11.))
-    # fig.suptitle('Classification results on the learning data for the '
-    #              'sub-classifiers', size=16)
-    # for ind_estimator in range(n_estimators):
-    #     best_view = clf.best_views_[ind_estimator]
-    #     y_pred = clf.estimators_[ind_estimator].predict(
-    #         X[:, 2*best_view:2*best_view+2])
-    #     background_color = (1.0, 1.0, 0.9)
-    #     for ind_view in range(3):
-    #         ax = plt.subplot(3, 4, ind_estimator + 4*ind_view + 1)
-    #         if ind_view == best_view:
-    #             ax.set_facecolor(background_color)
-    #         ax.set_title(
-    #             'Sub-classifier {} - View {}'.format(ind_estimator, ind_view))
-    #         ind_feature = ind_view * 2
-    #         for ind_class in range(3):
-    #             ind_samples = (y_pred == ind_class)
-    #             ax.plot(X[ind_samples, ind_feature],
-    #                     X[ind_samples, ind_feature + 1],
-    #                     styles[ind_class],
-    #                     label='Class {}'.format(ind_class))
-    #         ax.legend(title='Predicted class:', loc='upper left', framealpha=0.9)
-
-    plt.show()
-
-
-.. rst-class:: sphx-glr-timing
-
-   **Total running time of the script:** ( 0 minutes  0.000 seconds)
-
-
-.. _sphx_glr_download_tutorial_auto_examples_cumbo_cumbo_plot_3_views_3_classes.py:
-
-
-.. only :: html
-
- .. container:: sphx-glr-footer
-    :class: sphx-glr-footer-example
-
-
-
-  .. container:: sphx-glr-download
-
-     :download:`Download Python source code: cumbo_plot_3_views_3_classes.py <cumbo_plot_3_views_3_classes.py>`
-
-
-
-  .. container:: sphx-glr-download
-
-     :download:`Download Jupyter notebook: cumbo_plot_3_views_3_classes.ipynb <cumbo_plot_3_views_3_classes.ipynb>`
-
-
-.. only:: html
-
- .. rst-class:: sphx-glr-signature
-
-    `Gallery generated by Sphinx-Gallery <https://sphinx-gallery.github.io>`_
diff --git a/doc/tutorial/auto_examples/cumbo/cumbo_plot_3_views_3_classes_codeobj.pickle b/doc/tutorial/auto_examples/cumbo/cumbo_plot_3_views_3_classes_codeobj.pickle
deleted file mode 100644
index 390f2a42106ab46aa0b53783421621e8c579b93c..0000000000000000000000000000000000000000
GIT binary patch
literal 0
HcmV?d00001

literal 686
zcmb7B%}T^D5EkjywOtlhPu@K0B`5I()~g7<fTV496Ua|VGDro%2guF%1iq+A)2<W>
zvVp+Nm-)V#Z}ORa|D2xm$Cb6>oYa;nX}l|+bwf019$9Vum%n@Qw{9t0gtf+<{|H$o
zu^`?p65?{98sHI-AllBmRRUfORoZ%b4n3bq`*0umG<S=X(B;UX1zm$%hc>m6HMT^C
zI~0fU{-6ov65Qqqwje6C<=7auV?+<EnTNo=7QihF^I?uErHo~AkLoDkY2mJBUVR!%
zW7#Ny3?a?XAnW6xx$14BR1sySKyv?1v@)anluWGLfGb*V(T?pwkIwTU3zZvzZOfJA
ztU_%Uqu6N%1y@EL165y?_I);{$0;p)C92*ImScRJ#zpwkVR^%b(n$HZum2Ya=oedC
B2a*5)

diff --git a/doc/tutorial/auto_examples/cumbo_plot_2_views_2_classes.ipynb b/doc/tutorial/auto_examples/cumbo_plot_2_views_2_classes.ipynb
deleted file mode 100644
index 81b05f4..0000000
--- a/doc/tutorial/auto_examples/cumbo_plot_2_views_2_classes.ipynb
+++ /dev/null
@@ -1,54 +0,0 @@
-{
-  "cells": [
-    {
-      "cell_type": "code",
-      "execution_count": null,
-      "metadata": {
-        "collapsed": false
-      },
-      "outputs": [],
-      "source": [
-        "%matplotlib inline"
-      ]
-    },
-    {
-      "cell_type": "markdown",
-      "metadata": {},
-      "source": [
-        "\n==================================\nMuCombo 2 views, 2 classes example\n==================================\n\nIn this toy example, we generate data from two classes, split between two\ntwo-dimensional views.\n\nFor each view, the data are generated so that half of the points of each class\nare well separated in the plane, while the other half of the points are not\nseparated and placed in the same area. We also insure that the points that are\nnot separated in one view are well separated in the other view.\n\nThus, in the figure representing the data, the points represented by crosses\n(x) are well separated in view 0 while they are not separated in view 1, while\nthe points represented by dots (.) are well separated in view 1 while they are\nnot separated in view 0. In this figure, the blue symbols represent points\nof class 0, while red symbols represent points of class 1.\n\nThe MuCuMBo algorithm take adavantage of the complementarity of the two views to\nrightly classify the points.\n\n"
-      ]
-    },
-    {
-      "cell_type": "code",
-      "execution_count": null,
-      "metadata": {
-        "collapsed": false
-      },
-      "outputs": [],
-      "source": [
-        "import numpy as np\nfrom multimodal.boosting.cumbo import MuCumboClassifier\nfrom matplotlib import pyplot as plt\n\n\ndef generate_data(n_samples, lim):\n    \"\"\"Generate random data in a rectangle\"\"\"\n    lim = np.array(lim)\n    n_features = lim.shape[0]\n    data = np.random.random((n_samples, n_features))\n    data = (lim[:, 1]-lim[:, 0]) * data + lim[:, 0]\n    return data\n\n\nseed = 12\nnp.random.seed(seed)\n\nn_samples = 100\n\nview_0 = np.concatenate((generate_data(n_samples, [[0., 1.], [0., 1.]]),\n                         generate_data(n_samples, [[1., 2.], [0., 1.]]),\n                         generate_data(n_samples, [[0., 1.], [0., 1.]]),\n                         generate_data(n_samples, [[0., 1.], [1., 2.]])))\n\nview_1 = np.concatenate((generate_data(n_samples, [[1., 2.], [0., 1.]]),\n                         generate_data(n_samples, [[0., 1.], [0., 1.]]),\n                         generate_data(n_samples, [[0., 1.], [1., 2.]]),\n                         generate_data(n_samples, [[0., 1.], [0., 1.]])))\n\nX = np.concatenate((view_0, view_1), axis=1)\n\ny = np.zeros(4*n_samples, dtype=np.int64)\ny[2*n_samples:] = 1\n\nviews_ind = np.array([0, 2, 4])\n\nn_estimators = 3\nclf = MuCumboClassifier(n_estimators=n_estimators)\nclf.fit(X, y, views_ind)\n\nprint('\\nAfter 3 iterations, the MuCuMBo classifier reaches exact '\n      'classification for the\\nlearning samples:')\nfor ind, score in enumerate(clf.staged_score(X, y)):\n    print('  - iteration {}, score: {}'.format(ind + 1, score))\n\n\nprint('\\nThe resulting MuCuMBo classifier uses three sub-classifiers that are '\n      'wheighted\\nusing the following weights:\\n'\n      '  estimator weights: {}'.format(clf.estimator_weights_alpha_))\n\n# print('\\nThe two first sub-classifiers use the data of view 0 to compute '\n#       'their\\nclassification results, while the third one uses the data of '\n#       'view 1:\\n'\n#       '  best views: {}'. 
format(clf.best_views_))\n\nprint('\\nThe first figure displays the data, splitting the representation '\n      'between the\\ntwo views.')\n\nfig = plt.figure(figsize=(10., 8.))\nfig.suptitle('Representation of the data', size=16)\nfor ind_view in range(2):\n    ax = plt.subplot(2, 1, ind_view + 1)\n    ax.set_title('View {}'.format(ind_view))\n    ind_feature = ind_view * 2\n    styles = ('.b', 'xb', '.r', 'xr')\n    labels = ('non-separated', 'separated')\n    for ind in range(4):\n        ind_class = ind // 2\n        label = labels[(ind + ind_view) % 2]\n        ax.plot(X[n_samples*ind:n_samples*(ind+1), ind_feature],\n                X[n_samples*ind:n_samples*(ind+1), ind_feature + 1],\n                styles[ind],\n                label='Class {} ({})'.format(ind_class, label))\n    ax.legend()\n\nprint('\\nThe second figure displays the classification results for the '\n      'sub-classifiers\\non the learning sample data.\\n')\n\nstyles = ('.b', '.r')\n# fig = plt.figure(figsize=(12., 7.))\n# fig.suptitle('Classification results on the learning data for the '\n#              'sub-classifiers', size=16)\n# for ind_estimator in range(n_estimators):\n#     best_view = clf.best_views_[ind_estimator]\n#     y_pred = clf.estimators_[ind_estimator].predict(\n#         X[:, 2*best_view:2*best_view+2])\n#     background_color = (1.0, 1.0, 0.9)\n#     for ind_view in range(2):\n#         ax = plt.subplot(2, 3, ind_estimator + 3*ind_view + 1)\n#         if ind_view == best_view:\n#             ax.set_facecolor(background_color)\n#         ax.set_title(\n#             'Sub-classifier {} - View {}'.format(ind_estimator, ind_view))\n#         ind_feature = ind_view * 2\n#         for ind_class in range(2):\n#             ind_samples = (y_pred == ind_class)\n#             ax.plot(X[ind_samples, ind_feature],\n#                     X[ind_samples, ind_feature + 1],\n#                     styles[ind_class],\n#                     label='Class {}'.format(ind_class))\n#         ax.legend(title='Predicted class:')\n\nplt.show()"
-      ]
-    }
-  ],
-  "metadata": {
-    "kernelspec": {
-      "display_name": "Python 3",
-      "language": "python",
-      "name": "python3"
-    },
-    "language_info": {
-      "codemirror_mode": {
-        "name": "ipython",
-        "version": 3
-      },
-      "file_extension": ".py",
-      "mimetype": "text/x-python",
-      "name": "python",
-      "nbconvert_exporter": "python",
-      "pygments_lexer": "ipython3",
-      "version": "3.6.8"
-    }
-  },
-  "nbformat": 4,
-  "nbformat_minor": 0
-}
\ No newline at end of file
diff --git a/doc/tutorial/auto_examples/cumbo_plot_2_views_2_classes.py b/doc/tutorial/auto_examples/cumbo_plot_2_views_2_classes.py
deleted file mode 100644
index c669f69..0000000
--- a/doc/tutorial/auto_examples/cumbo_plot_2_views_2_classes.py
+++ /dev/null
@@ -1,127 +0,0 @@
-# -*- coding: utf-8 -*-
-"""
-==================================
-MuCombo 2 views, 2 classes example
-==================================
-
-In this toy example, we generate data from two classes, split between two
-two-dimensional views.
-
-For each view, the data are generated so that half of the points of each class
-are well separated in the plane, while the other half of the points are not
-separated and placed in the same area. We also ensure that the points that are
-not separated in one view are well separated in the other view.
-
-Thus, in the figure representing the data, the points represented by crosses
-(x) are well separated in view 0 while they are not separated in view 1, while
-the points represented by dots (.) are well separated in view 1 while they are
-not separated in view 0. In this figure, the blue symbols represent points
-of class 0, while red symbols represent points of class 1.
-
-The MuCuMBo algorithm takes advantage of the complementarity of the two views to
-correctly classify the points.
-"""
-
-import numpy as np
-from multimodal.boosting.cumbo import MuCumboClassifier
-from matplotlib import pyplot as plt
-
-
-def generate_data(n_samples, lim):
-    """Generate random data in a rectangle"""
-    lim = np.array(lim)
-    n_features = lim.shape[0]
-    data = np.random.random((n_samples, n_features))
-    data = (lim[:, 1]-lim[:, 0]) * data + lim[:, 0]
-    return data
-
-
-seed = 12
-np.random.seed(seed)
-
-n_samples = 100
-
-view_0 = np.concatenate((generate_data(n_samples, [[0., 1.], [0., 1.]]),
-                         generate_data(n_samples, [[1., 2.], [0., 1.]]),
-                         generate_data(n_samples, [[0., 1.], [0., 1.]]),
-                         generate_data(n_samples, [[0., 1.], [1., 2.]])))
-
-view_1 = np.concatenate((generate_data(n_samples, [[1., 2.], [0., 1.]]),
-                         generate_data(n_samples, [[0., 1.], [0., 1.]]),
-                         generate_data(n_samples, [[0., 1.], [1., 2.]]),
-                         generate_data(n_samples, [[0., 1.], [0., 1.]])))
-
-X = np.concatenate((view_0, view_1), axis=1)
-
-y = np.zeros(4*n_samples, dtype=np.int64)
-y[2*n_samples:] = 1
-
-views_ind = np.array([0, 2, 4])
-
-n_estimators = 3
-clf = MuCumboClassifier(n_estimators=n_estimators)
-clf.fit(X, y, views_ind)
-
-print('\nAfter 3 iterations, the MuCuMBo classifier reaches exact '
-      'classification for the\nlearning samples:')
-for ind, score in enumerate(clf.staged_score(X, y)):
-    print('  - iteration {}, score: {}'.format(ind + 1, score))
-
-
-print('\nThe resulting MuCuMBo classifier uses three sub-classifiers that are '
-      'weighted\nusing the following weights:\n'
-      '  estimator weights: {}'.format(clf.estimator_weights_alpha_))
-
-# print('\nThe first two sub-classifiers use the data of view 0 to compute '
-#       'their\nclassification results, while the third one uses the data of '
-#       'view 1:\n'
-#       '  best views: {}'. format(clf.best_views_))
-
-print('\nThe first figure displays the data, splitting the representation '
-      'between the\ntwo views.')
-
-fig = plt.figure(figsize=(10., 8.))
-fig.suptitle('Representation of the data', size=16)
-for ind_view in range(2):
-    ax = plt.subplot(2, 1, ind_view + 1)
-    ax.set_title('View {}'.format(ind_view))
-    ind_feature = ind_view * 2
-    styles = ('.b', 'xb', '.r', 'xr')
-    labels = ('non-separated', 'separated')
-    for ind in range(4):
-        ind_class = ind // 2
-        label = labels[(ind + ind_view) % 2]
-        ax.plot(X[n_samples*ind:n_samples*(ind+1), ind_feature],
-                X[n_samples*ind:n_samples*(ind+1), ind_feature + 1],
-                styles[ind],
-                label='Class {} ({})'.format(ind_class, label))
-    ax.legend()
-
-print('\nThe second figure displays the classification results for the '
-      'sub-classifiers\non the learning sample data.\n')
-
-styles = ('.b', '.r')
-# fig = plt.figure(figsize=(12., 7.))
-# fig.suptitle('Classification results on the learning data for the '
-#              'sub-classifiers', size=16)
-# for ind_estimator in range(n_estimators):
-#     best_view = clf.best_views_[ind_estimator]
-#     y_pred = clf.estimators_[ind_estimator].predict(
-#         X[:, 2*best_view:2*best_view+2])
-#     background_color = (1.0, 1.0, 0.9)
-#     for ind_view in range(2):
-#         ax = plt.subplot(2, 3, ind_estimator + 3*ind_view + 1)
-#         if ind_view == best_view:
-#             ax.set_facecolor(background_color)
-#         ax.set_title(
-#             'Sub-classifier {} - View {}'.format(ind_estimator, ind_view))
-#         ind_feature = ind_view * 2
-#         for ind_class in range(2):
-#             ind_samples = (y_pred == ind_class)
-#             ax.plot(X[ind_samples, ind_feature],
-#                     X[ind_samples, ind_feature + 1],
-#                     styles[ind_class],
-#                     label='Class {}'.format(ind_class))
-#         ax.legend(title='Predicted class:')
-
-plt.show()
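
The deleted example above recovers each view from the concatenated matrix ``X`` through ``views_ind``: view ``v`` spans the columns ``views_ind[v]:views_ind[v+1]``. A minimal sketch of that convention in plain NumPy, on a hypothetical miniature matrix that is not part of the example itself:

.. code-block:: default

    import numpy as np

    # Hypothetical miniature matrix: 3 samples, two 2-feature views side by side.
    X = np.arange(12, dtype=float).reshape(3, 4)
    views_ind = np.array([0, 2, 4])

    # View v spans the columns views_ind[v]:views_ind[v + 1].
    for v in range(len(views_ind) - 1):
        view = X[:, views_ind[v]:views_ind[v + 1]]
        print('view {} has shape {}'.format(v, view.shape))  # (3, 2) both times
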
diff --git a/doc/tutorial/auto_examples/cumbo_plot_2_views_2_classes.rst b/doc/tutorial/auto_examples/cumbo_plot_2_views_2_classes.rst
deleted file mode 100644
index 74f22bd..0000000
--- a/doc/tutorial/auto_examples/cumbo_plot_2_views_2_classes.rst
+++ /dev/null
@@ -1,170 +0,0 @@
-.. note::
-    :class: sphx-glr-download-link-note
-
-    Click :ref:`here <sphx_glr_download_tutorial_auto_examples_cumbo_plot_2_views_2_classes.py>` to download the full example code
-.. rst-class:: sphx-glr-example-title
-
-.. _sphx_glr_tutorial_auto_examples_cumbo_plot_2_views_2_classes.py:
-
-
-==================================
-MuCumbo 2 views, 2 classes example
-==================================
-
-In this toy example, we generate data from two classes, split between two
-two-dimensional views.
-
-For each view, the data are generated so that half of the points of each class
-are well separated in the plane, while the other half of the points are not
-separated and placed in the same area. We also ensure that the points that are
-not separated in one view are well separated in the other view.
-
-Thus, in the figure representing the data, the points represented by crosses
-(x) are well separated in view 0 but not in view 1, whereas the points
-represented by dots (.) are well separated in view 1 but not in view 0. In
-this figure, the blue symbols represent points of class 0, while red symbols
-represent points of class 1.
-
-The MuCuMBo algorithm takes advantage of the complementarity of the two views
-to classify the points correctly.
-
-
-.. code-block:: default
-
-
-    import numpy as np
-    from multimodal.boosting.cumbo import MuCumboClassifier
-    from matplotlib import pyplot as plt
-
-
-    def generate_data(n_samples, lim):
-        """Generate random data in a rectangle"""
-        lim = np.array(lim)
-        n_features = lim.shape[0]
-        data = np.random.random((n_samples, n_features))
-        data = (lim[:, 1]-lim[:, 0]) * data + lim[:, 0]
-        return data
-
-
-    seed = 12
-    np.random.seed(seed)
-
-    n_samples = 100
-
-    view_0 = np.concatenate((generate_data(n_samples, [[0., 1.], [0., 1.]]),
-                             generate_data(n_samples, [[1., 2.], [0., 1.]]),
-                             generate_data(n_samples, [[0., 1.], [0., 1.]]),
-                             generate_data(n_samples, [[0., 1.], [1., 2.]])))
-
-    view_1 = np.concatenate((generate_data(n_samples, [[1., 2.], [0., 1.]]),
-                             generate_data(n_samples, [[0., 1.], [0., 1.]]),
-                             generate_data(n_samples, [[0., 1.], [1., 2.]]),
-                             generate_data(n_samples, [[0., 1.], [0., 1.]])))
-
-    X = np.concatenate((view_0, view_1), axis=1)
-
-    y = np.zeros(4*n_samples, dtype=np.int64)
-    y[2*n_samples:] = 1
-
-    views_ind = np.array([0, 2, 4])
-
-    n_estimators = 3
-    clf = MuCumboClassifier(n_estimators=n_estimators)
-    clf.fit(X, y, views_ind)
-
-    print('\nAfter 3 iterations, the MuCuMBo classifier reaches exact '
-          'classification for the\nlearning samples:')
-    for ind, score in enumerate(clf.staged_score(X, y)):
-        print('  - iteration {}, score: {}'.format(ind + 1, score))
-
-
-    print('\nThe resulting MuCuMBo classifier uses three sub-classifiers that are '
-          'weighted\nusing the following weights:\n'
-          '  estimator weights: {}'.format(clf.estimator_weights_alpha_))
-
-    # print('\nThe first two sub-classifiers use the data of view 0 to compute '
-    #       'their\nclassification results, while the third one uses the data of '
-    #       'view 1:\n'
-    #       '  best views: {}'.format(clf.best_views_))
-
-    print('\nThe first figure displays the data, splitting the representation '
-          'between the\ntwo views.')
-
-    fig = plt.figure(figsize=(10., 8.))
-    fig.suptitle('Representation of the data', size=16)
-    for ind_view in range(2):
-        ax = plt.subplot(2, 1, ind_view + 1)
-        ax.set_title('View {}'.format(ind_view))
-        ind_feature = ind_view * 2
-        styles = ('.b', 'xb', '.r', 'xr')
-        labels = ('non-separated', 'separated')
-        for ind in range(4):
-            ind_class = ind // 2
-            label = labels[(ind + ind_view) % 2]
-            ax.plot(X[n_samples*ind:n_samples*(ind+1), ind_feature],
-                    X[n_samples*ind:n_samples*(ind+1), ind_feature + 1],
-                    styles[ind],
-                    label='Class {} ({})'.format(ind_class, label))
-        ax.legend()
-
-    print('\nThe second figure displays the classification results for the '
-          'sub-classifiers\non the learning sample data.\n')
-
-    styles = ('.b', '.r')
-    # fig = plt.figure(figsize=(12., 7.))
-    # fig.suptitle('Classification results on the learning data for the '
-    #              'sub-classifiers', size=16)
-    # for ind_estimator in range(n_estimators):
-    #     best_view = clf.best_views_[ind_estimator]
-    #     y_pred = clf.estimators_[ind_estimator].predict(
-    #         X[:, 2*best_view:2*best_view+2])
-    #     background_color = (1.0, 1.0, 0.9)
-    #     for ind_view in range(2):
-    #         ax = plt.subplot(2, 3, ind_estimator + 3*ind_view + 1)
-    #         if ind_view == best_view:
-    #             ax.set_facecolor(background_color)
-    #         ax.set_title(
-    #             'Sub-classifier {} - View {}'.format(ind_estimator, ind_view))
-    #         ind_feature = ind_view * 2
-    #         for ind_class in range(2):
-    #             ind_samples = (y_pred == ind_class)
-    #             ax.plot(X[ind_samples, ind_feature],
-    #                     X[ind_samples, ind_feature + 1],
-    #                     styles[ind_class],
-    #                     label='Class {}'.format(ind_class))
-    #         ax.legend(title='Predicted class:')
-
-    plt.show()
-
-
-.. rst-class:: sphx-glr-timing
-
-   **Total running time of the script:** ( 0 minutes  0.000 seconds)
-
-
-.. _sphx_glr_download_tutorial_auto_examples_cumbo_plot_2_views_2_classes.py:
-
-
-.. only :: html
-
- .. container:: sphx-glr-footer
-    :class: sphx-glr-footer-example
-
-
-
-  .. container:: sphx-glr-download
-
-     :download:`Download Python source code: cumbo_plot_2_views_2_classes.py <cumbo_plot_2_views_2_classes.py>`
-
-
-
-  .. container:: sphx-glr-download
-
-     :download:`Download Jupyter notebook: cumbo_plot_2_views_2_classes.ipynb <cumbo_plot_2_views_2_classes.ipynb>`
-
-
-.. only:: html
-
- .. rst-class:: sphx-glr-signature
-
-    `Gallery generated by Sphinx-Gallery <https://sphinx-gallery.github.io>`_
diff --git a/doc/tutorial/auto_examples/cumbo_plot_2_views_2_classes_codeobj.pickle b/doc/tutorial/auto_examples/cumbo_plot_2_views_2_classes_codeobj.pickle
deleted file mode 100644
index 4d050d75629df8605ed87caa1475615c38a68de4..0000000000000000000000000000000000000000
GIT binary patch
literal 0
HcmV?d00001

literal 686
zcmb7B%}N6?5Ej|4>uwQ?7kvTIOHRcXs8<nu0m<&BO&~u@l0jAwe1P1HPvDE1Y*I_1
z2O9{?_a*bqH#48b_s{9cbX?OZ&PivPlE%B{S@%Sf{*iUofBE|tf9EPWAgnbW{YOBB
z#DaJ?ON=Xp>VZc>f*3~c=81SURB7w!Ik;-z76teg^J(ddDW;WM2y6$gEN3km#u#~8
zA7a5>1P>a^UI}Cf>Hm~s9F*o$@%$r->=P-!Q?kRB$Z&Ih=zT{8ncM@d={2~^UCC5A
zCzTofr|jYhRv=on;@B9rW<+O^0}ny(MgX@g3=nhFDrGE_8`MRC*wfX4S0Bb3e7+N{
zOqQM{u-DElxMtb{w9}Z=BF;>P;qMW<sH4)FZ9s45SY~vbPBhM>eOPXdC$MLH34ne9
D(SQfU

diff --git a/doc/tutorial/auto_examples/cumbo_plot_3_views_3_classes.ipynb b/doc/tutorial/auto_examples/cumbo_plot_3_views_3_classes.ipynb
deleted file mode 100644
index a6981bb..0000000
--- a/doc/tutorial/auto_examples/cumbo_plot_3_views_3_classes.ipynb
+++ /dev/null
@@ -1,54 +0,0 @@
-{
-  "cells": [
-    {
-      "cell_type": "code",
-      "execution_count": null,
-      "metadata": {
-        "collapsed": false
-      },
-      "outputs": [],
-      "source": [
-        "%matplotlib inline"
-      ]
-    },
-    {
-      "cell_type": "markdown",
-      "metadata": {},
-      "source": [
-        "\n==================================\nMuCumbo 3 views, 3 classes example\n==================================\n\nIn this toy example, we generate data from three classes, split between three\ntwo-dimensional views.\n\nFor each view, the data are generated so that the points for two classes are\nwell seperated, while the points for the third class are not seperated with\nthe two other classes. That means that, taken separately, none of the single\nviews allows for a good classification of the data.\n\nNevertheless, the MuCuMbo algorithm take adavantage of the complementarity of\nthe views to rightly classify the points.\n\n"
-      ]
-    },
-    {
-      "cell_type": "code",
-      "execution_count": null,
-      "metadata": {
-        "collapsed": false
-      },
-      "outputs": [],
-      "source": [
-        "import numpy as np\nfrom multimodal.boosting.cumbo import MuCumboClassifier\nfrom matplotlib import pyplot as plt\n\n\ndef generate_data(n_samples, lim):\n    \"\"\"Generate random data in a rectangle\"\"\"\n    lim = np.array(lim)\n    n_features = lim.shape[0]\n    data = np.random.random((n_samples, n_features))\n    data = (lim[:, 1]-lim[:, 0]) * data + lim[:, 0]\n    return data\n\n\nseed = 12\nnp.random.seed(seed)\n\nn_samples = 300\n\nview_0 = np.concatenate((generate_data(n_samples, [[0., 1.], [0., 1.]]),\n                         generate_data(n_samples, [[1., 2.], [0., 1.]]),\n                         generate_data(n_samples, [[0., 2.], [0., 1.]])))\n\nview_1 = np.concatenate((generate_data(n_samples, [[1., 2.], [0., 1.]]),\n                         generate_data(n_samples, [[0., 2.], [0., 1.]]),\n                         generate_data(n_samples, [[0., 1.], [0., 1.]])))\n\nview_2 = np.concatenate((generate_data(n_samples, [[0., 2.], [0., 1.]]),\n                         generate_data(n_samples, [[0., 1.], [0., 1.]]),\n                         generate_data(n_samples, [[1., 2.], [0., 1.]])))\n\nX = np.concatenate((view_0, view_1, view_2), axis=1)\n\ny = np.zeros(3*n_samples, dtype=np.int64)\ny[n_samples:2*n_samples] = 1\ny[2*n_samples:] = 2\n\nviews_ind = np.array([0, 2, 4, 6])\n\nn_estimators = 4\nclf = MuCumboClassifier(n_estimators=n_estimators)\nclf.fit(X, y, views_ind)\n\nprint('\\nAfter 4 iterations, the MuCuMBo classifier reaches exact '\n      'classification for the\\nlearning samples:')\nfor ind, score in enumerate(clf.staged_score(X, y)):\n    print('  - iteration {}, score: {}'.format(ind + 1, score))\n\nprint('\\nThe resulting MuCuMBo classifier uses four sub-classifiers that are '\n      'wheighted\\nusing the following weights:\\n'\n      '  estimator weights alpha: {}'.format(clf.estimator_weights_alpha_))\n\n# print('\\nThe first sub-classifier uses the data of view 0 to compute '\n#       'its classification\\nresults, the second and third sub-classifiers use '\n#       'the data of view 1, while the\\nfourth one uses the data of '\n#       'view 2:\\n'\n#       '  best views: {}'. 
format(clf.best_views_))\n\nprint('\\nThe first figure displays the data, splitting the representation '\n      'between the\\nthree views.')\n\nstyles = ('.b', '.r', '.g')\nfig = plt.figure(figsize=(12., 11.))\nfig.suptitle('Representation of the data', size=16)\nfor ind_view in range(3):\n    ax = plt.subplot(3, 1, ind_view + 1)\n    ax.set_title('View {}'.format(ind_view))\n    ind_feature = ind_view * 2\n    for ind_class in range(3):\n        ind_samples = (y == ind_class)\n        ax.plot(X[ind_samples, ind_feature],\n                X[ind_samples, ind_feature + 1],\n                styles[ind_class],\n                label='Class {}'.format(ind_class))\n    ax.legend(loc='upper left', framealpha=0.9)\n\nprint('\\nThe second figure displays the classification results for the '\n      'sub-classifiers\\non the learning sample data.\\n')\n\n# fig = plt.figure(figsize=(14., 11.))\n# fig.suptitle('Classification results on the learning data for the '\n#              'sub-classifiers', size=16)\n# for ind_estimator in range(n_estimators):\n#     best_view = clf.best_views_[ind_estimator]\n#     y_pred = clf.estimators_[ind_estimator].predict(\n#         X[:, 2*best_view:2*best_view+2])\n#     background_color = (1.0, 1.0, 0.9)\n#     for ind_view in range(3):\n#         ax = plt.subplot(3, 4, ind_estimator + 4*ind_view + 1)\n#         if ind_view == best_view:\n#             ax.set_facecolor(background_color)\n#         ax.set_title(\n#             'Sub-classifier {} - View {}'.format(ind_estimator, ind_view))\n#         ind_feature = ind_view * 2\n#         for ind_class in range(3):\n#             ind_samples = (y_pred == ind_class)\n#             ax.plot(X[ind_samples, ind_feature],\n#                     X[ind_samples, ind_feature + 1],\n#                     styles[ind_class],\n#                     label='Class {}'.format(ind_class))\n#         ax.legend(title='Predicted class:', loc='upper left', framealpha=0.9)\n\nplt.show()"
-      ]
-    }
-  ],
-  "metadata": {
-    "kernelspec": {
-      "display_name": "Python 3",
-      "language": "python",
-      "name": "python3"
-    },
-    "language_info": {
-      "codemirror_mode": {
-        "name": "ipython",
-        "version": 3
-      },
-      "file_extension": ".py",
-      "mimetype": "text/x-python",
-      "name": "python",
-      "nbconvert_exporter": "python",
-      "pygments_lexer": "ipython3",
-      "version": "3.6.8"
-    }
-  },
-  "nbformat": 4,
-  "nbformat_minor": 0
-}
\ No newline at end of file
diff --git a/doc/tutorial/auto_examples/cumbo_plot_3_views_3_classes.py b/doc/tutorial/auto_examples/cumbo_plot_3_views_3_classes.py
deleted file mode 100644
index 058b2dc..0000000
--- a/doc/tutorial/auto_examples/cumbo_plot_3_views_3_classes.py
+++ /dev/null
@@ -1,121 +0,0 @@
-# -*- coding: utf-8 -*-
-"""
-==================================
-MuCumbo 3 views, 3 classes example
-==================================
-
-In this toy example, we generate data from three classes, split between three
-two-dimensional views.
-
-For each view, the data are generated so that the points for two classes are
-well separated, while the points for the third class are not separated from
-the two other classes. That means that, taken separately, none of the single
-views allows for a good classification of the data.
-
-Nevertheless, the MuCuMBo algorithm takes advantage of the complementarity of
-the views to classify the points correctly.
-"""
-
-import numpy as np
-from multimodal.boosting.cumbo import MuCumboClassifier
-from matplotlib import pyplot as plt
-
-
-def generate_data(n_samples, lim):
-    """Generate random data in a rectangle"""
-    lim = np.array(lim)
-    n_features = lim.shape[0]
-    data = np.random.random((n_samples, n_features))
-    data = (lim[:, 1]-lim[:, 0]) * data + lim[:, 0]
-    return data
-
-
-seed = 12
-np.random.seed(seed)
-
-n_samples = 300
-
-view_0 = np.concatenate((generate_data(n_samples, [[0., 1.], [0., 1.]]),
-                         generate_data(n_samples, [[1., 2.], [0., 1.]]),
-                         generate_data(n_samples, [[0., 2.], [0., 1.]])))
-
-view_1 = np.concatenate((generate_data(n_samples, [[1., 2.], [0., 1.]]),
-                         generate_data(n_samples, [[0., 2.], [0., 1.]]),
-                         generate_data(n_samples, [[0., 1.], [0., 1.]])))
-
-view_2 = np.concatenate((generate_data(n_samples, [[0., 2.], [0., 1.]]),
-                         generate_data(n_samples, [[0., 1.], [0., 1.]]),
-                         generate_data(n_samples, [[1., 2.], [0., 1.]])))
-
-X = np.concatenate((view_0, view_1, view_2), axis=1)
-
-y = np.zeros(3*n_samples, dtype=np.int64)
-y[n_samples:2*n_samples] = 1
-y[2*n_samples:] = 2
-
-views_ind = np.array([0, 2, 4, 6])
-
-n_estimators = 4
-clf = MuCumboClassifier(n_estimators=n_estimators)
-clf.fit(X, y, views_ind)
-
-print('\nAfter 4 iterations, the MuCuMBo classifier reaches exact '
-      'classification for the\nlearning samples:')
-for ind, score in enumerate(clf.staged_score(X, y)):
-    print('  - iteration {}, score: {}'.format(ind + 1, score))
-
-print('\nThe resulting MuCuMBo classifier uses four sub-classifiers that are '
-      'weighted\nusing the following weights:\n'
-      '  estimator weights alpha: {}'.format(clf.estimator_weights_alpha_))
-
-# print('\nThe first sub-classifier uses the data of view 0 to compute '
-#       'its classification\nresults, the second and third sub-classifiers use '
-#       'the data of view 1, while the\nfourth one uses the data of '
-#       'view 2:\n'
-#       '  best views: {}'.format(clf.best_views_))
-
-print('\nThe first figure displays the data, splitting the representation '
-      'between the\nthree views.')
-
-styles = ('.b', '.r', '.g')
-fig = plt.figure(figsize=(12., 11.))
-fig.suptitle('Representation of the data', size=16)
-for ind_view in range(3):
-    ax = plt.subplot(3, 1, ind_view + 1)
-    ax.set_title('View {}'.format(ind_view))
-    ind_feature = ind_view * 2
-    for ind_class in range(3):
-        ind_samples = (y == ind_class)
-        ax.plot(X[ind_samples, ind_feature],
-                X[ind_samples, ind_feature + 1],
-                styles[ind_class],
-                label='Class {}'.format(ind_class))
-    ax.legend(loc='upper left', framealpha=0.9)
-
-print('\nThe second figure displays the classification results for the '
-      'sub-classifiers\non the learning sample data.\n')
-
-# fig = plt.figure(figsize=(14., 11.))
-# fig.suptitle('Classification results on the learning data for the '
-#              'sub-classifiers', size=16)
-# for ind_estimator in range(n_estimators):
-#     best_view = clf.best_views_[ind_estimator]
-#     y_pred = clf.estimators_[ind_estimator].predict(
-#         X[:, 2*best_view:2*best_view+2])
-#     background_color = (1.0, 1.0, 0.9)
-#     for ind_view in range(3):
-#         ax = plt.subplot(3, 4, ind_estimator + 4*ind_view + 1)
-#         if ind_view == best_view:
-#             ax.set_facecolor(background_color)
-#         ax.set_title(
-#             'Sub-classifier {} - View {}'.format(ind_estimator, ind_view))
-#         ind_feature = ind_view * 2
-#         for ind_class in range(3):
-#             ind_samples = (y_pred == ind_class)
-#             ax.plot(X[ind_samples, ind_feature],
-#                     X[ind_samples, ind_feature + 1],
-#                     styles[ind_class],
-#                     label='Class {}'.format(ind_class))
-#         ax.legend(title='Predicted class:', loc='upper left', framealpha=0.9)
-
-plt.show()
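
Since ``staged_score`` yields the training score after each boosting iteration, it can also be used to pick the smallest ``n_estimators`` that reaches a target accuracy. The following sketch illustrates this on hypothetical two-view toy data (the data layout is an assumption made for this illustration; only ``MuCumboClassifier``, ``fit`` and ``staged_score`` are taken from the example above):

.. code-block:: default

    import numpy as np
    from multimodal.boosting.cumbo import MuCumboClassifier

    rng = np.random.RandomState(12)
    n = 50

    # Two hypothetical 2-feature views; class 1 is shifted along x in each view.
    view_0 = np.vstack((rng.rand(n, 2), rng.rand(n, 2) + [1., 0.]))
    view_1 = np.vstack((rng.rand(n, 2) + [1., 0.], rng.rand(n, 2)))
    X = np.hstack((view_0, view_1))
    y = np.repeat([0, 1], n)

    clf = MuCumboClassifier(n_estimators=6)
    clf.fit(X, y, np.array([0, 2, 4]))

    # Stop at the first iteration whose training score reaches 1.0.
    for it, score in enumerate(clf.staged_score(X, y), start=1):
        print('iteration {}: score {:.3f}'.format(it, score))
        if score >= 1.0:
            print('-> n_estimators={} would suffice here'.format(it))
            break
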
diff --git a/doc/tutorial/auto_examples/cumbo_plot_3_views_3_classes.rst b/doc/tutorial/auto_examples/cumbo_plot_3_views_3_classes.rst
deleted file mode 100644
index a002f63..0000000
--- a/doc/tutorial/auto_examples/cumbo_plot_3_views_3_classes.rst
+++ /dev/null
@@ -1,164 +0,0 @@
-.. note::
-    :class: sphx-glr-download-link-note
-
-    Click :ref:`here <sphx_glr_download_tutorial_auto_examples_cumbo_plot_3_views_3_classes.py>` to download the full example code
-.. rst-class:: sphx-glr-example-title
-
-.. _sphx_glr_tutorial_auto_examples_cumbo_plot_3_views_3_classes.py:
-
-
-==================================
-MuCumbo 3 views, 3 classes example
-==================================
-
-In this toy example, we generate data from three classes, split between three
-two-dimensional views.
-
-For each view, the data are generated so that the points for two classes are
-well separated, while the points for the third class are not separated from
-the two other classes. That means that, taken separately, none of the single
-views allows for a good classification of the data.
-
-Nevertheless, the MuCuMBo algorithm takes advantage of the complementarity of
-the views to classify the points correctly.
-
-
-.. code-block:: default
-
-
-    import numpy as np
-    from multimodal.boosting.cumbo import MuCumboClassifier
-    from matplotlib import pyplot as plt
-
-
-    def generate_data(n_samples, lim):
-        """Generate random data in a rectangle"""
-        lim = np.array(lim)
-        n_features = lim.shape[0]
-        data = np.random.random((n_samples, n_features))
-        data = (lim[:, 1]-lim[:, 0]) * data + lim[:, 0]
-        return data
-
-
-    seed = 12
-    np.random.seed(seed)
-
-    n_samples = 300
-
-    view_0 = np.concatenate((generate_data(n_samples, [[0., 1.], [0., 1.]]),
-                             generate_data(n_samples, [[1., 2.], [0., 1.]]),
-                             generate_data(n_samples, [[0., 2.], [0., 1.]])))
-
-    view_1 = np.concatenate((generate_data(n_samples, [[1., 2.], [0., 1.]]),
-                             generate_data(n_samples, [[0., 2.], [0., 1.]]),
-                             generate_data(n_samples, [[0., 1.], [0., 1.]])))
-
-    view_2 = np.concatenate((generate_data(n_samples, [[0., 2.], [0., 1.]]),
-                             generate_data(n_samples, [[0., 1.], [0., 1.]]),
-                             generate_data(n_samples, [[1., 2.], [0., 1.]])))
-
-    X = np.concatenate((view_0, view_1, view_2), axis=1)
-
-    y = np.zeros(3*n_samples, dtype=np.int64)
-    y[n_samples:2*n_samples] = 1
-    y[2*n_samples:] = 2
-
-    views_ind = np.array([0, 2, 4, 6])
-
-    n_estimators = 4
-    clf = MuCumboClassifier(n_estimators=n_estimators)
-    clf.fit(X, y, views_ind)
-
-    print('\nAfter 4 iterations, the MuCuMBo classifier reaches exact '
-          'classification for the\nlearning samples:')
-    for ind, score in enumerate(clf.staged_score(X, y)):
-        print('  - iteration {}, score: {}'.format(ind + 1, score))
-
-    print('\nThe resulting MuCuMBo classifier uses four sub-classifiers that are '
-          'weighted\nusing the following weights:\n'
-          '  estimator weights alpha: {}'.format(clf.estimator_weights_alpha_))
-
-    # print('\nThe first sub-classifier uses the data of view 0 to compute '
-    #       'its classification\nresults, the second and third sub-classifiers use '
-    #       'the data of view 1, while the\nfourth one uses the data of '
-    #       'view 2:\n'
-    #       '  best views: {}'.format(clf.best_views_))
-
-    print('\nThe first figure displays the data, splitting the representation '
-          'between the\nthree views.')
-
-    styles = ('.b', '.r', '.g')
-    fig = plt.figure(figsize=(12., 11.))
-    fig.suptitle('Representation of the data', size=16)
-    for ind_view in range(3):
-        ax = plt.subplot(3, 1, ind_view + 1)
-        ax.set_title('View {}'.format(ind_view))
-        ind_feature = ind_view * 2
-        for ind_class in range(3):
-            ind_samples = (y == ind_class)
-            ax.plot(X[ind_samples, ind_feature],
-                    X[ind_samples, ind_feature + 1],
-                    styles[ind_class],
-                    label='Class {}'.format(ind_class))
-        ax.legend(loc='upper left', framealpha=0.9)
-
-    print('\nThe second figure displays the classification results for the '
-          'sub-classifiers\non the learning sample data.\n')
-
-    # fig = plt.figure(figsize=(14., 11.))
-    # fig.suptitle('Classification results on the learning data for the '
-    #              'sub-classifiers', size=16)
-    # for ind_estimator in range(n_estimators):
-    #     best_view = clf.best_views_[ind_estimator]
-    #     y_pred = clf.estimators_[ind_estimator].predict(
-    #         X[:, 2*best_view:2*best_view+2])
-    #     background_color = (1.0, 1.0, 0.9)
-    #     for ind_view in range(3):
-    #         ax = plt.subplot(3, 4, ind_estimator + 4*ind_view + 1)
-    #         if ind_view == best_view:
-    #             ax.set_facecolor(background_color)
-    #         ax.set_title(
-    #             'Sub-classifier {} - View {}'.format(ind_estimator, ind_view))
-    #         ind_feature = ind_view * 2
-    #         for ind_class in range(3):
-    #             ind_samples = (y_pred == ind_class)
-    #             ax.plot(X[ind_samples, ind_feature],
-    #                     X[ind_samples, ind_feature + 1],
-    #                     styles[ind_class],
-    #                     label='Class {}'.format(ind_class))
-    #         ax.legend(title='Predicted class:', loc='upper left', framealpha=0.9)
-
-    plt.show()
-
-
-.. rst-class:: sphx-glr-timing
-
-   **Total running time of the script:** ( 0 minutes  0.000 seconds)
-
-
-.. _sphx_glr_download_tutorial_auto_examples_cumbo_plot_3_views_3_classes.py:
-
-
-.. only :: html
-
- .. container:: sphx-glr-footer
-    :class: sphx-glr-footer-example
-
-
-
-  .. container:: sphx-glr-download
-
-     :download:`Download Python source code: cumbo_plot_3_views_3_classes.py <cumbo_plot_3_views_3_classes.py>`
-
-
-
-  .. container:: sphx-glr-download
-
-     :download:`Download Jupyter notebook: cumbo_plot_3_views_3_classes.ipynb <cumbo_plot_3_views_3_classes.ipynb>`
-
-
-.. only:: html
-
- .. rst-class:: sphx-glr-signature
-
-    `Gallery generated by Sphinx-Gallery <https://sphinx-gallery.github.io>`_
diff --git a/doc/tutorial/auto_examples/cumbo_plot_3_views_3_classes_codeobj.pickle b/doc/tutorial/auto_examples/cumbo_plot_3_views_3_classes_codeobj.pickle
deleted file mode 100644
index 4d050d75629df8605ed87caa1475615c38a68de4..0000000000000000000000000000000000000000
GIT binary patch
literal 0
HcmV?d00001

literal 686
zcmb7B%}N6?5Ej|4>uwQ?7kvTIOHRcXs8<nu0m<&BO&~u@l0jAwe1P1HPvDE1Y*I_1
z2O9{?_a*bqH#48b_s{9cbX?OZ&PivPlE%B{S@%Sf{*iUofBE|tf9EPWAgnbW{YOBB
z#DaJ?ON=Xp>VZc>f*3~c=81SURB7w!Ik;-z76teg^J(ddDW;WM2y6$gEN3km#u#~8
zA7a5>1P>a^UI}Cf>Hm~s9F*o$@%$r->=P-!Q?kRB$Z&Ih=zT{8ncM@d={2~^UCC5A
zCzTofr|jYhRv=on;@B9rW<+O^0}ny(MgX@g3=nhFDrGE_8`MRC*wfX4S0Bb3e7+N{
zOqQM{u-DElxMtb{w9}Z=BF;>P;qMW<sH4)FZ9s45SY~vbPBhM>eOPXdC$MLH34ne9
D(SQfU

diff --git a/doc/tutorial/auto_examples/index.rst b/doc/tutorial/auto_examples/index.rst
index 17002d9..c3430b3 100644
--- a/doc/tutorial/auto_examples/index.rst
+++ b/doc/tutorial/auto_examples/index.rst
@@ -21,10 +21,6 @@ Multimodal Examples
 
 .. _sphx_glr_tutorial_auto_examples_cumbo:
 
-.. _examples:
-
-Examples
-========
 
 MuCuMBo Examples
 ----------------
@@ -82,10 +78,6 @@ cooperation between views for classification.
 
 .. _sphx_glr_tutorial_auto_examples_mumbo:
 
-.. _examples:
-
-Examples
-========
 
 MuMBo Examples
 --------------
@@ -143,13 +135,9 @@ cooperation between views for classification.
 
 .. _sphx_glr_tutorial_auto_examples_mvml:
 
-.. _examples:
-
-Examples
-========
 
-MVML
-----
+MVML Examples
+-------------
 
 The following toy examples illustrate how the MVML algorithm
 
diff --git a/doc/tutorial/auto_examples/mumbo/mumbo_plot_2_views_2_classes.ipynb b/doc/tutorial/auto_examples/mumbo/mumbo_plot_2_views_2_classes.ipynb
deleted file mode 100644
index bfbb2d3..0000000
--- a/doc/tutorial/auto_examples/mumbo/mumbo_plot_2_views_2_classes.ipynb
+++ /dev/null
@@ -1,54 +0,0 @@
-{
-  "cells": [
-    {
-      "cell_type": "code",
-      "execution_count": null,
-      "metadata": {
-        "collapsed": false
-      },
-      "outputs": [],
-      "source": [
-        "%matplotlib inline"
-      ]
-    },
-    {
-      "cell_type": "markdown",
-      "metadata": {},
-      "source": [
-        "\n================================\nMumbo 2 views, 2 classes example\n================================\n\nIn this toy example, we generate data from two classes, split between two\ntwo-dimensional views.\n\nFor each view, the data are generated so that half of the points of each class\nare well separated in the plane, while the other half of the points are not\nseparated and placed in the same area. We also insure that the points that are\nnot separated in one view are well separated in the other view.\n\nThus, in the figure representing the data, the points represented by crosses\n(x) are well separated in view 0 while they are not separated in view 1, while\nthe points represented by dots (.) are well separated in view 1 while they are\nnot separated in view 0. In this figure, the blue symbols represent points\nof class 0, while red symbols represent points of class 1.\n\nThe MuMBo algorithm take adavantage of the complementarity of the two views to\nrightly classify the points.\n\n"
-      ]
-    },
-    {
-      "cell_type": "code",
-      "execution_count": null,
-      "metadata": {
-        "collapsed": false
-      },
-      "outputs": [],
-      "source": [
-        "import numpy as np\nfrom multimodal.boosting.mumbo import MumboClassifier\nfrom matplotlib import pyplot as plt\n\n\ndef generate_data(n_samples, lim):\n    \"\"\"Generate random data in a rectangle\"\"\"\n    lim = np.array(lim)\n    n_features = lim.shape[0]\n    data = np.random.random((n_samples, n_features))\n    data = (lim[:, 1]-lim[:, 0]) * data + lim[:, 0]\n    return data\n\n\nseed = 12\nnp.random.seed(seed)\n\nn_samples = 100\n\nview_0 = np.concatenate((generate_data(n_samples, [[0., 1.], [0., 1.]]),\n                         generate_data(n_samples, [[1., 2.], [0., 1.]]),\n                         generate_data(n_samples, [[0., 1.], [0., 1.]]),\n                         generate_data(n_samples, [[0., 1.], [1., 2.]])))\n\nview_1 = np.concatenate((generate_data(n_samples, [[1., 2.], [0., 1.]]),\n                         generate_data(n_samples, [[0., 1.], [0., 1.]]),\n                         generate_data(n_samples, [[0., 1.], [1., 2.]]),\n                         generate_data(n_samples, [[0., 1.], [0., 1.]])))\n\nX = np.concatenate((view_0, view_1), axis=1)\n\ny = np.zeros(4*n_samples, dtype=np.int64)\ny[2*n_samples:] = 1\n\nviews_ind = np.array([0, 2, 4])\n\nn_estimators = 3\nclf = MumboClassifier(n_estimators=n_estimators)\nclf.fit(X, y, views_ind)\n\nprint('\\nAfter 3 iterations, the MuMBo classifier reaches exact '\n      'classification for the\\nlearning samples:')\nfor ind, score in enumerate(clf.staged_score(X, y)):\n    print('  - iteration {}, score: {}'.format(ind + 1, score))\n\n\nprint('\\nThe resulting MuMBo classifier uses three sub-classifiers that are '\n      'wheighted\\nusing the following weights:\\n'\n      '  estimator weights: {}'.format(clf.estimator_weights_))\n\nprint('\\nThe two first sub-classifiers use the data of view 0 to compute '\n      'their\\nclassification results, while the third one uses the data of '\n      'view 1:\\n'\n      '  best views: {}'. 
format(clf.best_views_))\n\nprint('\\nThe first figure displays the data, splitting the representation '\n      'between the\\ntwo views.')\n\nfig = plt.figure(figsize=(10., 8.))\nfig.suptitle('Representation of the data', size=16)\nfor ind_view in range(2):\n    ax = plt.subplot(2, 1, ind_view + 1)\n    ax.set_title('View {}'.format(ind_view))\n    ind_feature = ind_view * 2\n    styles = ('.b', 'xb', '.r', 'xr')\n    labels = ('non-separated', 'separated')\n    for ind in range(4):\n        ind_class = ind // 2\n        label = labels[(ind + ind_view) % 2]\n        ax.plot(X[n_samples*ind:n_samples*(ind+1), ind_feature],\n                X[n_samples*ind:n_samples*(ind+1), ind_feature + 1],\n                styles[ind],\n                label='Class {} ({})'.format(ind_class, label))\n    ax.legend()\n\nprint('\\nThe second figure displays the classification results for the '\n      'sub-classifiers\\non the learning sample data.\\n')\n\nstyles = ('.b', '.r')\nfig = plt.figure(figsize=(12., 7.))\nfig.suptitle('Classification results on the learning data for the '\n             'sub-classifiers', size=16)\nfor ind_estimator in range(n_estimators):\n    best_view = clf.best_views_[ind_estimator]\n    y_pred = clf.estimators_[ind_estimator].predict(\n        X[:, 2*best_view:2*best_view+2])\n    background_color = (1.0, 1.0, 0.9)\n    for ind_view in range(2):\n        ax = plt.subplot(2, 3, ind_estimator + 3*ind_view + 1)\n        if ind_view == best_view:\n            ax.set_facecolor(background_color)\n        ax.set_title(\n            'Sub-classifier {} - View {}'.format(ind_estimator, ind_view))\n        ind_feature = ind_view * 2\n        for ind_class in range(2):\n            ind_samples = (y_pred == ind_class)\n            ax.plot(X[ind_samples, ind_feature],\n                    X[ind_samples, ind_feature + 1],\n                    styles[ind_class],\n                    label='Class {}'.format(ind_class))\n        ax.legend(title='Predicted class:')\n\nplt.show()"
-      ]
-    }
-  ],
-  "metadata": {
-    "kernelspec": {
-      "display_name": "Python 3",
-      "language": "python",
-      "name": "python3"
-    },
-    "language_info": {
-      "codemirror_mode": {
-        "name": "ipython",
-        "version": 3
-      },
-      "file_extension": ".py",
-      "mimetype": "text/x-python",
-      "name": "python",
-      "nbconvert_exporter": "python",
-      "pygments_lexer": "ipython3",
-      "version": "3.6.8"
-    }
-  },
-  "nbformat": 4,
-  "nbformat_minor": 0
-}
\ No newline at end of file
diff --git a/doc/tutorial/auto_examples/mumbo/mumbo_plot_2_views_2_classes.py b/doc/tutorial/auto_examples/mumbo/mumbo_plot_2_views_2_classes.py
deleted file mode 100644
index 91f0dc7..0000000
--- a/doc/tutorial/auto_examples/mumbo/mumbo_plot_2_views_2_classes.py
+++ /dev/null
@@ -1,127 +0,0 @@
-# -*- coding: utf-8 -*-
-"""
-================================
-Mumbo 2 views, 2 classes example
-================================
-
-In this toy example, we generate data from two classes, split between two
-two-dimensional views.
-
-For each view, the data are generated so that half of the points of each class
-are well separated in the plane, while the other half of the points are not
-separated and placed in the same area. We also ensure that the points that are
-not separated in one view are well separated in the other view.
-
-Thus, in the figure representing the data, the points represented by crosses
-(x) are well separated in view 0 but not in view 1, whereas the points
-represented by dots (.) are well separated in view 1 but not in view 0. In
-this figure, the blue symbols represent points of class 0, while red symbols
-represent points of class 1.
-
-The MuMBo algorithm takes advantage of the complementarity of the two views
-to classify the points correctly.
-"""
-
-import numpy as np
-from multimodal.boosting.mumbo import MumboClassifier
-from matplotlib import pyplot as plt
-
-
-def generate_data(n_samples, lim):
-    """Generate random data in a rectangle"""
-    lim = np.array(lim)
-    n_features = lim.shape[0]
-    data = np.random.random((n_samples, n_features))
-    data = (lim[:, 1]-lim[:, 0]) * data + lim[:, 0]
-    return data
-
-
-seed = 12
-np.random.seed(seed)
-
-n_samples = 100
-
-view_0 = np.concatenate((generate_data(n_samples, [[0., 1.], [0., 1.]]),
-                         generate_data(n_samples, [[1., 2.], [0., 1.]]),
-                         generate_data(n_samples, [[0., 1.], [0., 1.]]),
-                         generate_data(n_samples, [[0., 1.], [1., 2.]])))
-
-view_1 = np.concatenate((generate_data(n_samples, [[1., 2.], [0., 1.]]),
-                         generate_data(n_samples, [[0., 1.], [0., 1.]]),
-                         generate_data(n_samples, [[0., 1.], [1., 2.]]),
-                         generate_data(n_samples, [[0., 1.], [0., 1.]])))
-
-X = np.concatenate((view_0, view_1), axis=1)
-
-y = np.zeros(4*n_samples, dtype=np.int64)
-y[2*n_samples:] = 1
-
-views_ind = np.array([0, 2, 4])
-
-n_estimators = 3
-clf = MumboClassifier(n_estimators=n_estimators)
-clf.fit(X, y, views_ind)
-
-print('\nAfter 3 iterations, the MuMBo classifier reaches exact '
-      'classification for the\nlearning samples:')
-for ind, score in enumerate(clf.staged_score(X, y)):
-    print('  - iteration {}, score: {}'.format(ind + 1, score))
-
-
-print('\nThe resulting MuMBo classifier uses three sub-classifiers that are '
-      'weighted\nusing the following weights:\n'
-      '  estimator weights: {}'.format(clf.estimator_weights_))
-
-print('\nThe first two sub-classifiers use the data of view 0 to compute '
-      'their\nclassification results, while the third one uses the data of '
-      'view 1:\n'
-      '  best views: {}'.format(clf.best_views_))
-
-print('\nThe first figure displays the data, splitting the representation '
-      'between the\ntwo views.')
-
-fig = plt.figure(figsize=(10., 8.))
-fig.suptitle('Representation of the data', size=16)
-for ind_view in range(2):
-    ax = plt.subplot(2, 1, ind_view + 1)
-    ax.set_title('View {}'.format(ind_view))
-    ind_feature = ind_view * 2
-    styles = ('.b', 'xb', '.r', 'xr')
-    labels = ('non-separated', 'separated')
-    for ind in range(4):
-        ind_class = ind // 2
-        label = labels[(ind + ind_view) % 2]
-        ax.plot(X[n_samples*ind:n_samples*(ind+1), ind_feature],
-                X[n_samples*ind:n_samples*(ind+1), ind_feature + 1],
-                styles[ind],
-                label='Class {} ({})'.format(ind_class, label))
-    ax.legend()
-
-print('\nThe second figure displays the classification results for the '
-      'sub-classifiers\non the learning sample data.\n')
-
-styles = ('.b', '.r')
-fig = plt.figure(figsize=(12., 7.))
-fig.suptitle('Classification results on the learning data for the '
-             'sub-classifiers', size=16)
-for ind_estimator in range(n_estimators):
-    best_view = clf.best_views_[ind_estimator]
-    y_pred = clf.estimators_[ind_estimator].predict(
-        X[:, 2*best_view:2*best_view+2])
-    background_color = (1.0, 1.0, 0.9)
-    for ind_view in range(2):
-        ax = plt.subplot(2, 3, ind_estimator + 3*ind_view + 1)
-        if ind_view == best_view:
-            ax.set_facecolor(background_color)
-        ax.set_title(
-            'Sub-classifier {} - View {}'.format(ind_estimator, ind_view))
-        ind_feature = ind_view * 2
-        for ind_class in range(2):
-            ind_samples = (y_pred == ind_class)
-            ax.plot(X[ind_samples, ind_feature],
-                    X[ind_samples, ind_feature + 1],
-                    styles[ind_class],
-                    label='Class {}'.format(ind_class))
-        ax.legend(title='Predicted class:')
-
-plt.show()
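
The ``best_views_`` attribute used above records, for each boosting round, the view on which the selected sub-classifier was trained; combined with ``views_ind`` it maps each entry of ``estimators_`` back to its feature columns. A short sketch on hypothetical toy data, assuming only the ``MumboClassifier`` attributes shown in this example:

.. code-block:: default

    import numpy as np
    from multimodal.boosting.mumbo import MumboClassifier

    rng = np.random.RandomState(12)
    n = 50

    # Toy data (hypothetical): each class is shifted along x in one view only.
    view_0 = np.vstack((rng.rand(n, 2), rng.rand(n, 2) + [1., 0.]))
    view_1 = np.vstack((rng.rand(n, 2) + [1., 0.], rng.rand(n, 2)))
    X = np.hstack((view_0, view_1))
    y = np.repeat([0, 1], n)
    views_ind = np.array([0, 2, 4])

    clf = MumboClassifier(n_estimators=3)
    clf.fit(X, y, views_ind)

    # Map each sub-classifier to the columns of the view it was trained on.
    for ind, view in enumerate(clf.best_views_):
        start, stop = views_ind[view], views_ind[view + 1]
        print('sub-classifier {}: view {} (columns {}:{}), weight {:.3f}'
              .format(ind, view, start, stop, clf.estimator_weights_[ind]))
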
diff --git a/doc/tutorial/auto_examples/mumbo/mumbo_plot_2_views_2_classes.rst b/doc/tutorial/auto_examples/mumbo/mumbo_plot_2_views_2_classes.rst
deleted file mode 100644
index 712a45e..0000000
--- a/doc/tutorial/auto_examples/mumbo/mumbo_plot_2_views_2_classes.rst
+++ /dev/null
@@ -1,170 +0,0 @@
-.. note::
-    :class: sphx-glr-download-link-note
-
-    Click :ref:`here <sphx_glr_download_tutorial_auto_examples_mumbo_mumbo_plot_2_views_2_classes.py>` to download the full example code
-.. rst-class:: sphx-glr-example-title
-
-.. _sphx_glr_tutorial_auto_examples_mumbo_mumbo_plot_2_views_2_classes.py:
-
-
-================================
-Mumbo 2 views, 2 classes example
-================================
-
-In this toy example, we generate data from two classes, split between two
-two-dimensional views.
-
-For each view, the data are generated so that half of the points of each class
-are well separated in the plane, while the other half of the points are not
-separated and placed in the same area. We also ensure that the points that are
-not separated in one view are well separated in the other view.
-
-Thus, in the figure representing the data, the points represented by crosses
-(x) are well separated in view 0 but not in view 1, whereas the points
-represented by dots (.) are well separated in view 1 but not in view 0. In
-this figure, the blue symbols represent points of class 0, while red symbols
-represent points of class 1.
-
-The MuMBo algorithm takes advantage of the complementarity of the two views
-to classify the points correctly.
-
-
-.. code-block:: default
-
-
-    import numpy as np
-    from multimodal.boosting.mumbo import MumboClassifier
-    from matplotlib import pyplot as plt
-
-
-    def generate_data(n_samples, lim):
-        """Generate random data in a rectangle"""
-        lim = np.array(lim)
-        n_features = lim.shape[0]
-        data = np.random.random((n_samples, n_features))
-        data = (lim[:, 1]-lim[:, 0]) * data + lim[:, 0]
-        return data
-
-
-    seed = 12
-    np.random.seed(seed)
-
-    n_samples = 100
-
-    view_0 = np.concatenate((generate_data(n_samples, [[0., 1.], [0., 1.]]),
-                             generate_data(n_samples, [[1., 2.], [0., 1.]]),
-                             generate_data(n_samples, [[0., 1.], [0., 1.]]),
-                             generate_data(n_samples, [[0., 1.], [1., 2.]])))
-
-    view_1 = np.concatenate((generate_data(n_samples, [[1., 2.], [0., 1.]]),
-                             generate_data(n_samples, [[0., 1.], [0., 1.]]),
-                             generate_data(n_samples, [[0., 1.], [1., 2.]]),
-                             generate_data(n_samples, [[0., 1.], [0., 1.]])))
-
-    X = np.concatenate((view_0, view_1), axis=1)
-
-    y = np.zeros(4*n_samples, dtype=np.int64)
-    y[2*n_samples:] = 1
-
-    views_ind = np.array([0, 2, 4])
-
-    n_estimators = 3
-    clf = MumboClassifier(n_estimators=n_estimators)
-    clf.fit(X, y, views_ind)
-
-    print('\nAfter 3 iterations, the MuMBo classifier reaches exact '
-          'classification for the\nlearning samples:')
-    for ind, score in enumerate(clf.staged_score(X, y)):
-        print('  - iteration {}, score: {}'.format(ind + 1, score))
-
-
-    print('\nThe resulting MuMBo classifier uses three sub-classifiers that are '
-          'weighted\nusing the following weights:\n'
-          '  estimator weights: {}'.format(clf.estimator_weights_))
-
-    print('\nThe first two sub-classifiers use the data of view 0 to compute '
-          'their\nclassification results, while the third one uses the data of '
-          'view 1:\n'
-          '  best views: {}'.format(clf.best_views_))
-
-    print('\nThe first figure displays the data, splitting the representation '
-          'between the\ntwo views.')
-
-    fig = plt.figure(figsize=(10., 8.))
-    fig.suptitle('Representation of the data', size=16)
-    for ind_view in range(2):
-        ax = plt.subplot(2, 1, ind_view + 1)
-        ax.set_title('View {}'.format(ind_view))
-        ind_feature = ind_view * 2
-        styles = ('.b', 'xb', '.r', 'xr')
-        labels = ('non-separated', 'separated')
-        for ind in range(4):
-            ind_class = ind // 2
-            label = labels[(ind + ind_view) % 2]
-            ax.plot(X[n_samples*ind:n_samples*(ind+1), ind_feature],
-                    X[n_samples*ind:n_samples*(ind+1), ind_feature + 1],
-                    styles[ind],
-                    label='Class {} ({})'.format(ind_class, label))
-        ax.legend()
-
-    print('\nThe second figure displays the classification results for the '
-          'sub-classifiers\non the learning sample data.\n')
-
-    styles = ('.b', '.r')
-    fig = plt.figure(figsize=(12., 7.))
-    fig.suptitle('Classification results on the learning data for the '
-                 'sub-classifiers', size=16)
-    for ind_estimator in range(n_estimators):
-        best_view = clf.best_views_[ind_estimator]
-        y_pred = clf.estimators_[ind_estimator].predict(
-            X[:, 2*best_view:2*best_view+2])
-        background_color = (1.0, 1.0, 0.9)
-        for ind_view in range(2):
-            ax = plt.subplot(2, 3, ind_estimator + 3*ind_view + 1)
-            if ind_view == best_view:
-                ax.set_facecolor(background_color)
-            ax.set_title(
-                'Sub-classifier {} - View {}'.format(ind_estimator, ind_view))
-            ind_feature = ind_view * 2
-            for ind_class in range(2):
-                ind_samples = (y_pred == ind_class)
-                ax.plot(X[ind_samples, ind_feature],
-                        X[ind_samples, ind_feature + 1],
-                        styles[ind_class],
-                        label='Class {}'.format(ind_class))
-            ax.legend(title='Predicted class:')
-
-    plt.show()
-
-
-.. rst-class:: sphx-glr-timing
-
-   **Total running time of the script:** ( 0 minutes  0.000 seconds)
-
-
-.. _sphx_glr_download_tutorial_auto_examples_mumbo_mumbo_plot_2_views_2_classes.py:
-
-
-.. only :: html
-
- .. container:: sphx-glr-footer
-    :class: sphx-glr-footer-example
-
-
-
-  .. container:: sphx-glr-download
-
-     :download:`Download Python source code: mumbo_plot_2_views_2_classes.py <mumbo_plot_2_views_2_classes.py>`
-
-
-
-  .. container:: sphx-glr-download
-
-     :download:`Download Jupyter notebook: mumbo_plot_2_views_2_classes.ipynb <mumbo_plot_2_views_2_classes.ipynb>`
-
-
-.. only:: html
-
- .. rst-class:: sphx-glr-signature
-
-    `Gallery generated by Sphinx-Gallery <https://sphinx-gallery.github.io>`_
diff --git a/doc/tutorial/auto_examples/mumbo/mumbo_plot_2_views_2_classes_codeobj.pickle b/doc/tutorial/auto_examples/mumbo/mumbo_plot_2_views_2_classes_codeobj.pickle
deleted file mode 100644
index 42c59a482683c7d52f4b885b7d85822c0db5552f..0000000000000000000000000000000000000000
GIT binary patch
literal 0
HcmV?d00001

literal 682
zcmb7B%SyyB6cw4NwH?OSO8f!cWF`K9Zbk41ByBU3K$2VXKq?4+Kz7EpziQH?%IJVh
zA#l&>J@+2&d7FKF?(dDCt6Silw1$Dy-qjDPBZ_o)tTq1KUp@LuSIQn?rSRZi1DZ)J
zh<EeExI#c@c_gG2{ovgq5l@<eGM*mVQO~5gxsG~TxOoD!oHVqeOY4?lO$)NchRE=8
zbFlYAZAea&L}A&1K$?&)OJ-vodKaT5lsY`Eei6yp3sImaX9;Yqxv;M4xj`$owrX-!
zWE4721U5Yf!&!s+Ay#qI7S_6>dn+2a<ydRBVnpSwf9NpvTv%>cI3?z&0nmoYH4;%C
yr=M;4>#eJ!Pi>pgM7s6C1b1^<j*}=nf1fngyUjQbdFF6(0?AS_<?(+&7JUKkJ_a8E

diff --git a/doc/tutorial/auto_examples/mumbo/mumbo_plot_3_views_3_classes.ipynb b/doc/tutorial/auto_examples/mumbo/mumbo_plot_3_views_3_classes.ipynb
deleted file mode 100644
index 371d83e..0000000
--- a/doc/tutorial/auto_examples/mumbo/mumbo_plot_3_views_3_classes.ipynb
+++ /dev/null
@@ -1,54 +0,0 @@
-{
-  "cells": [
-    {
-      "cell_type": "code",
-      "execution_count": null,
-      "metadata": {
-        "collapsed": false
-      },
-      "outputs": [],
-      "source": [
-        "%matplotlib inline"
-      ]
-    },
-    {
-      "cell_type": "markdown",
-      "metadata": {},
-      "source": [
-        "\n================================\nMumbo 3 views, 3 classes example\n================================\n\nIn this toy example, we generate data from three classes, split between three\ntwo-dimensional views.\n\nFor each view, the data are generated so that the points for two classes are\nwell seperated, while the points for the third class are not seperated with\nthe two other classes. That means that, taken separately, none of the single\nviews allows for a good classification of the data.\n\nNevertheless, the MuMBo algorithm take adavantage of the complementarity of\nthe views to rightly classify the points.\n\n"
-      ]
-    },
-    {
-      "cell_type": "code",
-      "execution_count": null,
-      "metadata": {
-        "collapsed": false
-      },
-      "outputs": [],
-      "source": [
-        "import numpy as np\nfrom multimodal.boosting.mumbo import MumboClassifier\nfrom matplotlib import pyplot as plt\n\n\ndef generate_data(n_samples, lim):\n    \"\"\"Generate random data in a rectangle\"\"\"\n    lim = np.array(lim)\n    n_features = lim.shape[0]\n    data = np.random.random((n_samples, n_features))\n    data = (lim[:, 1]-lim[:, 0]) * data + lim[:, 0]\n    return data\n\n\nseed = 12\nnp.random.seed(seed)\n\nn_samples = 300\n\nview_0 = np.concatenate((generate_data(n_samples, [[0., 1.], [0., 1.]]),\n                         generate_data(n_samples, [[1., 2.], [0., 1.]]),\n                         generate_data(n_samples, [[0., 2.], [0., 1.]])))\n\nview_1 = np.concatenate((generate_data(n_samples, [[1., 2.], [0., 1.]]),\n                         generate_data(n_samples, [[0., 2.], [0., 1.]]),\n                         generate_data(n_samples, [[0., 1.], [0., 1.]])))\n\nview_2 = np.concatenate((generate_data(n_samples, [[0., 2.], [0., 1.]]),\n                         generate_data(n_samples, [[0., 1.], [0., 1.]]),\n                         generate_data(n_samples, [[1., 2.], [0., 1.]])))\n\nX = np.concatenate((view_0, view_1, view_2), axis=1)\n\ny = np.zeros(3*n_samples, dtype=np.int64)\ny[n_samples:2*n_samples] = 1\ny[2*n_samples:] = 2\n\nviews_ind = np.array([0, 2, 4, 6])\n\nn_estimators = 4\nclf = MumboClassifier(n_estimators=n_estimators)\nclf.fit(X, y, views_ind)\n\nprint('\\nAfter 4 iterations, the MuMBo classifier reaches exact '\n      'classification for the\\nlearning samples:')\nfor ind, score in enumerate(clf.staged_score(X, y)):\n    print('  - iteration {}, score: {}'.format(ind + 1, score))\n\nprint('\\nThe resulting MuMBo classifier uses four sub-classifiers that are '\n      'wheighted\\nusing the following weights:\\n'\n      '  estimator weights: {}'.format(clf.estimator_weights_))\n\nprint('\\nThe first sub-classifier uses the data of view 0 to compute '\n      'its classification\\nresults, the second and third sub-classifiers use '\n      'the data of view 1, while the\\nfourth one uses the data of '\n      'view 2:\\n'\n      '  best views: {}'. 
format(clf.best_views_))\n\nprint('\\nThe first figure displays the data, splitting the representation '\n      'between the\\nthree views.')\n\nstyles = ('.b', '.r', '.g')\nfig = plt.figure(figsize=(12., 11.))\nfig.suptitle('Representation of the data', size=16)\nfor ind_view in range(3):\n    ax = plt.subplot(3, 1, ind_view + 1)\n    ax.set_title('View {}'.format(ind_view))\n    ind_feature = ind_view * 2\n    for ind_class in range(3):\n        ind_samples = (y == ind_class)\n        ax.plot(X[ind_samples, ind_feature],\n                X[ind_samples, ind_feature + 1],\n                styles[ind_class],\n                label='Class {}'.format(ind_class))\n    ax.legend(loc='upper left', framealpha=0.9)\n\nprint('\\nThe second figure displays the classification results for the '\n      'sub-classifiers\\non the learning sample data.\\n')\n\nfig = plt.figure(figsize=(14., 11.))\nfig.suptitle('Classification results on the learning data for the '\n             'sub-classifiers', size=16)\nfor ind_estimator in range(n_estimators):\n    best_view = clf.best_views_[ind_estimator]\n    y_pred = clf.estimators_[ind_estimator].predict(\n        X[:, 2*best_view:2*best_view+2])\n    background_color = (1.0, 1.0, 0.9)\n    for ind_view in range(3):\n        ax = plt.subplot(3, 4, ind_estimator + 4*ind_view + 1)\n        if ind_view == best_view:\n            ax.set_facecolor(background_color)\n        ax.set_title(\n            'Sub-classifier {} - View {}'.format(ind_estimator, ind_view))\n        ind_feature = ind_view * 2\n        for ind_class in range(3):\n            ind_samples = (y_pred == ind_class)\n            ax.plot(X[ind_samples, ind_feature],\n                    X[ind_samples, ind_feature + 1],\n                    styles[ind_class],\n                    label='Class {}'.format(ind_class))\n        ax.legend(title='Predicted class:', loc='upper left', framealpha=0.9)\n\nplt.show()"
-      ]
-    }
-  ],
-  "metadata": {
-    "kernelspec": {
-      "display_name": "Python 3",
-      "language": "python",
-      "name": "python3"
-    },
-    "language_info": {
-      "codemirror_mode": {
-        "name": "ipython",
-        "version": 3
-      },
-      "file_extension": ".py",
-      "mimetype": "text/x-python",
-      "name": "python",
-      "nbconvert_exporter": "python",
-      "pygments_lexer": "ipython3",
-      "version": "3.6.8"
-    }
-  },
-  "nbformat": 4,
-  "nbformat_minor": 0
-}
\ No newline at end of file
diff --git a/doc/tutorial/auto_examples/mumbo/mumbo_plot_3_views_3_classes.py b/doc/tutorial/auto_examples/mumbo/mumbo_plot_3_views_3_classes.py
deleted file mode 100644
index 62cd311..0000000
--- a/doc/tutorial/auto_examples/mumbo/mumbo_plot_3_views_3_classes.py
+++ /dev/null
@@ -1,121 +0,0 @@
-# -*- coding: utf-8 -*-
-"""
-================================
-Mumbo 3 views, 3 classes example
-================================
-
-In this toy example, we generate data from three classes, split between three
-two-dimensional views.
-
-For each view, the data are generated so that the points for two classes are
-well separated, while the points for the third class are not separated from
-the two other classes. That means that, taken separately, none of the single
-views allows for a good classification of the data.
-
-Nevertheless, the MuMBo algorithm takes advantage of the complementarity of
-the views to classify the points correctly.
-"""
-
-import numpy as np
-from multimodal.boosting.mumbo import MumboClassifier
-from matplotlib import pyplot as plt
-
-
-def generate_data(n_samples, lim):
-    """Generate random data in a rectangle"""
-    lim = np.array(lim)
-    n_features = lim.shape[0]
-    data = np.random.random((n_samples, n_features))
-    data = (lim[:, 1]-lim[:, 0]) * data + lim[:, 0]
-    return data
-
-
-seed = 12
-np.random.seed(seed)
-
-n_samples = 300
-
-view_0 = np.concatenate((generate_data(n_samples, [[0., 1.], [0., 1.]]),
-                         generate_data(n_samples, [[1., 2.], [0., 1.]]),
-                         generate_data(n_samples, [[0., 2.], [0., 1.]])))
-
-view_1 = np.concatenate((generate_data(n_samples, [[1., 2.], [0., 1.]]),
-                         generate_data(n_samples, [[0., 2.], [0., 1.]]),
-                         generate_data(n_samples, [[0., 1.], [0., 1.]])))
-
-view_2 = np.concatenate((generate_data(n_samples, [[0., 2.], [0., 1.]]),
-                         generate_data(n_samples, [[0., 1.], [0., 1.]]),
-                         generate_data(n_samples, [[1., 2.], [0., 1.]])))
-
-X = np.concatenate((view_0, view_1, view_2), axis=1)
-
-y = np.zeros(3*n_samples, dtype=np.int64)
-y[n_samples:2*n_samples] = 1
-y[2*n_samples:] = 2
-
-views_ind = np.array([0, 2, 4, 6])
-
-n_estimators = 4
-clf = MumboClassifier(n_estimators=n_estimators)
-clf.fit(X, y, views_ind)
-
-print('\nAfter 4 iterations, the MuMBo classifier reaches exact '
-      'classification for the\nlearning samples:')
-for ind, score in enumerate(clf.staged_score(X, y)):
-    print('  - iteration {}, score: {}'.format(ind + 1, score))
-
-print('\nThe resulting MuMBo classifier uses four sub-classifiers that are '
-      'weighted\nusing the following weights:\n'
-      '  estimator weights: {}'.format(clf.estimator_weights_))
-
-print('\nThe first sub-classifier uses the data of view 0 to compute '
-      'its classification\nresults, the second and third sub-classifiers use '
-      'the data of view 1, while the\nfourth one uses the data of '
-      'view 2:\n'
-      '  best views: {}'.format(clf.best_views_))
-
-print('\nThe first figure displays the data, splitting the representation '
-      'between the\nthree views.')
-
-styles = ('.b', '.r', '.g')
-fig = plt.figure(figsize=(12., 11.))
-fig.suptitle('Representation of the data', size=16)
-for ind_view in range(3):
-    ax = plt.subplot(3, 1, ind_view + 1)
-    ax.set_title('View {}'.format(ind_view))
-    ind_feature = ind_view * 2
-    for ind_class in range(3):
-        ind_samples = (y == ind_class)
-        ax.plot(X[ind_samples, ind_feature],
-                X[ind_samples, ind_feature + 1],
-                styles[ind_class],
-                label='Class {}'.format(ind_class))
-    ax.legend(loc='upper left', framealpha=0.9)
-
-print('\nThe second figure displays the classification results for the '
-      'sub-classifiers\non the learning sample data.\n')
-
-fig = plt.figure(figsize=(14., 11.))
-fig.suptitle('Classification results on the learning data for the '
-             'sub-classifiers', size=16)
-for ind_estimator in range(n_estimators):
-    best_view = clf.best_views_[ind_estimator]
-    y_pred = clf.estimators_[ind_estimator].predict(
-        X[:, 2*best_view:2*best_view+2])
-    background_color = (1.0, 1.0, 0.9)
-    for ind_view in range(3):
-        ax = plt.subplot(3, 4, ind_estimator + 4*ind_view + 1)
-        if ind_view == best_view:
-            ax.set_facecolor(background_color)
-        ax.set_title(
-            'Sub-classifier {} - View {}'.format(ind_estimator, ind_view))
-        ind_feature = ind_view * 2
-        for ind_class in range(3):
-            ind_samples = (y_pred == ind_class)
-            ax.plot(X[ind_samples, ind_feature],
-                    X[ind_samples, ind_feature + 1],
-                    styles[ind_class],
-                    label='Class {}'.format(ind_class))
-        ax.legend(title='Predicted class:', loc='upper left', framealpha=0.9)
-
-plt.show()
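
The deleted script above scores only the training sample. A quick way to probe
generalisation is to draw a fresh sample from the same distribution and score
it. This is only a sketch: it reuses the script's ``generate_data`` helper and
the fitted ``clf``, and assumes ``MumboClassifier`` follows the usual
scikit-learn ``score``/``predict`` API on arrays with the same column layout.

.. code-block:: python

    # held-out evaluation sketch (assumes np, generate_data and the fitted clf
    # from the script above)
    test_0 = np.concatenate((generate_data(100, [[0., 1.], [0., 1.]]),
                             generate_data(100, [[1., 2.], [0., 1.]]),
                             generate_data(100, [[0., 2.], [0., 1.]])))
    test_1 = np.concatenate((generate_data(100, [[1., 2.], [0., 1.]]),
                             generate_data(100, [[0., 2.], [0., 1.]]),
                             generate_data(100, [[0., 1.], [0., 1.]])))
    test_2 = np.concatenate((generate_data(100, [[0., 2.], [0., 1.]]),
                             generate_data(100, [[0., 1.], [0., 1.]]),
                             generate_data(100, [[1., 2.], [0., 1.]])))
    X_test = np.concatenate((test_0, test_1, test_2), axis=1)
    y_test = np.repeat(np.arange(3), 100)  # 100 points per class, in block order
    print('held-out accuracy:', clf.score(X_test, y_test))
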
diff --git a/doc/tutorial/auto_examples/mumbo/mumbo_plot_3_views_3_classes.rst b/doc/tutorial/auto_examples/mumbo/mumbo_plot_3_views_3_classes.rst
deleted file mode 100644
index 4aedb22..0000000
--- a/doc/tutorial/auto_examples/mumbo/mumbo_plot_3_views_3_classes.rst
+++ /dev/null
@@ -1,164 +0,0 @@
-.. note::
-    :class: sphx-glr-download-link-note
-
-    Click :ref:`here <sphx_glr_download_tutorial_auto_examples_mumbo_mumbo_plot_3_views_3_classes.py>` to download the full example code
-.. rst-class:: sphx-glr-example-title
-
-.. _sphx_glr_tutorial_auto_examples_mumbo_mumbo_plot_3_views_3_classes.py:
-
-
-================================
-Mumbo 3 views, 3 classes example
-================================
-
-In this toy example, we generate data from three classes, split between three
-two-dimensional views.
-
-For each view, the data are generated so that the points for two classes are
-well separated, while the points of the third class are not separated from
-the two other classes. This means that, taken separately, none of the single
-views allows for a good classification of the data.
-
-Nevertheless, the MuMBo algorithm takes advantage of the complementarity of
-the views to correctly classify the points.
-
-
-.. code-block:: default
-
-
-    import numpy as np
-    from multimodal.boosting.mumbo import MumboClassifier
-    from matplotlib import pyplot as plt
-
-
-    def generate_data(n_samples, lim):
-        """Generate random data in a rectangle"""
-        lim = np.array(lim)
-        n_features = lim.shape[0]
-        data = np.random.random((n_samples, n_features))
-        data = (lim[:, 1]-lim[:, 0]) * data + lim[:, 0]
-        return data
-
-
-    seed = 12
-    np.random.seed(seed)
-
-    n_samples = 300
-
-    view_0 = np.concatenate((generate_data(n_samples, [[0., 1.], [0., 1.]]),
-                             generate_data(n_samples, [[1., 2.], [0., 1.]]),
-                             generate_data(n_samples, [[0., 2.], [0., 1.]])))
-
-    view_1 = np.concatenate((generate_data(n_samples, [[1., 2.], [0., 1.]]),
-                             generate_data(n_samples, [[0., 2.], [0., 1.]]),
-                             generate_data(n_samples, [[0., 1.], [0., 1.]])))
-
-    view_2 = np.concatenate((generate_data(n_samples, [[0., 2.], [0., 1.]]),
-                             generate_data(n_samples, [[0., 1.], [0., 1.]]),
-                             generate_data(n_samples, [[1., 2.], [0., 1.]])))
-
-    X = np.concatenate((view_0, view_1, view_2), axis=1)
-
-    y = np.zeros(3*n_samples, dtype=np.int64)
-    y[n_samples:2*n_samples] = 1
-    y[2*n_samples:] = 2
-
-    views_ind = np.array([0, 2, 4, 6])
-
-    n_estimators = 4
-    clf = MumboClassifier(n_estimators=n_estimators)
-    clf.fit(X, y, views_ind)
-
-    print('\nAfter 4 iterations, the MuMBo classifier reaches exact '
-          'classification for the\nlearning samples:')
-    for ind, score in enumerate(clf.staged_score(X, y)):
-        print('  - iteration {}, score: {}'.format(ind + 1, score))
-
-    print('\nThe resulting MuMBo classifier uses four sub-classifiers that are '
-          'weighted\nusing the following weights:\n'
-          '  estimator weights: {}'.format(clf.estimator_weights_))
-
-    print('\nThe first sub-classifier uses the data of view 0 to compute '
-          'its classification\nresults, the second and third sub-classifiers use '
-          'the data of view 1, while the\nfourth one uses the data of '
-          'view 2:\n'
-          '  best views: {}'.format(clf.best_views_))
-
-    print('\nThe first figure displays the data, splitting the representation '
-          'between the\nthree views.')
-
-    styles = ('.b', '.r', '.g')
-    fig = plt.figure(figsize=(12., 11.))
-    fig.suptitle('Representation of the data', size=16)
-    for ind_view in range(3):
-        ax = plt.subplot(3, 1, ind_view + 1)
-        ax.set_title('View {}'.format(ind_view))
-        ind_feature = ind_view * 2
-        for ind_class in range(3):
-            ind_samples = (y == ind_class)
-            ax.plot(X[ind_samples, ind_feature],
-                    X[ind_samples, ind_feature + 1],
-                    styles[ind_class],
-                    label='Class {}'.format(ind_class))
-        ax.legend(loc='upper left', framealpha=0.9)
-
-    print('\nThe second figure displays the classification results for the '
-          'sub-classifiers\non the learning sample data.\n')
-
-    fig = plt.figure(figsize=(14., 11.))
-    fig.suptitle('Classification results on the learning data for the '
-                 'sub-classifiers', size=16)
-    for ind_estimator in range(n_estimators):
-        best_view = clf.best_views_[ind_estimator]
-        y_pred = clf.estimators_[ind_estimator].predict(
-            X[:, 2*best_view:2*best_view+2])
-        background_color = (1.0, 1.0, 0.9)
-        for ind_view in range(3):
-            ax = plt.subplot(3, 4, ind_estimator + 4*ind_view + 1)
-            if ind_view == best_view:
-                ax.set_facecolor(background_color)
-            ax.set_title(
-                'Sub-classifier {} - View {}'.format(ind_estimator, ind_view))
-            ind_feature = ind_view * 2
-            for ind_class in range(3):
-                ind_samples = (y_pred == ind_class)
-                ax.plot(X[ind_samples, ind_feature],
-                        X[ind_samples, ind_feature + 1],
-                        styles[ind_class],
-                        label='Class {}'.format(ind_class))
-            ax.legend(title='Predicted class:', loc='upper left', framealpha=0.9)
-
-    plt.show()
-
-
-.. rst-class:: sphx-glr-timing
-
-   **Total running time of the script:** ( 0 minutes  0.000 seconds)
-
-
-.. _sphx_glr_download_tutorial_auto_examples_mumbo_mumbo_plot_3_views_3_classes.py:
-
-
-.. only:: html
-
- .. container:: sphx-glr-footer
-    :class: sphx-glr-footer-example
-
-
-
-  .. container:: sphx-glr-download
-
-     :download:`Download Python source code: mumbo_plot_3_views_3_classes.py <mumbo_plot_3_views_3_classes.py>`
-
-
-
-  .. container:: sphx-glr-download
-
-     :download:`Download Jupyter notebook: mumbo_plot_3_views_3_classes.ipynb <mumbo_plot_3_views_3_classes.ipynb>`
-
-
-.. only:: html
-
- .. rst-class:: sphx-glr-signature
-
-    `Gallery generated by Sphinx-Gallery <https://sphinx-gallery.github.io>`_
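
Throughout these examples, ``views_ind = np.array([0, 2, 4, 6])`` tells the
classifier that view ``v`` occupies columns ``views_ind[v]`` up to (but not
including) ``views_ind[v + 1]`` of ``X``. A minimal, self-contained sketch of
that column partition:

.. code-block:: python

    import numpy as np

    X = np.arange(12).reshape(2, 6)        # 2 samples, 3 views of 2 features
    views_ind = np.array([0, 2, 4, 6])
    for v in range(len(views_ind) - 1):
        view = X[:, views_ind[v]:views_ind[v + 1]]
        print('view {}: columns {}..{}'.format(v, views_ind[v], views_ind[v + 1] - 1))
        print(view)
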
diff --git a/doc/tutorial/auto_examples/mumbo/mumbo_plot_3_views_3_classes_codeobj.pickle b/doc/tutorial/auto_examples/mumbo/mumbo_plot_3_views_3_classes_codeobj.pickle
deleted file mode 100644
index 42c59a482683c7d52f4b885b7d85822c0db5552f..0000000000000000000000000000000000000000
GIT binary patch
literal 0
HcmV?d00001

literal 682
zcmb7B%SyyB6cw4NwH?OSO8f!cWF`K9Zbk41ByBU3K$2VXKq?4+Kz7EpziQH?%IJVh
zA#l&>J@+2&d7FKF?(dDCt6Silw1$Dy-qjDPBZ_o)tTq1KUp@LuSIQn?rSRZi1DZ)J
zh<EeExI#c@c_gG2{ovgq5l@<eGM*mVQO~5gxsG~TxOoD!oHVqeOY4?lO$)NchRE=8
zbFlYAZAea&L}A&1K$?&)OJ-vodKaT5lsY`Eei6yp3sImaX9;Yqxv;M4xj`$owrX-!
zWE4721U5Yf!&!s+Ay#qI7S_6>dn+2a<ydRBVnpSwf9NpvTv%>cI3?z&0nmoYH4;%C
yr=M;4>#eJ!Pi>pgM7s6C1b1^<j*}=nf1fngyUjQbdFF6(0?AS_<?(+&7JUKkJ_a8E

diff --git a/doc/tutorial/auto_examples/mumbo_plot_2_views_2_classes.ipynb b/doc/tutorial/auto_examples/mumbo_plot_2_views_2_classes.ipynb
deleted file mode 100644
index bfbb2d3..0000000
--- a/doc/tutorial/auto_examples/mumbo_plot_2_views_2_classes.ipynb
+++ /dev/null
@@ -1,54 +0,0 @@
-{
-  "cells": [
-    {
-      "cell_type": "code",
-      "execution_count": null,
-      "metadata": {
-        "collapsed": false
-      },
-      "outputs": [],
-      "source": [
-        "%matplotlib inline"
-      ]
-    },
-    {
-      "cell_type": "markdown",
-      "metadata": {},
-      "source": [
-        "\n================================\nMumbo 2 views, 2 classes example\n================================\n\nIn this toy example, we generate data from two classes, split between two\ntwo-dimensional views.\n\nFor each view, the data are generated so that half of the points of each class\nare well separated in the plane, while the other half of the points are not\nseparated and placed in the same area. We also insure that the points that are\nnot separated in one view are well separated in the other view.\n\nThus, in the figure representing the data, the points represented by crosses\n(x) are well separated in view 0 while they are not separated in view 1, while\nthe points represented by dots (.) are well separated in view 1 while they are\nnot separated in view 0. In this figure, the blue symbols represent points\nof class 0, while red symbols represent points of class 1.\n\nThe MuMBo algorithm take adavantage of the complementarity of the two views to\nrightly classify the points.\n\n"
-      ]
-    },
-    {
-      "cell_type": "code",
-      "execution_count": null,
-      "metadata": {
-        "collapsed": false
-      },
-      "outputs": [],
-      "source": [
-        "import numpy as np\nfrom multimodal.boosting.mumbo import MumboClassifier\nfrom matplotlib import pyplot as plt\n\n\ndef generate_data(n_samples, lim):\n    \"\"\"Generate random data in a rectangle\"\"\"\n    lim = np.array(lim)\n    n_features = lim.shape[0]\n    data = np.random.random((n_samples, n_features))\n    data = (lim[:, 1]-lim[:, 0]) * data + lim[:, 0]\n    return data\n\n\nseed = 12\nnp.random.seed(seed)\n\nn_samples = 100\n\nview_0 = np.concatenate((generate_data(n_samples, [[0., 1.], [0., 1.]]),\n                         generate_data(n_samples, [[1., 2.], [0., 1.]]),\n                         generate_data(n_samples, [[0., 1.], [0., 1.]]),\n                         generate_data(n_samples, [[0., 1.], [1., 2.]])))\n\nview_1 = np.concatenate((generate_data(n_samples, [[1., 2.], [0., 1.]]),\n                         generate_data(n_samples, [[0., 1.], [0., 1.]]),\n                         generate_data(n_samples, [[0., 1.], [1., 2.]]),\n                         generate_data(n_samples, [[0., 1.], [0., 1.]])))\n\nX = np.concatenate((view_0, view_1), axis=1)\n\ny = np.zeros(4*n_samples, dtype=np.int64)\ny[2*n_samples:] = 1\n\nviews_ind = np.array([0, 2, 4])\n\nn_estimators = 3\nclf = MumboClassifier(n_estimators=n_estimators)\nclf.fit(X, y, views_ind)\n\nprint('\\nAfter 3 iterations, the MuMBo classifier reaches exact '\n      'classification for the\\nlearning samples:')\nfor ind, score in enumerate(clf.staged_score(X, y)):\n    print('  - iteration {}, score: {}'.format(ind + 1, score))\n\n\nprint('\\nThe resulting MuMBo classifier uses three sub-classifiers that are '\n      'wheighted\\nusing the following weights:\\n'\n      '  estimator weights: {}'.format(clf.estimator_weights_))\n\nprint('\\nThe two first sub-classifiers use the data of view 0 to compute '\n      'their\\nclassification results, while the third one uses the data of '\n      'view 1:\\n'\n      '  best views: {}'. 
format(clf.best_views_))\n\nprint('\\nThe first figure displays the data, splitting the representation '\n      'between the\\ntwo views.')\n\nfig = plt.figure(figsize=(10., 8.))\nfig.suptitle('Representation of the data', size=16)\nfor ind_view in range(2):\n    ax = plt.subplot(2, 1, ind_view + 1)\n    ax.set_title('View {}'.format(ind_view))\n    ind_feature = ind_view * 2\n    styles = ('.b', 'xb', '.r', 'xr')\n    labels = ('non-separated', 'separated')\n    for ind in range(4):\n        ind_class = ind // 2\n        label = labels[(ind + ind_view) % 2]\n        ax.plot(X[n_samples*ind:n_samples*(ind+1), ind_feature],\n                X[n_samples*ind:n_samples*(ind+1), ind_feature + 1],\n                styles[ind],\n                label='Class {} ({})'.format(ind_class, label))\n    ax.legend()\n\nprint('\\nThe second figure displays the classification results for the '\n      'sub-classifiers\\non the learning sample data.\\n')\n\nstyles = ('.b', '.r')\nfig = plt.figure(figsize=(12., 7.))\nfig.suptitle('Classification results on the learning data for the '\n             'sub-classifiers', size=16)\nfor ind_estimator in range(n_estimators):\n    best_view = clf.best_views_[ind_estimator]\n    y_pred = clf.estimators_[ind_estimator].predict(\n        X[:, 2*best_view:2*best_view+2])\n    background_color = (1.0, 1.0, 0.9)\n    for ind_view in range(2):\n        ax = plt.subplot(2, 3, ind_estimator + 3*ind_view + 1)\n        if ind_view == best_view:\n            ax.set_facecolor(background_color)\n        ax.set_title(\n            'Sub-classifier {} - View {}'.format(ind_estimator, ind_view))\n        ind_feature = ind_view * 2\n        for ind_class in range(2):\n            ind_samples = (y_pred == ind_class)\n            ax.plot(X[ind_samples, ind_feature],\n                    X[ind_samples, ind_feature + 1],\n                    styles[ind_class],\n                    label='Class {}'.format(ind_class))\n        ax.legend(title='Predicted class:')\n\nplt.show()"
-      ]
-    }
-  ],
-  "metadata": {
-    "kernelspec": {
-      "display_name": "Python 3",
-      "language": "python",
-      "name": "python3"
-    },
-    "language_info": {
-      "codemirror_mode": {
-        "name": "ipython",
-        "version": 3
-      },
-      "file_extension": ".py",
-      "mimetype": "text/x-python",
-      "name": "python",
-      "nbconvert_exporter": "python",
-      "pygments_lexer": "ipython3",
-      "version": "3.6.8"
-    }
-  },
-  "nbformat": 4,
-  "nbformat_minor": 0
-}
\ No newline at end of file
diff --git a/doc/tutorial/auto_examples/mumbo_plot_2_views_2_classes.py b/doc/tutorial/auto_examples/mumbo_plot_2_views_2_classes.py
deleted file mode 100644
index 91f0dc7..0000000
--- a/doc/tutorial/auto_examples/mumbo_plot_2_views_2_classes.py
+++ /dev/null
@@ -1,127 +0,0 @@
-# -*- coding: utf-8 -*-
-"""
-================================
-Mumbo 2 views, 2 classes example
-================================
-
-In this toy example, we generate data from two classes, split between two
-two-dimensional views.
-
-For each view, the data are generated so that half of the points of each class
-are well separated in the plane, while the other half of the points are not
-separated and placed in the same area. We also ensure that the points that are
-not separated in one view are well separated in the other view.
-
-Thus, in the figure representing the data, the points represented by crosses
-(x) are well separated in view 0 but not in view 1, whereas the points
-represented by dots (.) are well separated in view 1 but not in view 0. In
-this figure, the blue symbols represent points of class 0, while red symbols
-represent points of class 1.
-
-The MuMBo algorithm takes advantage of the complementarity of the two views to
-correctly classify the points.
-"""
-
-import numpy as np
-from multimodal.boosting.mumbo import MumboClassifier
-from matplotlib import pyplot as plt
-
-
-def generate_data(n_samples, lim):
-    """Generate random data in a rectangle"""
-    lim = np.array(lim)
-    n_features = lim.shape[0]
-    data = np.random.random((n_samples, n_features))
-    data = (lim[:, 1]-lim[:, 0]) * data + lim[:, 0]
-    return data
-
-
-seed = 12
-np.random.seed(seed)
-
-n_samples = 100
-
-view_0 = np.concatenate((generate_data(n_samples, [[0., 1.], [0., 1.]]),
-                         generate_data(n_samples, [[1., 2.], [0., 1.]]),
-                         generate_data(n_samples, [[0., 1.], [0., 1.]]),
-                         generate_data(n_samples, [[0., 1.], [1., 2.]])))
-
-view_1 = np.concatenate((generate_data(n_samples, [[1., 2.], [0., 1.]]),
-                         generate_data(n_samples, [[0., 1.], [0., 1.]]),
-                         generate_data(n_samples, [[0., 1.], [1., 2.]]),
-                         generate_data(n_samples, [[0., 1.], [0., 1.]])))
-
-X = np.concatenate((view_0, view_1), axis=1)
-
-y = np.zeros(4*n_samples, dtype=np.int64)
-y[2*n_samples:] = 1
-
-views_ind = np.array([0, 2, 4])
-
-n_estimators = 3
-clf = MumboClassifier(n_estimators=n_estimators)
-clf.fit(X, y, views_ind)
-
-print('\nAfter 3 iterations, the MuMBo classifier reaches exact '
-      'classification for the\nlearning samples:')
-for ind, score in enumerate(clf.staged_score(X, y)):
-    print('  - iteration {}, score: {}'.format(ind + 1, score))
-
-
-print('\nThe resulting MuMBo classifier uses three sub-classifiers that are '
-      'weighted\nusing the following weights:\n'
-      '  estimator weights: {}'.format(clf.estimator_weights_))
-
-print('\nThe first two sub-classifiers use the data of view 0 to compute '
-      'their\nclassification results, while the third one uses the data of '
-      'view 1:\n'
-      '  best views: {}'.format(clf.best_views_))
-
-print('\nThe first figure displays the data, splitting the representation '
-      'between the\ntwo views.')
-
-fig = plt.figure(figsize=(10., 8.))
-fig.suptitle('Representation of the data', size=16)
-for ind_view in range(2):
-    ax = plt.subplot(2, 1, ind_view + 1)
-    ax.set_title('View {}'.format(ind_view))
-    ind_feature = ind_view * 2
-    styles = ('.b', 'xb', '.r', 'xr')
-    labels = ('non-separated', 'separated')
-    for ind in range(4):
-        ind_class = ind // 2
-        label = labels[(ind + ind_view) % 2]
-        ax.plot(X[n_samples*ind:n_samples*(ind+1), ind_feature],
-                X[n_samples*ind:n_samples*(ind+1), ind_feature + 1],
-                styles[ind],
-                label='Class {} ({})'.format(ind_class, label))
-    ax.legend()
-
-print('\nThe second figure displays the classification results for the '
-      'sub-classifiers\non the learning sample data.\n')
-
-styles = ('.b', '.r')
-fig = plt.figure(figsize=(12., 7.))
-fig.suptitle('Classification results on the learning data for the '
-             'sub-classifiers', size=16)
-for ind_estimator in range(n_estimators):
-    best_view = clf.best_views_[ind_estimator]
-    y_pred = clf.estimators_[ind_estimator].predict(
-        X[:, 2*best_view:2*best_view+2])
-    background_color = (1.0, 1.0, 0.9)
-    for ind_view in range(2):
-        ax = plt.subplot(2, 3, ind_estimator + 3*ind_view + 1)
-        if ind_view == best_view:
-            ax.set_facecolor(background_color)
-        ax.set_title(
-            'Sub-classifier {} - View {}'.format(ind_estimator, ind_view))
-        ind_feature = ind_view * 2
-        for ind_class in range(2):
-            ind_samples = (y_pred == ind_class)
-            ax.plot(X[ind_samples, ind_feature],
-                    X[ind_samples, ind_feature + 1],
-                    styles[ind_class],
-                    label='Class {}'.format(ind_class))
-        ax.legend(title='Predicted class:')
-
-plt.show()
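
The index arithmetic in the first figure of this example is compact: block
``ind`` of the generated data belongs to class ``ind // 2``, and
``(ind + ind_view) % 2`` says whether that block is separated in the view
being drawn. A small sketch that just prints the mapping the plot encodes:

.. code-block:: python

    # decode the (class, separated?) bookkeeping used by the plotting loop
    labels = ('non-separated', 'separated')
    for ind_view in range(2):
        for ind in range(4):
            print('view {}, block {}: class {}, {}'.format(
                ind_view, ind, ind // 2, labels[(ind + ind_view) % 2]))
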
diff --git a/doc/tutorial/auto_examples/mumbo_plot_2_views_2_classes.rst b/doc/tutorial/auto_examples/mumbo_plot_2_views_2_classes.rst
deleted file mode 100644
index 91c4903..0000000
--- a/doc/tutorial/auto_examples/mumbo_plot_2_views_2_classes.rst
+++ /dev/null
@@ -1,170 +0,0 @@
-.. note::
-    :class: sphx-glr-download-link-note
-
-    Click :ref:`here <sphx_glr_download_tutorial_auto_examples_mumbo_plot_2_views_2_classes.py>` to download the full example code
-.. rst-class:: sphx-glr-example-title
-
-.. _sphx_glr_tutorial_auto_examples_mumbo_plot_2_views_2_classes.py:
-
-
-================================
-Mumbo 2 views, 2 classes example
-================================
-
-In this toy example, we generate data from two classes, split between two
-two-dimensional views.
-
-For each view, the data are generated so that half of the points of each class
-are well separated in the plane, while the other half of the points are not
-separated and placed in the same area. We also ensure that the points that are
-not separated in one view are well separated in the other view.
-
-Thus, in the figure representing the data, the points represented by crosses
-(x) are well separated in view 0 but not in view 1, whereas the points
-represented by dots (.) are well separated in view 1 but not in view 0. In
-this figure, the blue symbols represent points of class 0, while red symbols
-represent points of class 1.
-
-The MuMBo algorithm takes advantage of the complementarity of the two views to
-correctly classify the points.
-
-
-.. code-block:: default
-
-
-    import numpy as np
-    from multimodal.boosting.mumbo import MumboClassifier
-    from matplotlib import pyplot as plt
-
-
-    def generate_data(n_samples, lim):
-        """Generate random data in a rectangle"""
-        lim = np.array(lim)
-        n_features = lim.shape[0]
-        data = np.random.random((n_samples, n_features))
-        data = (lim[:, 1]-lim[:, 0]) * data + lim[:, 0]
-        return data
-
-
-    seed = 12
-    np.random.seed(seed)
-
-    n_samples = 100
-
-    view_0 = np.concatenate((generate_data(n_samples, [[0., 1.], [0., 1.]]),
-                             generate_data(n_samples, [[1., 2.], [0., 1.]]),
-                             generate_data(n_samples, [[0., 1.], [0., 1.]]),
-                             generate_data(n_samples, [[0., 1.], [1., 2.]])))
-
-    view_1 = np.concatenate((generate_data(n_samples, [[1., 2.], [0., 1.]]),
-                             generate_data(n_samples, [[0., 1.], [0., 1.]]),
-                             generate_data(n_samples, [[0., 1.], [1., 2.]]),
-                             generate_data(n_samples, [[0., 1.], [0., 1.]])))
-
-    X = np.concatenate((view_0, view_1), axis=1)
-
-    y = np.zeros(4*n_samples, dtype=np.int64)
-    y[2*n_samples:] = 1
-
-    views_ind = np.array([0, 2, 4])
-
-    n_estimators = 3
-    clf = MumboClassifier(n_estimators=n_estimators)
-    clf.fit(X, y, views_ind)
-
-    print('\nAfter 3 iterations, the MuMBo classifier reaches exact '
-          'classification for the\nlearning samples:')
-    for ind, score in enumerate(clf.staged_score(X, y)):
-        print('  - iteration {}, score: {}'.format(ind + 1, score))
-
-
-    print('\nThe resulting MuMBo classifier uses three sub-classifiers that are '
-          'weighted\nusing the following weights:\n'
-          '  estimator weights: {}'.format(clf.estimator_weights_))
-
-    print('\nThe first two sub-classifiers use the data of view 0 to compute '
-          'their\nclassification results, while the third one uses the data of '
-          'view 1:\n'
-          '  best views: {}'.format(clf.best_views_))
-
-    print('\nThe first figure displays the data, splitting the representation '
-          'between the\ntwo views.')
-
-    fig = plt.figure(figsize=(10., 8.))
-    fig.suptitle('Representation of the data', size=16)
-    for ind_view in range(2):
-        ax = plt.subplot(2, 1, ind_view + 1)
-        ax.set_title('View {}'.format(ind_view))
-        ind_feature = ind_view * 2
-        styles = ('.b', 'xb', '.r', 'xr')
-        labels = ('non-separated', 'separated')
-        for ind in range(4):
-            ind_class = ind // 2
-            label = labels[(ind + ind_view) % 2]
-            ax.plot(X[n_samples*ind:n_samples*(ind+1), ind_feature],
-                    X[n_samples*ind:n_samples*(ind+1), ind_feature + 1],
-                    styles[ind],
-                    label='Class {} ({})'.format(ind_class, label))
-        ax.legend()
-
-    print('\nThe second figure displays the classification results for the '
-          'sub-classifiers\non the learning sample data.\n')
-
-    styles = ('.b', '.r')
-    fig = plt.figure(figsize=(12., 7.))
-    fig.suptitle('Classification results on the learning data for the '
-                 'sub-classifiers', size=16)
-    for ind_estimator in range(n_estimators):
-        best_view = clf.best_views_[ind_estimator]
-        y_pred = clf.estimators_[ind_estimator].predict(
-            X[:, 2*best_view:2*best_view+2])
-        background_color = (1.0, 1.0, 0.9)
-        for ind_view in range(2):
-            ax = plt.subplot(2, 3, ind_estimator + 3*ind_view + 1)
-            if ind_view == best_view:
-                ax.set_facecolor(background_color)
-            ax.set_title(
-                'Sub-classifier {} - View {}'.format(ind_estimator, ind_view))
-            ind_feature = ind_view * 2
-            for ind_class in range(2):
-                ind_samples = (y_pred == ind_class)
-                ax.plot(X[ind_samples, ind_feature],
-                        X[ind_samples, ind_feature + 1],
-                        styles[ind_class],
-                        label='Class {}'.format(ind_class))
-            ax.legend(title='Predicted class:')
-
-    plt.show()
-
-
-.. rst-class:: sphx-glr-timing
-
-   **Total running time of the script:** ( 0 minutes  0.000 seconds)
-
-
-.. _sphx_glr_download_tutorial_auto_examples_mumbo_plot_2_views_2_classes.py:
-
-
-.. only:: html
-
- .. container:: sphx-glr-footer
-    :class: sphx-glr-footer-example
-
-
-
-  .. container:: sphx-glr-download
-
-     :download:`Download Python source code: mumbo_plot_2_views_2_classes.py <mumbo_plot_2_views_2_classes.py>`
-
-
-
-  .. container:: sphx-glr-download
-
-     :download:`Download Jupyter notebook: mumbo_plot_2_views_2_classes.ipynb <mumbo_plot_2_views_2_classes.ipynb>`
-
-
-.. only:: html
-
- .. rst-class:: sphx-glr-signature
-
-    `Gallery generated by Sphinx-Gallery <https://sphinx-gallery.github.io>`_
diff --git a/doc/tutorial/auto_examples/mumbo_plot_2_views_2_classes_codeobj.pickle b/doc/tutorial/auto_examples/mumbo_plot_2_views_2_classes_codeobj.pickle
deleted file mode 100644
index 4c07e368e87374ab293d680db7ba94f21c8c98c2..0000000000000000000000000000000000000000
GIT binary patch
literal 0
HcmV?d00001

literal 682
zcmb7B%SyyB6cw4NwH<umMt?wblhxo4=vD-OK+-ld3FJ|dT%>~F2V`el`>Q5R#-TV1
zClI*jB<I}M$L#C-@L)WyZWZUGwM<FlUHzmxqDl9_TI)ak?X$merR)*b8V~+Gpqa#i
zcsEasD}?HRM?!+=2k#b%crjFI>*+DLvgZ~B_!{-JaI-O{r8^hc_FP%c8q^Ol^0eB;
zf;$N{Xe>J=kRhb|b&7FtT8zo_k7zO_lD<{4#g@o$vp)2`rGicFfmZYq+}So|%A7?-
zM*m5B`Us*?_Z%C;R*dLu@rMpxR|2?YVR)FMMk!;NTq6<Xv7d__uRe@6)c8iUGTC^Z
zz@(LHaP_zWXvPtzB^;Ry!{0sjP(`N|TZ7)ru*hgXpIA7P_HMZ;Jb_8!O91o((I5s^

diff --git a/doc/tutorial/auto_examples/mumbo_plot_3_views_3_classes.ipynb b/doc/tutorial/auto_examples/mumbo_plot_3_views_3_classes.ipynb
deleted file mode 100644
index 371d83e..0000000
--- a/doc/tutorial/auto_examples/mumbo_plot_3_views_3_classes.ipynb
+++ /dev/null
@@ -1,54 +0,0 @@
-{
-  "cells": [
-    {
-      "cell_type": "code",
-      "execution_count": null,
-      "metadata": {
-        "collapsed": false
-      },
-      "outputs": [],
-      "source": [
-        "%matplotlib inline"
-      ]
-    },
-    {
-      "cell_type": "markdown",
-      "metadata": {},
-      "source": [
-        "\n================================\nMumbo 3 views, 3 classes example\n================================\n\nIn this toy example, we generate data from three classes, split between three\ntwo-dimensional views.\n\nFor each view, the data are generated so that the points for two classes are\nwell seperated, while the points for the third class are not seperated with\nthe two other classes. That means that, taken separately, none of the single\nviews allows for a good classification of the data.\n\nNevertheless, the MuMBo algorithm take adavantage of the complementarity of\nthe views to rightly classify the points.\n\n"
-      ]
-    },
-    {
-      "cell_type": "code",
-      "execution_count": null,
-      "metadata": {
-        "collapsed": false
-      },
-      "outputs": [],
-      "source": [
-        "import numpy as np\nfrom multimodal.boosting.mumbo import MumboClassifier\nfrom matplotlib import pyplot as plt\n\n\ndef generate_data(n_samples, lim):\n    \"\"\"Generate random data in a rectangle\"\"\"\n    lim = np.array(lim)\n    n_features = lim.shape[0]\n    data = np.random.random((n_samples, n_features))\n    data = (lim[:, 1]-lim[:, 0]) * data + lim[:, 0]\n    return data\n\n\nseed = 12\nnp.random.seed(seed)\n\nn_samples = 300\n\nview_0 = np.concatenate((generate_data(n_samples, [[0., 1.], [0., 1.]]),\n                         generate_data(n_samples, [[1., 2.], [0., 1.]]),\n                         generate_data(n_samples, [[0., 2.], [0., 1.]])))\n\nview_1 = np.concatenate((generate_data(n_samples, [[1., 2.], [0., 1.]]),\n                         generate_data(n_samples, [[0., 2.], [0., 1.]]),\n                         generate_data(n_samples, [[0., 1.], [0., 1.]])))\n\nview_2 = np.concatenate((generate_data(n_samples, [[0., 2.], [0., 1.]]),\n                         generate_data(n_samples, [[0., 1.], [0., 1.]]),\n                         generate_data(n_samples, [[1., 2.], [0., 1.]])))\n\nX = np.concatenate((view_0, view_1, view_2), axis=1)\n\ny = np.zeros(3*n_samples, dtype=np.int64)\ny[n_samples:2*n_samples] = 1\ny[2*n_samples:] = 2\n\nviews_ind = np.array([0, 2, 4, 6])\n\nn_estimators = 4\nclf = MumboClassifier(n_estimators=n_estimators)\nclf.fit(X, y, views_ind)\n\nprint('\\nAfter 4 iterations, the MuMBo classifier reaches exact '\n      'classification for the\\nlearning samples:')\nfor ind, score in enumerate(clf.staged_score(X, y)):\n    print('  - iteration {}, score: {}'.format(ind + 1, score))\n\nprint('\\nThe resulting MuMBo classifier uses four sub-classifiers that are '\n      'wheighted\\nusing the following weights:\\n'\n      '  estimator weights: {}'.format(clf.estimator_weights_))\n\nprint('\\nThe first sub-classifier uses the data of view 0 to compute '\n      'its classification\\nresults, the second and third sub-classifiers use '\n      'the data of view 1, while the\\nfourth one uses the data of '\n      'view 2:\\n'\n      '  best views: {}'. 
format(clf.best_views_))\n\nprint('\\nThe first figure displays the data, splitting the representation '\n      'between the\\nthree views.')\n\nstyles = ('.b', '.r', '.g')\nfig = plt.figure(figsize=(12., 11.))\nfig.suptitle('Representation of the data', size=16)\nfor ind_view in range(3):\n    ax = plt.subplot(3, 1, ind_view + 1)\n    ax.set_title('View {}'.format(ind_view))\n    ind_feature = ind_view * 2\n    for ind_class in range(3):\n        ind_samples = (y == ind_class)\n        ax.plot(X[ind_samples, ind_feature],\n                X[ind_samples, ind_feature + 1],\n                styles[ind_class],\n                label='Class {}'.format(ind_class))\n    ax.legend(loc='upper left', framealpha=0.9)\n\nprint('\\nThe second figure displays the classification results for the '\n      'sub-classifiers\\non the learning sample data.\\n')\n\nfig = plt.figure(figsize=(14., 11.))\nfig.suptitle('Classification results on the learning data for the '\n             'sub-classifiers', size=16)\nfor ind_estimator in range(n_estimators):\n    best_view = clf.best_views_[ind_estimator]\n    y_pred = clf.estimators_[ind_estimator].predict(\n        X[:, 2*best_view:2*best_view+2])\n    background_color = (1.0, 1.0, 0.9)\n    for ind_view in range(3):\n        ax = plt.subplot(3, 4, ind_estimator + 4*ind_view + 1)\n        if ind_view == best_view:\n            ax.set_facecolor(background_color)\n        ax.set_title(\n            'Sub-classifier {} - View {}'.format(ind_estimator, ind_view))\n        ind_feature = ind_view * 2\n        for ind_class in range(3):\n            ind_samples = (y_pred == ind_class)\n            ax.plot(X[ind_samples, ind_feature],\n                    X[ind_samples, ind_feature + 1],\n                    styles[ind_class],\n                    label='Class {}'.format(ind_class))\n        ax.legend(title='Predicted class:', loc='upper left', framealpha=0.9)\n\nplt.show()"
-      ]
-    }
-  ],
-  "metadata": {
-    "kernelspec": {
-      "display_name": "Python 3",
-      "language": "python",
-      "name": "python3"
-    },
-    "language_info": {
-      "codemirror_mode": {
-        "name": "ipython",
-        "version": 3
-      },
-      "file_extension": ".py",
-      "mimetype": "text/x-python",
-      "name": "python",
-      "nbconvert_exporter": "python",
-      "pygments_lexer": "ipython3",
-      "version": "3.6.8"
-    }
-  },
-  "nbformat": 4,
-  "nbformat_minor": 0
-}
\ No newline at end of file
diff --git a/doc/tutorial/auto_examples/mumbo_plot_3_views_3_classes.py b/doc/tutorial/auto_examples/mumbo_plot_3_views_3_classes.py
deleted file mode 100644
index 62cd311..0000000
--- a/doc/tutorial/auto_examples/mumbo_plot_3_views_3_classes.py
+++ /dev/null
@@ -1,121 +0,0 @@
-# -*- coding: utf-8 -*-
-"""
-================================
-Mumbo 3 views, 3 classes example
-================================
-
-In this toy example, we generate data from three classes, split between three
-two-dimensional views.
-
-For each view, the data are generated so that the points for two classes are
-well separated, while the points of the third class are not separated from
-the two other classes. This means that, taken separately, none of the single
-views allows for a good classification of the data.
-
-Nevertheless, the MuMBo algorithm takes advantage of the complementarity of
-the views to correctly classify the points.
-"""
-
-import numpy as np
-from multimodal.boosting.mumbo import MumboClassifier
-from matplotlib import pyplot as plt
-
-
-def generate_data(n_samples, lim):
-    """Generate random data in a rectangle"""
-    lim = np.array(lim)
-    n_features = lim.shape[0]
-    data = np.random.random((n_samples, n_features))
-    data = (lim[:, 1]-lim[:, 0]) * data + lim[:, 0]
-    return data
-
-
-seed = 12
-np.random.seed(seed)
-
-n_samples = 300
-
-view_0 = np.concatenate((generate_data(n_samples, [[0., 1.], [0., 1.]]),
-                         generate_data(n_samples, [[1., 2.], [0., 1.]]),
-                         generate_data(n_samples, [[0., 2.], [0., 1.]])))
-
-view_1 = np.concatenate((generate_data(n_samples, [[1., 2.], [0., 1.]]),
-                         generate_data(n_samples, [[0., 2.], [0., 1.]]),
-                         generate_data(n_samples, [[0., 1.], [0., 1.]])))
-
-view_2 = np.concatenate((generate_data(n_samples, [[0., 2.], [0., 1.]]),
-                         generate_data(n_samples, [[0., 1.], [0., 1.]]),
-                         generate_data(n_samples, [[1., 2.], [0., 1.]])))
-
-X = np.concatenate((view_0, view_1, view_2), axis=1)
-
-y = np.zeros(3*n_samples, dtype=np.int64)
-y[n_samples:2*n_samples] = 1
-y[2*n_samples:] = 2
-
-views_ind = np.array([0, 2, 4, 6])
-
-n_estimators = 4
-clf = MumboClassifier(n_estimators=n_estimators)
-clf.fit(X, y, views_ind)
-
-print('\nAfter 4 iterations, the MuMBo classifier reaches exact '
-      'classification for the\nlearning samples:')
-for ind, score in enumerate(clf.staged_score(X, y)):
-    print('  - iteration {}, score: {}'.format(ind + 1, score))
-
-print('\nThe resulting MuMBo classifier uses four sub-classifiers that are '
-      'weighted\nusing the following weights:\n'
-      '  estimator weights: {}'.format(clf.estimator_weights_))
-
-print('\nThe first sub-classifier uses the data of view 0 to compute '
-      'its classification\nresults, the second and third sub-classifiers use '
-      'the data of view 1, while the\nfourth one uses the data of '
-      'view 2:\n'
-      '  best views: {}'.format(clf.best_views_))
-
-print('\nThe first figure displays the data, splitting the representation '
-      'between the\nthree views.')
-
-styles = ('.b', '.r', '.g')
-fig = plt.figure(figsize=(12., 11.))
-fig.suptitle('Representation of the data', size=16)
-for ind_view in range(3):
-    ax = plt.subplot(3, 1, ind_view + 1)
-    ax.set_title('View {}'.format(ind_view))
-    ind_feature = ind_view * 2
-    for ind_class in range(3):
-        ind_samples = (y == ind_class)
-        ax.plot(X[ind_samples, ind_feature],
-                X[ind_samples, ind_feature + 1],
-                styles[ind_class],
-                label='Class {}'.format(ind_class))
-    ax.legend(loc='upper left', framealpha=0.9)
-
-print('\nThe second figure displays the classification results for the '
-      'sub-classifiers\non the learning sample data.\n')
-
-fig = plt.figure(figsize=(14., 11.))
-fig.suptitle('Classification results on the learning data for the '
-             'sub-classifiers', size=16)
-for ind_estimator in range(n_estimators):
-    best_view = clf.best_views_[ind_estimator]
-    y_pred = clf.estimators_[ind_estimator].predict(
-        X[:, 2*best_view:2*best_view+2])
-    background_color = (1.0, 1.0, 0.9)
-    for ind_view in range(3):
-        ax = plt.subplot(3, 4, ind_estimator + 4*ind_view + 1)
-        if ind_view == best_view:
-            ax.set_facecolor(background_color)
-        ax.set_title(
-            'Sub-classifier {} - View {}'.format(ind_estimator, ind_view))
-        ind_feature = ind_view * 2
-        for ind_class in range(3):
-            ind_samples = (y_pred == ind_class)
-            ax.plot(X[ind_samples, ind_feature],
-                    X[ind_samples, ind_feature + 1],
-                    styles[ind_class],
-                    label='Class {}'.format(ind_class))
-        ax.legend(title='Predicted class:', loc='upper left', framealpha=0.9)
-
-plt.show()
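
The printed commentary about which views the sub-classifiers rely on can be
derived directly from ``best_views_`` instead of being hard-coded. A short
sketch, assuming ``clf`` is the fitted classifier from the script above:

.. code-block:: python

    import numpy as np

    # count how many sub-classifiers selected each of the three views
    counts = np.bincount(np.asarray(clf.best_views_), minlength=3)
    for view, count in enumerate(counts):
        print('view {}: chosen by {} sub-classifier(s)'.format(view, count))
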
diff --git a/doc/tutorial/auto_examples/mumbo_plot_3_views_3_classes.rst b/doc/tutorial/auto_examples/mumbo_plot_3_views_3_classes.rst
deleted file mode 100644
index 9f11b4e..0000000
--- a/doc/tutorial/auto_examples/mumbo_plot_3_views_3_classes.rst
+++ /dev/null
@@ -1,164 +0,0 @@
-.. note::
-    :class: sphx-glr-download-link-note
-
-    Click :ref:`here <sphx_glr_download_tutorial_auto_examples_mumbo_plot_3_views_3_classes.py>` to download the full example code
-.. rst-class:: sphx-glr-example-title
-
-.. _sphx_glr_tutorial_auto_examples_mumbo_plot_3_views_3_classes.py:
-
-
-================================
-Mumbo 3 views, 3 classes example
-================================
-
-In this toy example, we generate data from three classes, split between three
-two-dimensional views.
-
-For each view, the data are generated so that the points for two classes are
-well separated, while the points of the third class are not separated from
-the two other classes. This means that, taken separately, none of the single
-views allows for a good classification of the data.
-
-Nevertheless, the MuMBo algorithm takes advantage of the complementarity of
-the views to correctly classify the points.
-
-
-.. code-block:: default
-
-
-    import numpy as np
-    from multimodal.boosting.mumbo import MumboClassifier
-    from matplotlib import pyplot as plt
-
-
-    def generate_data(n_samples, lim):
-        """Generate random data in a rectangle"""
-        lim = np.array(lim)
-        n_features = lim.shape[0]
-        data = np.random.random((n_samples, n_features))
-        data = (lim[:, 1]-lim[:, 0]) * data + lim[:, 0]
-        return data
-
-
-    seed = 12
-    np.random.seed(seed)
-
-    n_samples = 300
-
-    view_0 = np.concatenate((generate_data(n_samples, [[0., 1.], [0., 1.]]),
-                             generate_data(n_samples, [[1., 2.], [0., 1.]]),
-                             generate_data(n_samples, [[0., 2.], [0., 1.]])))
-
-    view_1 = np.concatenate((generate_data(n_samples, [[1., 2.], [0., 1.]]),
-                             generate_data(n_samples, [[0., 2.], [0., 1.]]),
-                             generate_data(n_samples, [[0., 1.], [0., 1.]])))
-
-    view_2 = np.concatenate((generate_data(n_samples, [[0., 2.], [0., 1.]]),
-                             generate_data(n_samples, [[0., 1.], [0., 1.]]),
-                             generate_data(n_samples, [[1., 2.], [0., 1.]])))
-
-    X = np.concatenate((view_0, view_1, view_2), axis=1)
-
-    y = np.zeros(3*n_samples, dtype=np.int64)
-    y[n_samples:2*n_samples] = 1
-    y[2*n_samples:] = 2
-
-    views_ind = np.array([0, 2, 4, 6])
-
-    n_estimators = 4
-    clf = MumboClassifier(n_estimators=n_estimators)
-    clf.fit(X, y, views_ind)
-
-    print('\nAfter 4 iterations, the MuMBo classifier reaches exact '
-          'classification for the\nlearning samples:')
-    for ind, score in enumerate(clf.staged_score(X, y)):
-        print('  - iteration {}, score: {}'.format(ind + 1, score))
-
-    print('\nThe resulting MuMBo classifier uses four sub-classifiers that are '
-          'weighted\nusing the following weights:\n'
-          '  estimator weights: {}'.format(clf.estimator_weights_))
-
-    print('\nThe first sub-classifier uses the data of view 0 to compute '
-          'its classification\nresults, the second and third sub-classifiers use '
-          'the data of view 1, while the\nfourth one uses the data of '
-          'view 2:\n'
-          '  best views: {}'.format(clf.best_views_))
-
-    print('\nThe first figure displays the data, splitting the representation '
-          'between the\nthree views.')
-
-    styles = ('.b', '.r', '.g')
-    fig = plt.figure(figsize=(12., 11.))
-    fig.suptitle('Representation of the data', size=16)
-    for ind_view in range(3):
-        ax = plt.subplot(3, 1, ind_view + 1)
-        ax.set_title('View {}'.format(ind_view))
-        ind_feature = ind_view * 2
-        for ind_class in range(3):
-            ind_samples = (y == ind_class)
-            ax.plot(X[ind_samples, ind_feature],
-                    X[ind_samples, ind_feature + 1],
-                    styles[ind_class],
-                    label='Class {}'.format(ind_class))
-        ax.legend(loc='upper left', framealpha=0.9)
-
-    print('\nThe second figure displays the classification results for the '
-          'sub-classifiers\non the learning sample data.\n')
-
-    fig = plt.figure(figsize=(14., 11.))
-    fig.suptitle('Classification results on the learning data for the '
-                 'sub-classifiers', size=16)
-    for ind_estimator in range(n_estimators):
-        best_view = clf.best_views_[ind_estimator]
-        y_pred = clf.estimators_[ind_estimator].predict(
-            X[:, 2*best_view:2*best_view+2])
-        background_color = (1.0, 1.0, 0.9)
-        for ind_view in range(3):
-            ax = plt.subplot(3, 4, ind_estimator + 4*ind_view + 1)
-            if ind_view == best_view:
-                ax.set_facecolor(background_color)
-            ax.set_title(
-                'Sub-classifier {} - View {}'.format(ind_estimator, ind_view))
-            ind_feature = ind_view * 2
-            for ind_class in range(3):
-                ind_samples = (y_pred == ind_class)
-                ax.plot(X[ind_samples, ind_feature],
-                        X[ind_samples, ind_feature + 1],
-                        styles[ind_class],
-                        label='Class {}'.format(ind_class))
-            ax.legend(title='Predicted class:', loc='upper left', framealpha=0.9)
-
-    plt.show()
-
-
-.. rst-class:: sphx-glr-timing
-
-   **Total running time of the script:** ( 0 minutes  0.000 seconds)
-
-
-.. _sphx_glr_download_tutorial_auto_examples_mumbo_plot_3_views_3_classes.py:
-
-
-.. only:: html
-
- .. container:: sphx-glr-footer
-    :class: sphx-glr-footer-example
-
-
-
-  .. container:: sphx-glr-download
-
-     :download:`Download Python source code: mumbo_plot_3_views_3_classes.py <mumbo_plot_3_views_3_classes.py>`
-
-
-
-  .. container:: sphx-glr-download
-
-     :download:`Download Jupyter notebook: mumbo_plot_3_views_3_classes.ipynb <mumbo_plot_3_views_3_classes.ipynb>`
-
-
-.. only:: html
-
- .. rst-class:: sphx-glr-signature
-
-    `Gallery generated by Sphinx-Gallery <https://sphinx-gallery.github.io>`_
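
``staged_score`` yields the training score after each boosting iteration, so
it can also be used to pick the smallest adequate ``n_estimators``. A sketch,
assuming ``clf``, ``X`` and ``y`` from the script above:

.. code-block:: python

    # find the first boosting iteration that reaches a target training score
    target = 1.0
    for ind, score in enumerate(clf.staged_score(X, y), start=1):
        if score >= target:
            print('target score reached after {} iteration(s)'.format(ind))
            break
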
diff --git a/doc/tutorial/auto_examples/mumbo_plot_3_views_3_classes_codeobj.pickle b/doc/tutorial/auto_examples/mumbo_plot_3_views_3_classes_codeobj.pickle
deleted file mode 100644
index 4c07e368e87374ab293d680db7ba94f21c8c98c2..0000000000000000000000000000000000000000
GIT binary patch
literal 0
HcmV?d00001

literal 682
zcmb7B%SyyB6cw4NwH<umMt?wblhxo4=vD-OK+-ld3FJ|dT%>~F2V`el`>Q5R#-TV1
zClI*jB<I}M$L#C-@L)WyZWZUGwM<FlUHzmxqDl9_TI)ak?X$merR)*b8V~+Gpqa#i
zcsEasD}?HRM?!+=2k#b%crjFI>*+DLvgZ~B_!{-JaI-O{r8^hc_FP%c8q^Ol^0eB;
zf;$N{Xe>J=kRhb|b&7FtT8zo_k7zO_lD<{4#g@o$vp)2`rGicFfmZYq+}So|%A7?-
zM*m5B`Us*?_Z%C;R*dLu@rMpxR|2?YVR)FMMk!;NTq6<Xv7d__uRe@6)c8iUGTC^Z
zz@(LHaP_zWXvPtzB^;Ry!{0sjP(`N|TZ7)ru*hgXpIA7P_HMZ;Jb_8!O91o((I5s^

diff --git a/doc/tutorial/auto_examples/mvml/mvml_plot_.ipynb b/doc/tutorial/auto_examples/mvml/mvml_plot_.ipynb
deleted file mode 100644
index 830427c..0000000
--- a/doc/tutorial/auto_examples/mvml/mvml_plot_.ipynb
+++ /dev/null
@@ -1,54 +0,0 @@
-{
-  "cells": [
-    {
-      "cell_type": "code",
-      "execution_count": null,
-      "metadata": {
-        "collapsed": false
-      },
-      "outputs": [],
-      "source": [
-        "%matplotlib inline"
-      ]
-    },
-    {
-      "cell_type": "markdown",
-      "metadata": {},
-      "source": [
-        "\n# MVML\n\nDemonstration on how MVML (in file mvml.py) is intended to be used with very simple simulated dataset\n\nDemonstration uses scikit-learn for retrieving datasets and for calculating rbf kernel function, see\nhttp://scikit-learn.org/stable/\n\n"
-      ]
-    },
-    {
-      "cell_type": "code",
-      "execution_count": null,
-      "metadata": {
-        "collapsed": false
-      },
-      "outputs": [],
-      "source": [
-        "import numpy as np\nimport matplotlib.pyplot as plt\nfrom sklearn import datasets\nfrom sklearn.metrics import accuracy_score\nfrom sklearn.metrics.pairwise import rbf_kernel\nfrom multimodal.kernels.mvml import MVML\nfrom multimodal.datasets.data_sample import DataSample\nfrom multimodal.tests.datasets.get_dataset_path import get_dataset_path\n\n\nnp.random.seed(4)\n\n# =========== create a simple dataset ============\n\nn_tot = 200\nhalf = int(n_tot/2)\nn_tr = 120\n\n# create a bit more data than needed so that we can take \"half\" amount of samples for each class\nX0, y0 = datasets.make_moons(n_samples=n_tot+2, noise=0.3, shuffle=False)\nX1, y1 = datasets.make_circles(n_samples=n_tot+2, noise=0.1, shuffle=False)\n\n# make multi-view correspondence (select equal number of samples for both classes and order the data same way\n# in both views)\n\nyinds0 = np.append(np.where(y0 == 0)[0][0:half], np.where(y0 == 1)[0][0:half])\nyinds1 = np.append(np.where(y1 == 0)[0][0:half], np.where(y1 == 1)[0][0:half])\n\nX0 = X0[yinds0, :]\nX1 = X1[yinds1, :]\nY = np.append(np.zeros(half)-1, np.ones(half))  # labels -1 and 1\n\n# show data\n# =========== create a simple dataset ============\n\nn_tot = 200\nhalf = int(n_tot/2)\nn_tr = 120\n\n# create a bit more data than needed so that we can take \"half\" amount of samples for each class\nX0, y0 = datasets.make_moons(n_samples=n_tot+2, noise=0.3, shuffle=False)\nX1, y1 = datasets.make_circles(n_samples=n_tot+2, noise=0.1, shuffle=False)\n\n# make multi-view correspondence (select equal number of samples for both classes and order the data same way\n# in both views)\n\nyinds0 = np.append(np.where(y0 == 0)[0][0:half], np.where(y0 == 1)[0][0:half])\nyinds1 = np.append(np.where(y1 == 0)[0][0:half], np.where(y1 == 1)[0][0:half])\n\nX0 = X0[yinds0, :]\nX1 = X1[yinds1, :]\nY = np.append(np.zeros(half)-1, np.ones(half))  # labels -1 and 1\n\n# show data\nplt.figure(figsize=(10., 8.))\nplt.subplot(121)\nplt.scatter(X0[:, 0], X0[:, 1], c=Y)\nplt.title(\"all data, view 1\")\nplt.subplot(122)\nplt.scatter(X1[:, 0], X1[:, 1], c=Y)\nplt.title(\"all data, view 2\")\nplt.show()\n\n# shuffle\norder = np.random.permutation(n_tot)\nX0 = X0[order, :]\nX1 = X1[order, :]\nY = Y[order]\n\n# make kernel dictionaries\nkernel_dict = {}\ntest_kernel_dict = {}\nkernel_dict[0] = rbf_kernel(X0[0:n_tr, :])\nkernel_dict[1] = rbf_kernel(X1[0:n_tr, :])\ntest_kernel_dict[0] = rbf_kernel(X0[n_tr:n_tot, :], X0[0:n_tr, :])\ntest_kernel_dict[1] = rbf_kernel(X1[n_tr:n_tot, :], X1[0:n_tr, :])\n\n# input_x = get_dataset_path(\"input_x_dic.pkl\")\n# f = open(input_x, \"wb\")\n# pickle.dump(input_x, f)\n#\n#\n# d= DataSample(kernel_dict)\n# a = d.data\n#\n# =========== use MVML in classifying the data ============\n#\n# demo on how the code is intended to be used; parameters are not cross-validated, just picked some\n# # with approximation\n# # default: learn A, don't learn w   (learn_A=1, learn_w=0)\nmvml = MVML(lmbda=0.1, eta=1, nystrom_param=0.2, kernel='precomputed')\nmvml.fit(kernel_dict, Y[0:n_tr])\n\n\n#\n\npred1 = np.sign(mvml.predict(test_kernel_dict) ) # take sign for classification result\n#\n# without approximation\nmvml2 = MVML(lmbda=0.1, eta=1, nystrom_param=1, kernel='precomputed')   # without approximation\nmvml2.fit(kernel_dict, Y[0:n_tr])\npred2 = np.sign(mvml2.predict(test_kernel_dict))\n#\n# use MVML_Cov, don't learn w\nmvml3 = MVML(lmbda=0.1, eta=1,learn_A=3, nystrom_param=1, kernel='precomputed')\nmvml3.fit(kernel_dict, Y[0:n_tr])\npred3 = 
np.sign(mvml.predict(test_kernel_dict))\n#\n# use MVML_I, don't learn w\nmvml3 = MVML(lmbda=0.1, eta=1,learn_A=4, nystrom_param=1, kernel='precomputed')\n\npred4 = np.sign(mvml.predict(test_kernel_dict))\n#\n#\n# # =========== show results ============\n#\n# # accuracies\nacc1 = accuracy_score(Y[n_tr:n_tot], pred1)\nacc2 = accuracy_score(Y[n_tr:n_tot], pred2)\nacc3 = accuracy_score(Y[n_tr:n_tot], pred3)\nacc4 = accuracy_score(Y[n_tr:n_tot], pred4)\n#\n# # display obtained accuracies\n#\nprint(\"MVML:       \", acc1)\nprint(\"MVMLsparse: \", acc2)\nprint(\"MVML_Cov:   \", acc3)\nprint(\"MVML_I:     \", acc4)\n#\n#\n# # plot data and some classification results\n#\nplt.figure(2, figsize=(10., 8.))\nplt.subplot(341)\nplt.scatter(X0[n_tr:n_tot, 0], X0[n_tr:n_tot, 1], c=Y[n_tr:n_tot])\nplt.title(\"orig. view 1\")\nplt.subplot(342)\nplt.scatter(X1[n_tr:n_tot, 0], X1[n_tr:n_tot, 1], c=Y[n_tr:n_tot])\nplt.title(\"orig. view 2\")\n#\npred1[np.where(pred1[:, 0] != Y[n_tr:n_tot])] = 0\npred1 = pred1.reshape((pred1.shape[0]))\nplt.subplot(343)\nplt.scatter(X0[n_tr:n_tot, 0], X0[n_tr:n_tot, 1], c=pred1)\nplt.title(\"MVML view 1\")\nplt.subplot(344)\nplt.scatter(X1[n_tr:n_tot, 0], X1[n_tr:n_tot, 1], c=pred1)\nplt.title(\"MVML view 2\")\n#\npred2[np.where(pred2[:, 0] != Y[n_tr:n_tot])] = 0\npred2 = pred2.reshape((pred2.shape[0]))\nplt.subplot(345)\nplt.scatter(X0[n_tr:n_tot, 0], X0[n_tr:n_tot, 1], c=pred2)\nplt.title(\"MVMLsparse view 1\")\nplt.subplot(346)\nplt.scatter(X1[n_tr:n_tot, 0], X1[n_tr:n_tot, 1], c=pred2)\nplt.title(\"MVMLsparse view 2\")\n#\npred3[np.where(pred3[:, 0] != Y[n_tr:n_tot])] = 0\npred3 = pred3.reshape((pred3.shape[0]))\n#\nplt.subplot(347)\nplt.scatter(X0[n_tr:n_tot, 0], X0[n_tr:n_tot, 1], c=pred3)\nplt.title(\"MVML_Cov view 1\")\nplt.subplot(348)\nplt.scatter(X1[n_tr:n_tot, 0], X1[n_tr:n_tot, 1], c=pred3)\nplt.title(\"MVML_Cov view 2\")\n#\npred4[np.where(pred4[:, 0] != Y[n_tr:n_tot])] = 0\npred4 = pred4.reshape((pred4.shape[0]))\nplt.subplot(349)\nplt.scatter(X0[n_tr:n_tot, 0], X0[n_tr:n_tot, 1], c=pred4)\nplt.title(\"MVML_I view 1\")\nplt.subplot(3,4,10)\nplt.scatter(X1[n_tr:n_tot, 0], X1[n_tr:n_tot, 1], c=pred4)\nplt.title(\"MVML_I view 2\")\n#\nplt.show()"
-      ]
-    }
-  ],
-  "metadata": {
-    "kernelspec": {
-      "display_name": "Python 3",
-      "language": "python",
-      "name": "python3"
-    },
-    "language_info": {
-      "codemirror_mode": {
-        "name": "ipython",
-        "version": 3
-      },
-      "file_extension": ".py",
-      "mimetype": "text/x-python",
-      "name": "python",
-      "nbconvert_exporter": "python",
-      "pygments_lexer": "ipython3",
-      "version": "3.6.8"
-    }
-  },
-  "nbformat": 4,
-  "nbformat_minor": 0
-}
\ No newline at end of file
diff --git a/doc/tutorial/auto_examples/mvml/mvml_plot_.py b/doc/tutorial/auto_examples/mvml/mvml_plot_.py
deleted file mode 100644
index 1abf1ea..0000000
--- a/doc/tutorial/auto_examples/mvml/mvml_plot_.py
+++ /dev/null
@@ -1,189 +0,0 @@
-# -*- coding: utf-8 -*-
-"""
-====
-MVML
-====
-Demonstration of how MVML (in file mvml.py) is intended to be used with a
-very simple simulated dataset.
-
-The demonstration uses scikit-learn for generating the datasets and for
-computing the RBF kernel function; see
-http://scikit-learn.org/stable/
-"""
-
-import numpy as np
-import matplotlib.pyplot as plt
-from sklearn import datasets
-from sklearn.metrics import accuracy_score
-from sklearn.metrics.pairwise import rbf_kernel
-from multimodal.kernels.mvml import MVML
-
-
-np.random.seed(4)
-
-# =========== create a simple dataset ============
-
-n_tot = 200
-half = int(n_tot/2)
-n_tr = 120
-
-# create a bit more data than needed so that we can take "half" samples from each class
-X0, y0 = datasets.make_moons(n_samples=n_tot+2, noise=0.3, shuffle=False)
-X1, y1 = datasets.make_circles(n_samples=n_tot+2, noise=0.1, shuffle=False)
-
-# make multi-view correspondence (select an equal number of samples for both classes and order the data
-# the same way in both views)
-
-yinds0 = np.append(np.where(y0 == 0)[0][0:half], np.where(y0 == 1)[0][0:half])
-yinds1 = np.append(np.where(y1 == 0)[0][0:half], np.where(y1 == 1)[0][0:half])
-
-X0 = X0[yinds0, :]
-X1 = X1[yinds1, :]
-Y = np.append(np.zeros(half)-1, np.ones(half))  # labels -1 and 1
-
-# show data
-plt.figure(figsize=(10., 8.))
-plt.subplot(121)
-plt.scatter(X0[:, 0], X0[:, 1], c=Y)
-plt.title("all data, view 1")
-plt.subplot(122)
-plt.scatter(X1[:, 0], X1[:, 1], c=Y)
-plt.title("all data, view 2")
-plt.show()
-
-# shuffle
-order = np.random.permutation(n_tot)
-X0 = X0[order, :]
-X1 = X1[order, :]
-Y = Y[order]
-
-# make kernel dictionaries
-kernel_dict = {}
-test_kernel_dict = {}
-kernel_dict[0] = rbf_kernel(X0[0:n_tr, :])
-kernel_dict[1] = rbf_kernel(X1[0:n_tr, :])
-test_kernel_dict[0] = rbf_kernel(X0[n_tr:n_tot, :], X0[0:n_tr, :])
-test_kernel_dict[1] = rbf_kernel(X1[n_tr:n_tot, :], X1[0:n_tr, :])
-
-# =========== use MVML in classifying the data ============
-
-# demo of how the code is intended to be used; parameters are not cross-validated, just picked
-# with approximation
-# default: learn A, don't learn w   (learn_A=1, learn_w=0)
-mvml = MVML(lmbda=0.1, eta=1, nystrom_param=0.2, kernel='precomputed')
-mvml.fit(kernel_dict, Y[0:n_tr])
-
-pred1 = np.sign(mvml.predict(test_kernel_dict))  # take sign for classification result
-
-# without approximation
-mvml2 = MVML(lmbda=0.1, eta=1, nystrom_param=1, kernel='precomputed')
-mvml2.fit(kernel_dict, Y[0:n_tr])
-pred2 = np.sign(mvml2.predict(test_kernel_dict))
-
-# use MVML_Cov, don't learn w
-mvml3 = MVML(lmbda=0.1, eta=1, learn_A=3, nystrom_param=1, kernel='precomputed')
-mvml3.fit(kernel_dict, Y[0:n_tr])
-pred3 = np.sign(mvml3.predict(test_kernel_dict))
-#
-# use MVML_I, don't learn w
-mvml3 = MVML(lmbda=0.1, eta=1,learn_A=4, nystrom_param=1, kernel='precomputed')
-
-pred4 = np.sign(mvml.predict(test_kernel_dict))
-
-# =========== show results ============
-
-# accuracies
-acc1 = accuracy_score(Y[n_tr:n_tot], pred1)
-acc2 = accuracy_score(Y[n_tr:n_tot], pred2)
-acc3 = accuracy_score(Y[n_tr:n_tot], pred3)
-acc4 = accuracy_score(Y[n_tr:n_tot], pred4)
-
-# display the obtained accuracies
-print("MVML:       ", acc1)
-print("MVMLsparse: ", acc2)
-print("MVML_Cov:   ", acc3)
-print("MVML_I:     ", acc4)
-
-# plot data and some classification results
-plt.figure(2, figsize=(10., 8.))
-plt.subplot(341)
-plt.scatter(X0[n_tr:n_tot, 0], X0[n_tr:n_tot, 1], c=Y[n_tr:n_tot])
-plt.title("orig. view 1")
-plt.subplot(342)
-plt.scatter(X1[n_tr:n_tot, 0], X1[n_tr:n_tot, 1], c=Y[n_tr:n_tot])
-plt.title("orig. view 2")
-
-# mask the misclassified points (set them to 0) so they stand out in the plots
-pred1[np.where(pred1[:, 0] != Y[n_tr:n_tot])] = 0
-pred1 = pred1.reshape((pred1.shape[0]))
-plt.subplot(343)
-plt.scatter(X0[n_tr:n_tot, 0], X0[n_tr:n_tot, 1], c=pred1)
-plt.title("MVML view 1")
-plt.subplot(344)
-plt.scatter(X1[n_tr:n_tot, 0], X1[n_tr:n_tot, 1], c=pred1)
-plt.title("MVML view 2")
-
-pred2[np.where(pred2[:, 0] != Y[n_tr:n_tot])] = 0
-pred2 = pred2.reshape((pred2.shape[0]))
-plt.subplot(345)
-plt.scatter(X0[n_tr:n_tot, 0], X0[n_tr:n_tot, 1], c=pred2)
-plt.title("MVMLsparse view 1")
-plt.subplot(346)
-plt.scatter(X1[n_tr:n_tot, 0], X1[n_tr:n_tot, 1], c=pred2)
-plt.title("MVMLsparse view 2")
-
-pred3[np.where(pred3[:, 0] != Y[n_tr:n_tot])] = 0
-pred3 = pred3.reshape((pred3.shape[0]))
-plt.subplot(347)
-plt.scatter(X0[n_tr:n_tot, 0], X0[n_tr:n_tot, 1], c=pred3)
-plt.title("MVML_Cov view 1")
-plt.subplot(348)
-plt.scatter(X1[n_tr:n_tot, 0], X1[n_tr:n_tot, 1], c=pred3)
-plt.title("MVML_Cov view 2")
-
-pred4[np.where(pred4[:, 0] != Y[n_tr:n_tot])] = 0
-pred4 = pred4.reshape((pred4.shape[0]))
-plt.subplot(349)
-plt.scatter(X0[n_tr:n_tot, 0], X0[n_tr:n_tot, 1], c=pred4)
-plt.title("MVML_I view 1")
-plt.subplot(3, 4, 10)
-plt.scatter(X1[n_tr:n_tot, 0], X1[n_tr:n_tot, 1], c=pred4)
-plt.title("MVML_I view 2")
-
-plt.show()
diff --git a/doc/tutorial/auto_examples/mvml/mvml_plot_.rst b/doc/tutorial/auto_examples/mvml/mvml_plot_.rst
deleted file mode 100644
index 490cadf..0000000
--- a/doc/tutorial/auto_examples/mvml/mvml_plot_.rst
+++ /dev/null
@@ -1,232 +0,0 @@
-.. note::
-    :class: sphx-glr-download-link-note
-
-    Click :ref:`here <sphx_glr_download_tutorial_auto_examples_mvml_mvml_plot_.py>` to download the full example code
-.. rst-class:: sphx-glr-example-title
-
-.. _sphx_glr_tutorial_auto_examples_mvml_mvml_plot_.py:
-
-
-====
-MVML
-====
-Demonstration of how MVML (in file mvml.py) is intended to be used, on a very simple simulated dataset.
-
-The demonstration uses scikit-learn to generate the datasets and to compute the RBF kernel; see
-http://scikit-learn.org/stable/
-
-
-.. code-block:: default
-
-
-    import numpy as np
-    import matplotlib.pyplot as plt
-    from sklearn import datasets
-    from sklearn.metrics import accuracy_score
-    from sklearn.metrics.pairwise import rbf_kernel
-    from multimodal.kernels.mvml import MVML
-
-
-    np.random.seed(4)
-
-    # =========== create a simple dataset ============
-
-    n_tot = 200
-    half = int(n_tot/2)
-    n_tr = 120
-
-    # create a bit more data than needed so that we can take "half" samples of each class
-    X0, y0 = datasets.make_moons(n_samples=n_tot+2, noise=0.3, shuffle=False)
-    X1, y1 = datasets.make_circles(n_samples=n_tot+2, noise=0.1, shuffle=False)
-
-    # make multi-view correspondence (select an equal number of samples for both
-    # classes and order the data the same way in both views)
-    yinds0 = np.append(np.where(y0 == 0)[0][0:half], np.where(y0 == 1)[0][0:half])
-    yinds1 = np.append(np.where(y1 == 0)[0][0:half], np.where(y1 == 1)[0][0:half])
-
-    X0 = X0[yinds0, :]
-    X1 = X1[yinds1, :]
-    Y = np.append(np.zeros(half)-1, np.ones(half))  # labels -1 and 1
-
-    # show data
-    plt.figure(figsize=(10., 8.))
-    plt.subplot(121)
-    plt.scatter(X0[:, 0], X0[:, 1], c=Y)
-    plt.title("all data, view 1")
-    plt.subplot(122)
-    plt.scatter(X1[:, 0], X1[:, 1], c=Y)
-    plt.title("all data, view 2")
-    plt.show()
-
-    # shuffle
-    order = np.random.permutation(n_tot)
-    X0 = X0[order, :]
-    X1 = X1[order, :]
-    Y = Y[order]
-
-    # make kernel dictionaries
-    kernel_dict = {}
-    test_kernel_dict = {}
-    kernel_dict[0] = rbf_kernel(X0[0:n_tr, :])
-    kernel_dict[1] = rbf_kernel(X1[0:n_tr, :])
-    test_kernel_dict[0] = rbf_kernel(X0[n_tr:n_tot, :], X0[0:n_tr, :])
-    test_kernel_dict[1] = rbf_kernel(X1[n_tr:n_tot, :], X1[0:n_tr, :])
-
-    # =========== use MVML in classifying the data ============
-
-    # demo of how the code is intended to be used; parameters are not
-    # cross-validated, just picked by hand
-
-    # with Nystrom approximation
-    # default: learn A, don't learn w   (learn_A=1, learn_w=0)
-    mvml = MVML(lmbda=0.1, eta=1, nystrom_param=0.2, kernel='precomputed')
-    mvml.fit(kernel_dict, Y[0:n_tr])
-
-    pred1 = np.sign(mvml.predict(test_kernel_dict))  # take sign for classification result
-
-    # without approximation
-    mvml2 = MVML(lmbda=0.1, eta=1, nystrom_param=1, kernel='precomputed')
-    mvml2.fit(kernel_dict, Y[0:n_tr])
-    pred2 = np.sign(mvml2.predict(test_kernel_dict))
-
-    # use MVML_Cov, don't learn w
-    mvml3 = MVML(lmbda=0.1, eta=1, learn_A=3, nystrom_param=1, kernel='precomputed')
-    mvml3.fit(kernel_dict, Y[0:n_tr])
-    pred3 = np.sign(mvml3.predict(test_kernel_dict))
-
-    # use MVML_I, don't learn w
-    mvml4 = MVML(lmbda=0.1, eta=1, learn_A=4, nystrom_param=1, kernel='precomputed')
-    mvml4.fit(kernel_dict, Y[0:n_tr])
-    pred4 = np.sign(mvml4.predict(test_kernel_dict))
-
-    # =========== show results ============
-
-    # accuracies
-    acc1 = accuracy_score(Y[n_tr:n_tot], pred1)
-    acc2 = accuracy_score(Y[n_tr:n_tot], pred2)
-    acc3 = accuracy_score(Y[n_tr:n_tot], pred3)
-    acc4 = accuracy_score(Y[n_tr:n_tot], pred4)
-
-    # display the obtained accuracies
-    print("MVML:       ", acc1)
-    print("MVMLsparse: ", acc2)
-    print("MVML_Cov:   ", acc3)
-    print("MVML_I:     ", acc4)
-
-    # plot data and some classification results
-    plt.figure(2, figsize=(10., 8.))
-    plt.subplot(341)
-    plt.scatter(X0[n_tr:n_tot, 0], X0[n_tr:n_tot, 1], c=Y[n_tr:n_tot])
-    plt.title("orig. view 1")
-    plt.subplot(342)
-    plt.scatter(X1[n_tr:n_tot, 0], X1[n_tr:n_tot, 1], c=Y[n_tr:n_tot])
-    plt.title("orig. view 2")
-
-    # mask the misclassified points (set them to 0) so they stand out in the plots
-    pred1[np.where(pred1[:, 0] != Y[n_tr:n_tot])] = 0
-    pred1 = pred1.reshape((pred1.shape[0]))
-    plt.subplot(343)
-    plt.scatter(X0[n_tr:n_tot, 0], X0[n_tr:n_tot, 1], c=pred1)
-    plt.title("MVML view 1")
-    plt.subplot(344)
-    plt.scatter(X1[n_tr:n_tot, 0], X1[n_tr:n_tot, 1], c=pred1)
-    plt.title("MVML view 2")
-
-    pred2[np.where(pred2[:, 0] != Y[n_tr:n_tot])] = 0
-    pred2 = pred2.reshape((pred2.shape[0]))
-    plt.subplot(345)
-    plt.scatter(X0[n_tr:n_tot, 0], X0[n_tr:n_tot, 1], c=pred2)
-    plt.title("MVMLsparse view 1")
-    plt.subplot(346)
-    plt.scatter(X1[n_tr:n_tot, 0], X1[n_tr:n_tot, 1], c=pred2)
-    plt.title("MVMLsparse view 2")
-
-    pred3[np.where(pred3[:, 0] != Y[n_tr:n_tot])] = 0
-    pred3 = pred3.reshape((pred3.shape[0]))
-    plt.subplot(347)
-    plt.scatter(X0[n_tr:n_tot, 0], X0[n_tr:n_tot, 1], c=pred3)
-    plt.title("MVML_Cov view 1")
-    plt.subplot(348)
-    plt.scatter(X1[n_tr:n_tot, 0], X1[n_tr:n_tot, 1], c=pred3)
-    plt.title("MVML_Cov view 2")
-
-    pred4[np.where(pred4[:, 0] != Y[n_tr:n_tot])] = 0
-    pred4 = pred4.reshape((pred4.shape[0]))
-    plt.subplot(349)
-    plt.scatter(X0[n_tr:n_tot, 0], X0[n_tr:n_tot, 1], c=pred4)
-    plt.title("MVML_I view 1")
-    plt.subplot(3, 4, 10)
-    plt.scatter(X1[n_tr:n_tot, 0], X1[n_tr:n_tot, 1], c=pred4)
-    plt.title("MVML_I view 2")
-
-    plt.show()
-
-
-.. rst-class:: sphx-glr-timing
-
-   **Total running time of the script:** ( 0 minutes  0.000 seconds)
-
-
-.. _sphx_glr_download_tutorial_auto_examples_mvml_mvml_plot_.py:
-
-
-.. only :: html
-
- .. container:: sphx-glr-footer
-    :class: sphx-glr-footer-example
-
-
-
-  .. container:: sphx-glr-download
-
-     :download:`Download Python source code: mvml_plot_.py <mvml_plot_.py>`
-
-
-
-  .. container:: sphx-glr-download
-
-     :download:`Download Jupyter notebook: mvml_plot_.ipynb <mvml_plot_.ipynb>`
-
-
-.. only:: html
-
- .. rst-class:: sphx-glr-signature
-
-    `Gallery generated by Sphinx-Gallery <https://sphinx-gallery.github.io>`_
diff --git a/doc/tutorial/auto_examples/mvml/mvml_plot__codeobj.pickle b/doc/tutorial/auto_examples/mvml/mvml_plot__codeobj.pickle
deleted file mode 100644
index 4e0f7c5674378aad85a3f18704b6ebf847fb2265..0000000000000000000000000000000000000000
GIT binary patch
literal 0
HcmV?d00001

literal 1176
zcmbVMOK#gh4DCYx)Haf~L6MxGNH(*|A_wTQL4ho~HO3<*8hCz~PYo9+kONFsQSV!h
zM&m|tQq<Lod?NYCM@skAuZQYQ_ry8VLICqzE7!*PS6hRH`ZH&~eQYmo+VeQGtJ~!o
ztOC*gM88s0LOX!tSn4_mM9<t>0JVxp@Rl=24CQvi9ll!Rn5Ap9^?!>DaiR=y{7xml
zW*n=|Zl-ahf;8K<nKj2DzBR%V&pp!Suu*m}?!Bbm2<-)55wlGe=DdW4I7O;WgEN;R
z*%^?Xg?Lg^PaSwiBwfQ&>LgdDojpM??OjozWF+c+FvKHdwp7(x5(iN4W=%HMS#-1Z
z5vj%y$RcS4rH!hy2Yr7E@#7y716Br4)24f_`BUg$D68vl*I!DbosC(iG6>Icxm1vR
zCMeQ(DN@fL_TRNx^B-V!X?>KPLGPEiVC<b51FC2<?!7_a6%JXX&dXukGO^QT(XFRo
zn7Y4<R~9r&2X@Qv!tOZ6y@yo}II(%E;~4cGY%kuVxc^frth$TN*Eey!dswAu*Hfc6
pWt;d_ZqnHuFxL@fELh66OULw1qvt5rJT9L^|5Hepv4@2~egpiG$4USI

diff --git a/doc/tutorial/auto_examples/mvml_plot_.ipynb b/doc/tutorial/auto_examples/mvml_plot_.ipynb
deleted file mode 100644
index 830427c..0000000
--- a/doc/tutorial/auto_examples/mvml_plot_.ipynb
+++ /dev/null
@@ -1,54 +0,0 @@
-{
-  "cells": [
-    {
-      "cell_type": "code",
-      "execution_count": null,
-      "metadata": {
-        "collapsed": false
-      },
-      "outputs": [],
-      "source": [
-        "%matplotlib inline"
-      ]
-    },
-    {
-      "cell_type": "markdown",
-      "metadata": {},
-      "source": [
-        "\n# MVML\n\nDemonstration on how MVML (in file mvml.py) is intended to be used with very simple simulated dataset\n\nDemonstration uses scikit-learn for retrieving datasets and for calculating rbf kernel function, see\nhttp://scikit-learn.org/stable/\n\n"
-      ]
-    },
-    {
-      "cell_type": "code",
-      "execution_count": null,
-      "metadata": {
-        "collapsed": false
-      },
-      "outputs": [],
-      "source": [
-        "import numpy as np\nimport matplotlib.pyplot as plt\nfrom sklearn import datasets\nfrom sklearn.metrics import accuracy_score\nfrom sklearn.metrics.pairwise import rbf_kernel\nfrom multimodal.kernels.mvml import MVML\nfrom multimodal.datasets.data_sample import DataSample\nfrom multimodal.tests.datasets.get_dataset_path import get_dataset_path\n\n\nnp.random.seed(4)\n\n# =========== create a simple dataset ============\n\nn_tot = 200\nhalf = int(n_tot/2)\nn_tr = 120\n\n# create a bit more data than needed so that we can take \"half\" amount of samples for each class\nX0, y0 = datasets.make_moons(n_samples=n_tot+2, noise=0.3, shuffle=False)\nX1, y1 = datasets.make_circles(n_samples=n_tot+2, noise=0.1, shuffle=False)\n\n# make multi-view correspondence (select equal number of samples for both classes and order the data same way\n# in both views)\n\nyinds0 = np.append(np.where(y0 == 0)[0][0:half], np.where(y0 == 1)[0][0:half])\nyinds1 = np.append(np.where(y1 == 0)[0][0:half], np.where(y1 == 1)[0][0:half])\n\nX0 = X0[yinds0, :]\nX1 = X1[yinds1, :]\nY = np.append(np.zeros(half)-1, np.ones(half))  # labels -1 and 1\n\n# show data\n# =========== create a simple dataset ============\n\nn_tot = 200\nhalf = int(n_tot/2)\nn_tr = 120\n\n# create a bit more data than needed so that we can take \"half\" amount of samples for each class\nX0, y0 = datasets.make_moons(n_samples=n_tot+2, noise=0.3, shuffle=False)\nX1, y1 = datasets.make_circles(n_samples=n_tot+2, noise=0.1, shuffle=False)\n\n# make multi-view correspondence (select equal number of samples for both classes and order the data same way\n# in both views)\n\nyinds0 = np.append(np.where(y0 == 0)[0][0:half], np.where(y0 == 1)[0][0:half])\nyinds1 = np.append(np.where(y1 == 0)[0][0:half], np.where(y1 == 1)[0][0:half])\n\nX0 = X0[yinds0, :]\nX1 = X1[yinds1, :]\nY = np.append(np.zeros(half)-1, np.ones(half))  # labels -1 and 1\n\n# show data\nplt.figure(figsize=(10., 8.))\nplt.subplot(121)\nplt.scatter(X0[:, 0], X0[:, 1], c=Y)\nplt.title(\"all data, view 1\")\nplt.subplot(122)\nplt.scatter(X1[:, 0], X1[:, 1], c=Y)\nplt.title(\"all data, view 2\")\nplt.show()\n\n# shuffle\norder = np.random.permutation(n_tot)\nX0 = X0[order, :]\nX1 = X1[order, :]\nY = Y[order]\n\n# make kernel dictionaries\nkernel_dict = {}\ntest_kernel_dict = {}\nkernel_dict[0] = rbf_kernel(X0[0:n_tr, :])\nkernel_dict[1] = rbf_kernel(X1[0:n_tr, :])\ntest_kernel_dict[0] = rbf_kernel(X0[n_tr:n_tot, :], X0[0:n_tr, :])\ntest_kernel_dict[1] = rbf_kernel(X1[n_tr:n_tot, :], X1[0:n_tr, :])\n\n# input_x = get_dataset_path(\"input_x_dic.pkl\")\n# f = open(input_x, \"wb\")\n# pickle.dump(input_x, f)\n#\n#\n# d= DataSample(kernel_dict)\n# a = d.data\n#\n# =========== use MVML in classifying the data ============\n#\n# demo on how the code is intended to be used; parameters are not cross-validated, just picked some\n# # with approximation\n# # default: learn A, don't learn w   (learn_A=1, learn_w=0)\nmvml = MVML(lmbda=0.1, eta=1, nystrom_param=0.2, kernel='precomputed')\nmvml.fit(kernel_dict, Y[0:n_tr])\n\n\n#\n\npred1 = np.sign(mvml.predict(test_kernel_dict) ) # take sign for classification result\n#\n# without approximation\nmvml2 = MVML(lmbda=0.1, eta=1, nystrom_param=1, kernel='precomputed')   # without approximation\nmvml2.fit(kernel_dict, Y[0:n_tr])\npred2 = np.sign(mvml2.predict(test_kernel_dict))\n#\n# use MVML_Cov, don't learn w\nmvml3 = MVML(lmbda=0.1, eta=1,learn_A=3, nystrom_param=1, kernel='precomputed')\nmvml3.fit(kernel_dict, Y[0:n_tr])\npred3 = 
np.sign(mvml.predict(test_kernel_dict))\n#\n# use MVML_I, don't learn w\nmvml3 = MVML(lmbda=0.1, eta=1,learn_A=4, nystrom_param=1, kernel='precomputed')\n\npred4 = np.sign(mvml.predict(test_kernel_dict))\n#\n#\n# # =========== show results ============\n#\n# # accuracies\nacc1 = accuracy_score(Y[n_tr:n_tot], pred1)\nacc2 = accuracy_score(Y[n_tr:n_tot], pred2)\nacc3 = accuracy_score(Y[n_tr:n_tot], pred3)\nacc4 = accuracy_score(Y[n_tr:n_tot], pred4)\n#\n# # display obtained accuracies\n#\nprint(\"MVML:       \", acc1)\nprint(\"MVMLsparse: \", acc2)\nprint(\"MVML_Cov:   \", acc3)\nprint(\"MVML_I:     \", acc4)\n#\n#\n# # plot data and some classification results\n#\nplt.figure(2, figsize=(10., 8.))\nplt.subplot(341)\nplt.scatter(X0[n_tr:n_tot, 0], X0[n_tr:n_tot, 1], c=Y[n_tr:n_tot])\nplt.title(\"orig. view 1\")\nplt.subplot(342)\nplt.scatter(X1[n_tr:n_tot, 0], X1[n_tr:n_tot, 1], c=Y[n_tr:n_tot])\nplt.title(\"orig. view 2\")\n#\npred1[np.where(pred1[:, 0] != Y[n_tr:n_tot])] = 0\npred1 = pred1.reshape((pred1.shape[0]))\nplt.subplot(343)\nplt.scatter(X0[n_tr:n_tot, 0], X0[n_tr:n_tot, 1], c=pred1)\nplt.title(\"MVML view 1\")\nplt.subplot(344)\nplt.scatter(X1[n_tr:n_tot, 0], X1[n_tr:n_tot, 1], c=pred1)\nplt.title(\"MVML view 2\")\n#\npred2[np.where(pred2[:, 0] != Y[n_tr:n_tot])] = 0\npred2 = pred2.reshape((pred2.shape[0]))\nplt.subplot(345)\nplt.scatter(X0[n_tr:n_tot, 0], X0[n_tr:n_tot, 1], c=pred2)\nplt.title(\"MVMLsparse view 1\")\nplt.subplot(346)\nplt.scatter(X1[n_tr:n_tot, 0], X1[n_tr:n_tot, 1], c=pred2)\nplt.title(\"MVMLsparse view 2\")\n#\npred3[np.where(pred3[:, 0] != Y[n_tr:n_tot])] = 0\npred3 = pred3.reshape((pred3.shape[0]))\n#\nplt.subplot(347)\nplt.scatter(X0[n_tr:n_tot, 0], X0[n_tr:n_tot, 1], c=pred3)\nplt.title(\"MVML_Cov view 1\")\nplt.subplot(348)\nplt.scatter(X1[n_tr:n_tot, 0], X1[n_tr:n_tot, 1], c=pred3)\nplt.title(\"MVML_Cov view 2\")\n#\npred4[np.where(pred4[:, 0] != Y[n_tr:n_tot])] = 0\npred4 = pred4.reshape((pred4.shape[0]))\nplt.subplot(349)\nplt.scatter(X0[n_tr:n_tot, 0], X0[n_tr:n_tot, 1], c=pred4)\nplt.title(\"MVML_I view 1\")\nplt.subplot(3,4,10)\nplt.scatter(X1[n_tr:n_tot, 0], X1[n_tr:n_tot, 1], c=pred4)\nplt.title(\"MVML_I view 2\")\n#\nplt.show()"
-      ]
-    }
-  ],
-  "metadata": {
-    "kernelspec": {
-      "display_name": "Python 3",
-      "language": "python",
-      "name": "python3"
-    },
-    "language_info": {
-      "codemirror_mode": {
-        "name": "ipython",
-        "version": 3
-      },
-      "file_extension": ".py",
-      "mimetype": "text/x-python",
-      "name": "python",
-      "nbconvert_exporter": "python",
-      "pygments_lexer": "ipython3",
-      "version": "3.6.8"
-    }
-  },
-  "nbformat": 4,
-  "nbformat_minor": 0
-}
\ No newline at end of file
diff --git a/doc/tutorial/auto_examples/mvml_plot_.py b/doc/tutorial/auto_examples/mvml_plot_.py
deleted file mode 100644
index 1abf1ea..0000000
--- a/doc/tutorial/auto_examples/mvml_plot_.py
+++ /dev/null
@@ -1,189 +0,0 @@
-# -*- coding: utf-8 -*-
-"""
-====
-MVML
-====
-Demonstration of how MVML (in file mvml.py) is intended to be used, on a very simple simulated dataset.
-
-The demonstration uses scikit-learn to generate the datasets and to compute the RBF kernel; see
-http://scikit-learn.org/stable/
-"""
-
-import numpy as np
-import matplotlib.pyplot as plt
-from sklearn import datasets
-from sklearn.metrics import accuracy_score
-from sklearn.metrics.pairwise import rbf_kernel
-from multimodal.kernels.mvml import MVML
-
-
-np.random.seed(4)
-
-# =========== create a simple dataset ============
-
-n_tot = 200
-half = int(n_tot/2)
-n_tr = 120
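-# n_tr of the n_tot samples are used for training, the remaining
-# n_tot - n_tr for testing (the split is applied after the shuffle below)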
-
-# create a bit more data than needed so that we can take "half" samples of each class
-X0, y0 = datasets.make_moons(n_samples=n_tot+2, noise=0.3, shuffle=False)
-X1, y1 = datasets.make_circles(n_samples=n_tot+2, noise=0.1, shuffle=False)
-
-# make multi-view correspondence (select an equal number of samples for both
-# classes and order the data the same way in both views)
-yinds0 = np.append(np.where(y0 == 0)[0][0:half], np.where(y0 == 1)[0][0:half])
-yinds1 = np.append(np.where(y1 == 0)[0][0:half], np.where(y1 == 1)[0][0:half])
-
-X0 = X0[yinds0, :]
-X1 = X1[yinds1, :]
-Y = np.append(np.zeros(half)-1, np.ones(half))  # labels -1 and 1
-
-# show data
-plt.figure(figsize=(10., 8.))
-plt.subplot(121)
-plt.scatter(X0[:, 0], X0[:, 1], c=Y)
-plt.title("all data, view 1")
-plt.subplot(122)
-plt.scatter(X1[:, 0], X1[:, 1], c=Y)
-plt.title("all data, view 2")
-plt.show()
-
-# shuffle
-order = np.random.permutation(n_tot)
-X0 = X0[order, :]
-X1 = X1[order, :]
-Y = Y[order]
-
-# make kernel dictionaries
-kernel_dict = {}
-test_kernel_dict = {}
-kernel_dict[0] = rbf_kernel(X0[0:n_tr, :])
-kernel_dict[1] = rbf_kernel(X1[0:n_tr, :])
-test_kernel_dict[0] = rbf_kernel(X0[n_tr:n_tot, :], X0[0:n_tr, :])
-test_kernel_dict[1] = rbf_kernel(X1[n_tr:n_tot, :], X1[0:n_tr, :])
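-
-# added sanity-check sketch: with kernel='precomputed', each train Gram
-# matrix is square (n_tr x n_tr) and each test kernel matrix pairs the
-# n_tot - n_tr test samples with the same n_tr training samples
-assert kernel_dict[0].shape == (n_tr, n_tr)
-assert test_kernel_dict[0].shape == (n_tot - n_tr, n_tr)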
-
-# =========== use MVML in classifying the data ============
-
-# demo of how the code is intended to be used; parameters are not
-# cross-validated, just picked by hand
-
-# with Nystrom approximation
-# default: learn A, don't learn w   (learn_A=1, learn_w=0)
-mvml = MVML(lmbda=0.1, eta=1, nystrom_param=0.2, kernel='precomputed')
-mvml.fit(kernel_dict, Y[0:n_tr])
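-
-# note: nystrom_param presumably sets the fraction of training samples kept
-# as landmark points, so 0.2 uses roughly 20% of them; nystrom_param=1 below
-# corresponds to "without approximation"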
-
-pred1 = np.sign(mvml.predict(test_kernel_dict))  # take sign for classification result
-
-# without approximation
-mvml2 = MVML(lmbda=0.1, eta=1, nystrom_param=1, kernel='precomputed')
-mvml2.fit(kernel_dict, Y[0:n_tr])
-pred2 = np.sign(mvml2.predict(test_kernel_dict))
-
-# use MVML_Cov, don't learn w
-mvml3 = MVML(lmbda=0.1, eta=1, learn_A=3, nystrom_param=1, kernel='precomputed')
-mvml3.fit(kernel_dict, Y[0:n_tr])
-pred3 = np.sign(mvml3.predict(test_kernel_dict))
-
-# use MVML_I, don't learn w
-mvml4 = MVML(lmbda=0.1, eta=1, learn_A=4, nystrom_param=1, kernel='precomputed')
-mvml4.fit(kernel_dict, Y[0:n_tr])
-pred4 = np.sign(mvml4.predict(test_kernel_dict))
-
-# =========== show results ============
-
-# accuracies
-acc1 = accuracy_score(Y[n_tr:n_tot], pred1)
-acc2 = accuracy_score(Y[n_tr:n_tot], pred2)
-acc3 = accuracy_score(Y[n_tr:n_tot], pred3)
-acc4 = accuracy_score(Y[n_tr:n_tot], pred4)
-
-# display the obtained accuracies
-print("MVML:       ", acc1)
-print("MVMLsparse: ", acc2)
-print("MVML_Cov:   ", acc3)
-print("MVML_I:     ", acc4)
-
-# plot data and some classification results
-plt.figure(2, figsize=(10., 8.))
-plt.subplot(341)
-plt.scatter(X0[n_tr:n_tot, 0], X0[n_tr:n_tot, 1], c=Y[n_tr:n_tot])
-plt.title("orig. view 1")
-plt.subplot(342)
-plt.scatter(X1[n_tr:n_tot, 0], X1[n_tr:n_tot, 1], c=Y[n_tr:n_tot])
-plt.title("orig. view 2")
-
-# mask the misclassified points (set them to 0) so they stand out in the plots
-pred1[np.where(pred1[:, 0] != Y[n_tr:n_tot])] = 0
-pred1 = pred1.reshape((pred1.shape[0]))
-plt.subplot(343)
-plt.scatter(X0[n_tr:n_tot, 0], X0[n_tr:n_tot, 1], c=pred1)
-plt.title("MVML view 1")
-plt.subplot(344)
-plt.scatter(X1[n_tr:n_tot, 0], X1[n_tr:n_tot, 1], c=pred1)
-plt.title("MVML view 2")
-
-pred2[np.where(pred2[:, 0] != Y[n_tr:n_tot])] = 0
-pred2 = pred2.reshape((pred2.shape[0]))
-plt.subplot(345)
-plt.scatter(X0[n_tr:n_tot, 0], X0[n_tr:n_tot, 1], c=pred2)
-plt.title("MVMLsparse view 1")
-plt.subplot(346)
-plt.scatter(X1[n_tr:n_tot, 0], X1[n_tr:n_tot, 1], c=pred2)
-plt.title("MVMLsparse view 2")
-
-pred3[np.where(pred3[:, 0] != Y[n_tr:n_tot])] = 0
-pred3 = pred3.reshape((pred3.shape[0]))
-plt.subplot(347)
-plt.scatter(X0[n_tr:n_tot, 0], X0[n_tr:n_tot, 1], c=pred3)
-plt.title("MVML_Cov view 1")
-plt.subplot(348)
-plt.scatter(X1[n_tr:n_tot, 0], X1[n_tr:n_tot, 1], c=pred3)
-plt.title("MVML_Cov view 2")
-
-pred4[np.where(pred4[:, 0] != Y[n_tr:n_tot])] = 0
-pred4 = pred4.reshape((pred4.shape[0]))
-plt.subplot(349)
-plt.scatter(X0[n_tr:n_tot, 0], X0[n_tr:n_tot, 1], c=pred4)
-plt.title("MVML_I view 1")
-plt.subplot(3, 4, 10)
-plt.scatter(X1[n_tr:n_tot, 0], X1[n_tr:n_tot, 1], c=pred4)
-plt.title("MVML_I view 2")
-
-plt.show()
diff --git a/doc/tutorial/auto_examples/mvml_plot_.rst b/doc/tutorial/auto_examples/mvml_plot_.rst
deleted file mode 100644
index 7d5e65d..0000000
--- a/doc/tutorial/auto_examples/mvml_plot_.rst
+++ /dev/null
@@ -1,232 +0,0 @@
-.. note::
-    :class: sphx-glr-download-link-note
-
-    Click :ref:`here <sphx_glr_download_tutorial_auto_examples_mvml_plot_.py>` to download the full example code
-.. rst-class:: sphx-glr-example-title
-
-.. _sphx_glr_tutorial_auto_examples_mvml_plot_.py:
-
-
-====
-MVML
-====
-Demonstration of how MVML (in file mvml.py) is intended to be used, on a very simple simulated dataset.
-
-The demonstration uses scikit-learn to generate the datasets and to compute the RBF kernel; see
-http://scikit-learn.org/stable/
-
-
-.. code-block:: default
-
-
-    import numpy as np
-    import matplotlib.pyplot as plt
-    from sklearn import datasets
-    from sklearn.metrics import accuracy_score
-    from sklearn.metrics.pairwise import rbf_kernel
-    from multimodal.kernels.mvml import MVML
-
-
-    np.random.seed(4)
-
-    # =========== create a simple dataset ============
-
-    n_tot = 200
-    half = int(n_tot/2)
-    n_tr = 120
-
-    # create a bit more data than needed so that we can take "half" samples of each class
-    X0, y0 = datasets.make_moons(n_samples=n_tot+2, noise=0.3, shuffle=False)
-    X1, y1 = datasets.make_circles(n_samples=n_tot+2, noise=0.1, shuffle=False)
-
-    # make multi-view correspondence (select an equal number of samples for both
-    # classes and order the data the same way in both views)
-    yinds0 = np.append(np.where(y0 == 0)[0][0:half], np.where(y0 == 1)[0][0:half])
-    yinds1 = np.append(np.where(y1 == 0)[0][0:half], np.where(y1 == 1)[0][0:half])
-
-    X0 = X0[yinds0, :]
-    X1 = X1[yinds1, :]
-    Y = np.append(np.zeros(half)-1, np.ones(half))  # labels -1 and 1
-
-    # show data
-    plt.figure(figsize=(10., 8.))
-    plt.subplot(121)
-    plt.scatter(X0[:, 0], X0[:, 1], c=Y)
-    plt.title("all data, view 1")
-    plt.subplot(122)
-    plt.scatter(X1[:, 0], X1[:, 1], c=Y)
-    plt.title("all data, view 2")
-    plt.show()
-
-    # shuffle
-    order = np.random.permutation(n_tot)
-    X0 = X0[order, :]
-    X1 = X1[order, :]
-    Y = Y[order]
-
-    # make kernel dictionaries
-    kernel_dict = {}
-    test_kernel_dict = {}
-    kernel_dict[0] = rbf_kernel(X0[0:n_tr, :])
-    kernel_dict[1] = rbf_kernel(X1[0:n_tr, :])
-    test_kernel_dict[0] = rbf_kernel(X0[n_tr:n_tot, :], X0[0:n_tr, :])
-    test_kernel_dict[1] = rbf_kernel(X1[n_tr:n_tot, :], X1[0:n_tr, :])
-
-    # =========== use MVML in classifying the data ============
-
-    # demo of how the code is intended to be used; parameters are not
-    # cross-validated, just picked by hand
-
-    # with Nystrom approximation
-    # default: learn A, don't learn w   (learn_A=1, learn_w=0)
-    mvml = MVML(lmbda=0.1, eta=1, nystrom_param=0.2, kernel='precomputed')
-    mvml.fit(kernel_dict, Y[0:n_tr])
-
-    pred1 = np.sign(mvml.predict(test_kernel_dict))  # take sign for classification result
-
-    # without approximation
-    mvml2 = MVML(lmbda=0.1, eta=1, nystrom_param=1, kernel='precomputed')
-    mvml2.fit(kernel_dict, Y[0:n_tr])
-    pred2 = np.sign(mvml2.predict(test_kernel_dict))
-
-    # use MVML_Cov, don't learn w
-    mvml3 = MVML(lmbda=0.1, eta=1, learn_A=3, nystrom_param=1, kernel='precomputed')
-    mvml3.fit(kernel_dict, Y[0:n_tr])
-    pred3 = np.sign(mvml3.predict(test_kernel_dict))
-
-    # use MVML_I, don't learn w
-    mvml4 = MVML(lmbda=0.1, eta=1, learn_A=4, nystrom_param=1, kernel='precomputed')
-    mvml4.fit(kernel_dict, Y[0:n_tr])
-    pred4 = np.sign(mvml4.predict(test_kernel_dict))
-
-    # =========== show results ============
-
-    # accuracies
-    acc1 = accuracy_score(Y[n_tr:n_tot], pred1)
-    acc2 = accuracy_score(Y[n_tr:n_tot], pred2)
-    acc3 = accuracy_score(Y[n_tr:n_tot], pred3)
-    acc4 = accuracy_score(Y[n_tr:n_tot], pred4)
-
-    # display the obtained accuracies
-    print("MVML:       ", acc1)
-    print("MVMLsparse: ", acc2)
-    print("MVML_Cov:   ", acc3)
-    print("MVML_I:     ", acc4)
-
-    # plot data and some classification results
-    plt.figure(2, figsize=(10., 8.))
-    plt.subplot(341)
-    plt.scatter(X0[n_tr:n_tot, 0], X0[n_tr:n_tot, 1], c=Y[n_tr:n_tot])
-    plt.title("orig. view 1")
-    plt.subplot(342)
-    plt.scatter(X1[n_tr:n_tot, 0], X1[n_tr:n_tot, 1], c=Y[n_tr:n_tot])
-    plt.title("orig. view 2")
-
-    # mask the misclassified points (set them to 0) so they stand out in the plots
-    pred1[np.where(pred1[:, 0] != Y[n_tr:n_tot])] = 0
-    pred1 = pred1.reshape((pred1.shape[0]))
-    plt.subplot(343)
-    plt.scatter(X0[n_tr:n_tot, 0], X0[n_tr:n_tot, 1], c=pred1)
-    plt.title("MVML view 1")
-    plt.subplot(344)
-    plt.scatter(X1[n_tr:n_tot, 0], X1[n_tr:n_tot, 1], c=pred1)
-    plt.title("MVML view 2")
-
-    pred2[np.where(pred2[:, 0] != Y[n_tr:n_tot])] = 0
-    pred2 = pred2.reshape((pred2.shape[0]))
-    plt.subplot(345)
-    plt.scatter(X0[n_tr:n_tot, 0], X0[n_tr:n_tot, 1], c=pred2)
-    plt.title("MVMLsparse view 1")
-    plt.subplot(346)
-    plt.scatter(X1[n_tr:n_tot, 0], X1[n_tr:n_tot, 1], c=pred2)
-    plt.title("MVMLsparse view 2")
-
-    pred3[np.where(pred3[:, 0] != Y[n_tr:n_tot])] = 0
-    pred3 = pred3.reshape((pred3.shape[0]))
-    plt.subplot(347)
-    plt.scatter(X0[n_tr:n_tot, 0], X0[n_tr:n_tot, 1], c=pred3)
-    plt.title("MVML_Cov view 1")
-    plt.subplot(348)
-    plt.scatter(X1[n_tr:n_tot, 0], X1[n_tr:n_tot, 1], c=pred3)
-    plt.title("MVML_Cov view 2")
-
-    pred4[np.where(pred4[:, 0] != Y[n_tr:n_tot])] = 0
-    pred4 = pred4.reshape((pred4.shape[0]))
-    plt.subplot(349)
-    plt.scatter(X0[n_tr:n_tot, 0], X0[n_tr:n_tot, 1], c=pred4)
-    plt.title("MVML_I view 1")
-    plt.subplot(3, 4, 10)
-    plt.scatter(X1[n_tr:n_tot, 0], X1[n_tr:n_tot, 1], c=pred4)
-    plt.title("MVML_I view 2")
-
-    plt.show()
-
-
-.. rst-class:: sphx-glr-timing
-
-   **Total running time of the script:** ( 0 minutes  0.000 seconds)
-
-
-.. _sphx_glr_download_tutorial_auto_examples_mvml_plot_.py:
-
-
-.. only :: html
-
- .. container:: sphx-glr-footer
-    :class: sphx-glr-footer-example
-
-
-
-  .. container:: sphx-glr-download
-
-     :download:`Download Python source code: mvml_plot_.py <mvml_plot_.py>`
-
-
-
-  .. container:: sphx-glr-download
-
-     :download:`Download Jupyter notebook: mvml_plot_.ipynb <mvml_plot_.ipynb>`
-
-
-.. only:: html
-
- .. rst-class:: sphx-glr-signature
-
-    `Gallery generated by Sphinx-Gallery <https://sphinx-gallery.github.io>`_
diff --git a/doc/tutorial/auto_examples/mvml_plot__codeobj.pickle b/doc/tutorial/auto_examples/mvml_plot__codeobj.pickle
deleted file mode 100644
index 3a2d50c1dbdf979684f97440eb387b7c4bff82be..0000000000000000000000000000000000000000
GIT binary patch
literal 0
HcmV?d00001

literal 1176
zcmb7E$&M2-5arNGCX0q;G@AJYxb%$!caDq@9Joe~yJp&oH+dln5)vO^PMH6tymUg7
z8Km^7>e=<G>Q%XaG{1i~&+-?RRB;Z}F(qvb%a5i51KoQ@UHlc_e2%ZfT<Vr+4YG^h
z(QPCV5W}=WVJcMTIU*D3h2F=oD!|(I%DC9ieOQ>y{dx;V0#6kEbcFO5;nr?B5F=Xw
z&M<0QO_<p+3$b7P=g^1ijyPh$*;bG(tSK{;gRMzcBMj5L7}5+;iT)JyVTLVR_8==!
zlWioZE%uB4)`zQsrQ{YWDOlUfnG14G-b>7Di0qVTEkM>A#cHyRJgcm5y^(4k13p}6
z?poA+cPKpB82M5Vr@3;RZCbrgRtkRv_2H6G>J6d$wWZ2HW%?II&gX-gb!dt41oNS}
z`*QbTs3+SPdm}t|3>Ss)wk!rt@T=g7<JIX|#MV%nZ(!6Q@uWD&bsWmuGC3s=E6`+n
zbm^TB%PbQm&Vi|w&1`k??jy21CD!$EU>c0@j--cFIhTJ!$UB}EQjP&1=BX~n9KNQq
zN0Z{5SJNV~+_MLN7~Zsc=kX-PcRl_MhGL3?BuB+R4PyFDUaC^|%vSfTgueX)tg^>(

diff --git a/doc/tutorial/auto_examples/sg_execution_times.rst b/doc/tutorial/auto_examples/sg_execution_times.rst
deleted file mode 100644
index 6210534..0000000
--- a/doc/tutorial/auto_examples/sg_execution_times.rst
+++ /dev/null
@@ -1,15 +0,0 @@
-
-
-Computation times
-=================
-**00:02.336** total execution time for **tutorial_auto_examples** files:
-
-
-.. toctree::
-   :maxdepth: 2
-   :caption: Contents:
-
-
-   tutorial/auto_examples/mumbo/sg_execution_times
-   tutorial/auto_examples/cumbo/sg_execution_times
-   tutorial/auto_examples/mvml/sg_execution_times
diff --git a/doc/tutorial/times.rst b/doc/tutorial/times.rst
new file mode 100644
index 0000000..8350bcb
--- /dev/null
+++ b/doc/tutorial/times.rst
@@ -0,0 +1,16 @@
+
+
+Computation times
+=================
+
+Total execution time for **tutorial_auto_examples** files:
+
+
+.. toctree::
+   :maxdepth: 3
+   :caption: Contents:
+
+
+   auto_examples/mumbo/sg_execution_times
+   auto_examples/cumbo/sg_execution_times
+   auto_examples/mvml/sg_execution_times
diff --git a/examples/cumbo/README.txt b/examples/cumbo/README.txt
index 41a7732..03dcac9 100644
--- a/examples/cumbo/README.txt
+++ b/examples/cumbo/README.txt
@@ -1,7 +1,3 @@
-.. _examples:
-
-Examples
-========
 
 MuCuMBo Examples
 ----------------
diff --git a/examples/mumbo/README.txt b/examples/mumbo/README.txt
index f749137..83c7006 100644
--- a/examples/mumbo/README.txt
+++ b/examples/mumbo/README.txt
@@ -1,7 +1,3 @@
-.. _examples:
-
-Examples
-========
 
 MuMBo Examples
 --------------
diff --git a/examples/mvml/README.txt b/examples/mvml/README.txt
index b310f9f..7a325f2 100644
--- a/examples/mvml/README.txt
+++ b/examples/mvml/README.txt
@@ -1,10 +1,6 @@
-.. _examples:
 
-Examples
-========
-
-MVML
-----
+MVML Examples
+-------------
 
 The following toy examples illustrate how the MVML algorithm
 
-- 
GitLab